Merge branch 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel...
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 30 Dec 2021 14:49:54 +0000 (15:49 +0100)
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>
Thu, 30 Dec 2021 14:49:54 +0000 (15:49 +0100)
Pull ARM cpufreq updates for 5.17-rc1 from Viresh Kumar:

"- Qcom cpufreq driver updates improve irq support (Ard Biesheuvel, Stephen Boyd,
   and Vladimir Zapolskiy).

 - Fixes a double devm_remap in the mediatek driver (Hector Yuan).

 - Introduces thermal pressure helpers (Lukasz Luba)."

* 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  cpufreq: mediatek-hw: Fix double devm_remap in hotplug case
  cpufreq: qcom-hw: Use optional irq API
  cpufreq: qcom-hw: Set CPU affinity of dcvsh interrupts
  cpufreq: qcom-hw: Fix probable nested interrupt handling
  cpufreq: qcom-cpufreq-hw: Avoid stack buffer for IRQ name
  arch_topology: Remove unused topology_set_thermal_pressure() and related
  cpufreq: qcom-cpufreq-hw: Use new thermal pressure update function
  cpufreq: qcom-cpufreq-hw: Update offline CPUs per-cpu thermal pressure
  thermal: cpufreq_cooling: Use new thermal pressure update function
  arch_topology: Introduce thermal pressure update function

1269 files changed:
.mailmap
Documentation/admin-guide/blockdev/drbd/figures.rst
Documentation/admin-guide/blockdev/drbd/peer-states-8.dot [moved from Documentation/admin-guide/blockdev/drbd/node-states-8.dot with 71% similarity]
Documentation/admin-guide/laptops/thinkpad-acpi.rst
Documentation/admin-guide/sysctl/kernel.rst
Documentation/arm/marvell.rst
Documentation/arm64/pointer-authentication.rst
Documentation/bpf/index.rst
Documentation/conf.py
Documentation/cpu-freq/core.rst
Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
Documentation/devicetree/bindings/input/gpio-keys.yaml
Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
Documentation/devicetree/bindings/net/ethernet-phy.yaml
Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
Documentation/devicetree/bindings/power/supply/bq25980.yaml
Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
Documentation/devicetree/bindings/spi/spi-rockchip.yaml
Documentation/doc-guide/sphinx.rst
Documentation/filesystems/autofs.rst
Documentation/filesystems/cifs/ksmbd.rst
Documentation/filesystems/netfs_library.rst
Documentation/i2c/smbus-protocol.rst
Documentation/locking/locktypes.rst
Documentation/networking/ipvs-sysctl.rst
Documentation/networking/timestamping.rst
Documentation/power/energy-model.rst
Documentation/process/changes.rst
Documentation/process/submitting-patches.rst
Documentation/trace/ftrace.rst
Documentation/translations/it_IT/doc-guide/sphinx.rst
Documentation/translations/it_IT/process/changes.rst
Documentation/translations/zh_CN/doc-guide/sphinx.rst
Documentation/translations/zh_CN/process/management-style.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/syscalls/syscall.tbl
arch/arc/include/asm/cacheflush.h
arch/arm/Kconfig
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/include/asm/cacheflush.h
arch/arm/mach-socfpga/core.h
arch/arm/mach-socfpga/platsmp.c
arch/arm64/boot/dts/apple/t8103.dtsi
arch/arm64/boot/dts/exynos/exynosautov9.dtsi
arch/arm64/include/asm/ftrace.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/csky/kernel/traps.c
arch/hexagon/include/asm/timer-regs.h [deleted file]
arch/hexagon/include/asm/timex.h
arch/hexagon/kernel/.gitignore [new file with mode: 0644]
arch/hexagon/kernel/time.c
arch/hexagon/lib/io.c
arch/ia64/kernel/syscalls/syscall.tbl
arch/m68k/include/asm/cacheflush_mm.h
arch/m68k/kernel/syscalls/syscall.tbl
arch/m68k/kernel/traps.c
arch/microblaze/kernel/syscalls/syscall.tbl
arch/mips/Kconfig
arch/mips/bcm63xx/clk.c
arch/mips/boot/compressed/Makefile
arch/mips/generic/yamon-dt.c
arch/mips/include/asm/cacheflush.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/proc.c
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/mips/kvm/mips.c
arch/mips/lantiq/clk.c
arch/mips/net/bpf_jit_comp.h
arch/nds32/include/asm/cacheflush.h
arch/nios2/include/asm/cacheflush.h
arch/parisc/Makefile
arch/parisc/configs/generic-32bit_defconfig
arch/parisc/configs/generic-64bit_defconfig
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/jump_label.h
arch/parisc/include/asm/rt_sigframe.h
arch/parisc/install.sh
arch/parisc/kernel/entry.S
arch/parisc/kernel/signal.c
arch/parisc/kernel/signal32.h
arch/parisc/kernel/syscall.S
arch/parisc/kernel/syscalls/syscall.tbl
arch/parisc/kernel/time.c
arch/parisc/kernel/vmlinux.lds.S
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/head_32.h
arch/powerpc/kernel/head_8xx.S
arch/powerpc/kernel/signal.h
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/syscalls/syscall.tbl
arch/powerpc/kernel/watchdog.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/nohash/kaslr_booke.c
arch/powerpc/mm/nohash/tlb.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/sysdev/xive/Kconfig
arch/powerpc/sysdev/xive/common.c
arch/riscv/Makefile
arch/riscv/configs/defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/mmu.c
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_sbi.c
arch/riscv/kvm/vm.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/boot/startup.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/kexec.h
arch/s390/include/asm/pci_io.h
arch/s390/kernel/crash_dump.c
arch/s390/kernel/ipl.c
arch/s390/kernel/machine_kexec_file.c
arch/s390/kernel/setup.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/kernel/traps.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kernel/vdso64/Makefile
arch/s390/kvm/kvm-s390.c
arch/s390/lib/test_unwind.c
arch/sh/include/asm/cacheflush.h
arch/sh/kernel/syscalls/syscall.tbl
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/syscalls/syscall.tbl
arch/sparc/kernel/windows.c
arch/x86/Kconfig
arch/x86/entry/entry_64.S
arch/x86/entry/vsyscall/vsyscall_64.c
arch/x86/events/intel/core.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/sev-common.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/asm/xen/hypervisor.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/sgx/main.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/process.c
arch/x86/kernel/setup.c
arch/x86/kernel/sev.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kernel/vm86_32.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/ioapic.h
arch/x86/kvm/irq.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/kvm/xen.c
arch/x86/platform/efi/quirks.c
arch/x86/realmode/init.c
arch/x86/xen/xen-asm.S
arch/xtensa/include/asm/cacheflush.h
arch/xtensa/kernel/syscalls/syscall.tbl
block/bdev.c
block/blk-cgroup.c
block/blk-core.c
block/blk-flush.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
block/blk.h
block/elevator.c
block/fops.c
block/genhd.c
block/ioprio.c
drivers/acpi/cppc_acpi.c
drivers/acpi/glue.c
drivers/acpi/internal.h
drivers/acpi/property.c
drivers/acpi/scan.c
drivers/android/binder.c
drivers/ata/ahci.c
drivers/ata/ahci_ceva.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-sata.c
drivers/ata/pata_falcon.c
drivers/ata/sata_fsl.c
drivers/block/loop.c
drivers/block/virtio_blk.c
drivers/block/zram/zram_drv.c
drivers/bus/mhi/core/pm.c
drivers/bus/mhi/pci_generic.c
drivers/char/agp/parisc-agp.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/clk/imx/clk-imx8qxp-lpcg.c
drivers/clk/imx/clk-imx8qxp.c
drivers/clk/qcom/clk-alpha-pll.c
drivers/clk/qcom/clk-regmap-mux.c
drivers/clk/qcom/common.c
drivers/clk/qcom/common.h
drivers/clk/qcom/gcc-sm6125.c
drivers/clk/versatile/clk-icst.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/dw_apb_timer_of.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/intel_pstate.c
drivers/dma-buf/heaps/system_heap.c
drivers/firmware/arm_scmi/base.c
drivers/firmware/arm_scmi/scmi_pm_domain.c
drivers/firmware/arm_scmi/sensors.c
drivers/firmware/arm_scmi/virtio.c
drivers/firmware/arm_scmi/voltage.c
drivers/firmware/smccc/soc_id.c
drivers/gpio/Kconfig
drivers/gpio/gpio-virtio.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/hyperv/hyperv_drm_drv.c
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gtt.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
drivers/gpu/drm/lima/lima_device.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_devfreq.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/sun4i/Kconfig
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/hid/Kconfig
drivers/hid/hid-asus.c
drivers/hid/hid-bigbenff.c
drivers/hid/hid-chicony.c
drivers/hid/hid-corsair.c
drivers/hid/hid-elan.c
drivers/hid/hid-elo.c
drivers/hid/hid-ft260.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-holtek-kbd.c
drivers/hid/hid-holtek-mouse.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lg.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-nintendo.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-quirks.c
drivers/hid/hid-roccat-arvo.c
drivers/hid/hid-roccat-isku.c
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-roccat-koneplus.c
drivers/hid/hid-roccat-konepure.c
drivers/hid/hid-roccat-kovaplus.c
drivers/hid/hid-roccat-lua.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/hid-roccat-ryos.c
drivers/hid/hid-roccat-savu.c
drivers/hid/hid-samsung.c
drivers/hid/hid-sony.c
drivers/hid/hid-thrustmaster.c
drivers/hid/hid-u2fzero.c
drivers/hid/hid-uclogic-core.c
drivers/hid/hid-uclogic-params.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/intel-ish-hid/ishtp-fw-loader.c
drivers/hid/intel-ish-hid/ishtp-hid-client.c
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hid/wacom_wac.h
drivers/hv/hv_balloon.c
drivers/hwmon/corsair-psu.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/nct6775.c
drivers/hwmon/pwm-fan.c
drivers/hwmon/sht4x.c
drivers/i2c/busses/i2c-cbus-gpio.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/busses/i2c-virtio.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/accel/kxsd9.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad7768-1.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/axp20x_adc.c
drivers/iio/adc/dln2-adc.c
drivers/iio/adc/stm32-adc.c
drivers/iio/gyro/adxrs290.c
drivers/iio/gyro/itg3200_buffer.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/ltr501.c
drivers/iio/light/stk3310.c
drivers/iio/trigger/stm32-timer-trigger.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/irdma/pble.c
drivers/infiniband/hw/irdma/pble.h
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/irdma/verbs.h
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
drivers/input/misc/xen-kbdfront.c
drivers/iommu/amd/iommu_v2.c
drivers/iommu/intel/cap_audit.c
drivers/iommu/intel/iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-apple-aic.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-aspeed-scu-ic.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-nvic.c
drivers/md/md.c
drivers/media/cec/core/cec-adap.c
drivers/media/common/videobuf2/videobuf2-dma-sg.c
drivers/media/i2c/hi846.c
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
drivers/memory/mtk-smi.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/eeprom/at25.c
drivers/misc/fastrpc.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/nand/raw/Kconfig
drivers/mtd/nand/raw/fsmc_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/net/Kconfig
drivers/net/amt.c
drivers/net/bonding/bond_alb.c
drivers/net/can/kvaser_pciefd.c
drivers/net/can/m_can/m_can.c
drivers/net/can/m_can/m_can.h
drivers/net/can/m_can/m_can_pci.c
drivers/net/can/pch_can.c
drivers/net/can/sja1000/ems_pcmcia.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
drivers/net/dsa/b53/b53_spi.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/mv88e6xxx/serdes.h
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/rtl8365mb.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/aquantia/atlantic/aq_common.h
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
drivers/net/ethernet/asix/ax88796c_main.h
drivers/net/ethernet/asix/ax88796c_spi.c
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/google/gve/gve_utils.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/huawei/hinic/hinic_sriov.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
drivers/net/ethernet/intel/ice/ice_fdir.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/microsoft/mana/hw_channel.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_vcap.c
drivers/net/ethernet/natsemi/xtsonic.c
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
drivers/net/ethernet/ni/nixge.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/mkiss.c
drivers/net/ipa/ipa_cmd.c
drivers/net/ipa/ipa_cmd.h
drivers/net/ipa/ipa_endpoint.c
drivers/net/ipa/ipa_main.c
drivers/net/ipa/ipa_modem.c
drivers/net/ipa/ipa_resource.c
drivers/net/ipa/ipa_smp2p.c
drivers/net/ipa/ipa_smp2p.h
drivers/net/mdio/mdio-aspeed.c
drivers/net/phy/phylink.c
drivers/net/slip/slip.h
drivers/net/tun.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/wireguard/allowedips.c
drivers/net/wireguard/device.c
drivers/net/wireguard/device.h
drivers/net/wireguard/main.c
drivers/net/wireguard/queueing.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/ratelimiter.c
drivers/net/wireguard/receive.c
drivers/net/wireguard/socket.c
drivers/net/wireless/ath/ath11k/mhi.c
drivers/net/wireless/intel/iwlwifi/fw/uefi.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/realtek/rtw89/fw.c
drivers/net/wireless/realtek/rtw89/fw.h
drivers/net/wwan/iosm/iosm_ipc_imem.c
drivers/net/wwan/iosm/iosm_ipc_imem.h
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
drivers/nfc/virtual_ncidev.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/tcp.c
drivers/nvme/host/zns.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/tcp.c
drivers/of/irq.c
drivers/pci/controller/dwc/pci-exynos.c
drivers/pci/controller/dwc/pcie-qcom-ep.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/controller/pcie-apple.c
drivers/phy/hisilicon/phy-hi3670-pcie.c
drivers/phy/marvell/phy-mvebu-cp110-utmi.c
drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/qualcomm/phy-qcom-usb-hsic.c
drivers/phy/st/phy-stm32-usbphyc.c
drivers/phy/ti/phy-am654-serdes.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/phy/ti/phy-omap-usb2.c
drivers/phy/ti/phy-tusb1210.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-apple-gpio.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-sdm845.c
drivers/pinctrl/qcom/pinctrl-sm8350.c
drivers/pinctrl/ralink/pinctrl-mt7620.c
drivers/pinctrl/tegra/pinctrl-tegra.c
drivers/pinctrl/tegra/pinctrl-tegra194.c
drivers/platform/chrome/cros_ec_ishtp.c
drivers/platform/mellanox/mlxreg-lc.c
drivers/platform/x86/Kconfig
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/dell/Kconfig
drivers/platform/x86/hp_accel.c
drivers/platform/x86/intel/hid.c
drivers/platform/x86/intel/ishtp_eclite.c
drivers/platform/x86/lg-laptop.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/think-lmi.h
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/powercap/dtpm.c
drivers/powercap/dtpm_cpu.c
drivers/ptp/ptp_clockmatrix.c
drivers/ptp/ptp_ocp.c
drivers/s390/block/dasd_devmap.c
drivers/s390/char/raw3270.c
drivers/s390/cio/chp.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_iscsi.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_edif.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshpb.c
drivers/scsi/virtio_scsi.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-fsl-lpspi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/fbtft/fb_ssd1351.c
drivers/staging/fbtft/fbtft-core.c
drivers/staging/greybus/audio_helper.c
drivers/staging/netlogic/Kconfig [deleted file]
drivers/staging/netlogic/Makefile [deleted file]
drivers/staging/netlogic/TODO [deleted file]
drivers/staging/netlogic/platform_net.c [deleted file]
drivers/staging/netlogic/platform_net.h [deleted file]
drivers/staging/netlogic/xlr_net.c [deleted file]
drivers/staging/netlogic/xlr_net.h [deleted file]
drivers/staging/r8188eu/core/rtw_mlme_ext.c
drivers/staging/r8188eu/os_dep/ioctl_linux.c
drivers/staging/r8188eu/os_dep/mlme_linux.c
drivers/staging/rtl8192e/rtl8192e/rtl_core.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_spc.c
drivers/tee/optee/ffa_abi.c
drivers/thermal/intel/int340x_thermal/Kconfig
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
drivers/thermal/thermal_core.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/serial/8250/8250_bcm7271.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/Kconfig
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/liteuart.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/serial_core.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/cdnsp-mem.c
drivers/usb/cdns3/host.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/core/config.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/legacy/dbgp.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/udc-xilinx.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/typec/tcpm/fusb302.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tipd/core.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vfio/pci/vfio_pci_igd.c
drivers/vfio/vfio.c
drivers/vhost/vdpa.c
drivers/vhost/vsock.c
drivers/video/console/sticon.c
drivers/video/console/vgacon.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/simplefb.c
drivers/video/fbdev/xen-fbfront.c
drivers/virtio/virtio_ring.c
drivers/xen/Kconfig
drivers/xen/pvcalls-front.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe_frontend.c
fs/aio.c
fs/attr.c
fs/btrfs/async-thread.c
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/root-tree.c
fs/btrfs/scrub.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/cifs/cifs_swn.c
fs/cifs/cifsfs.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/fscache.c
fs/cifs/inode.c
fs/cifs/sess.c
fs/cifs/smb2pdu.c
fs/erofs/utils.c
fs/file.c
fs/fuse/dev.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/super.c
fs/inode.c
fs/io-wq.c
fs/io_uring.c
fs/iomap/buffered-io.c
fs/ksmbd/smb2pdu.c
fs/netfs/read_helper.c
fs/nfs/inode.c
fs/nfs/nfs42proc.c
fs/nfs/nfs42xdr.c
fs/nfs/nfs4state.c
fs/nfs/nfstrace.h
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsctl.c
fs/ntfs/Kconfig
fs/proc/vmcore.c
fs/pstore/Kconfig
fs/pstore/blk.c
fs/signalfd.c
fs/smbfs_common/cifs_arc4.c
fs/tracefs/inode.c
fs/udf/dir.c
fs/udf/namei.c
fs/udf/super.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_super.c
include/asm-generic/cacheflush.h
include/linux/acpi.h
include/linux/bpf.h
include/linux/btf.h
include/linux/cacheflush.h [new file with mode: 0644]
include/linux/cacheinfo.h
include/linux/delay.h
include/linux/device/driver.h
include/linux/filter.h
include/linux/fs.h
include/linux/hid.h
include/linux/highmem.h
include/linux/hugetlb_cgroup.h
include/linux/intel-ish-client-if.h
include/linux/ipc_namespace.h
include/linux/kprobes.h
include/linux/kvm_host.h
include/linux/kvm_types.h
include/linux/mhi.h
include/linux/mlx5/eswitch.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm_types.h
include/linux/mod_devicetable.h
include/linux/netdevice.h
include/linux/page-flags.h
include/linux/pagemap.h
include/linux/percpu-refcount.h
include/linux/percpu.h
include/linux/phy.h
include/linux/pm_runtime.h
include/linux/printk.h
include/linux/ptp_classify.h
include/linux/regulator/driver.h
include/linux/sched/cputime.h
include/linux/sched/signal.h
include/linux/sched/task.h
include/linux/sdb.h [deleted file]
include/linux/siphash.h
include/linux/skbuff.h
include/linux/trace_events.h
include/linux/virtio.h
include/linux/virtio_net.h
include/linux/wait.h
include/net/bond_alb.h
include/net/busy_poll.h
include/net/dst_cache.h
include/net/fib_rules.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/ipv6_stubs.h
include/net/netfilter/nf_conntrack.h
include/net/netns/ipv4.h
include/net/nfc/nci_core.h
include/net/nl802154.h
include/net/page_pool.h
include/net/sock.h
include/rdma/rdma_netlink.h
include/soc/mscc/ocelot_vcap.h
include/sound/soc-acpi.h
include/trace/events/rpcgss.h
include/uapi/asm-generic/poll.h
include/uapi/drm/virtgpu_drm.h
include/uapi/linux/if_ether.h
include/uapi/linux/resource.h
include/xen/xenbus.h
init/Kconfig
ipc/shm.c
ipc/util.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/helpers.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/entry/syscall_user_dispatch.c
kernel/events/core.c
kernel/kprobes.c
kernel/locking/rwsem.c
kernel/power/hibernate.c
kernel/power/user.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/wait.c
kernel/signal.c
kernel/softirq.c
kernel/time/tick-sched.c
kernel/time/timer.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_synth.c
kernel/trace/trace_uprobe.c
kernel/trace/tracing_map.c
lib/Kconfig.debug
lib/nmi_backtrace.c
lib/siphash.c
lib/test_kasan.c
lib/zstd/Makefile
lib/zstd/common/compiler.h
lib/zstd/compress/zstd_compress_superblock.c
lib/zstd/compress/zstd_opt.c
mm/Kconfig
mm/backing-dev.c
mm/damon/core.c
mm/damon/dbgfs.c
mm/damon/vaddr-test.h
mm/damon/vaddr.c
mm/filemap.c
mm/highmem.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slob.c
mm/slub.c
mm/swap.c
mm/swap_slots.c
mm/util.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/core/dev.c
net/core/devlink.c
net/core/dst_cache.c
net/core/fib_rules.c
net/core/filter.c
net/core/neighbour.c
net/core/page_pool.c
net/core/skmsg.c
net/core/sock.c
net/core/sock_map.c
net/ethtool/ioctl.c
net/ethtool/netlink.c
net/ipv4/bpf_tcp_ca.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/nexthop.c
net/ipv4/tcp.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv6/af_inet6.c
net/ipv6/esp6.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/seg6_iptunnel.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/led.h
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wme.c
net/mctp/route.c
net/mctp/test/utils.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/ncsi/ncsi-cmd.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_payload.c
net/netfilter/nft_set_pipapo_avx2.c
net/netfilter/xt_IDLETIMER.c
net/netlink/af_netlink.c
net/nfc/core.c
net/nfc/nci/core.c
net/nfc/netlink.c
net/rds/tcp.c
net/rxrpc/conn_client.c
net/rxrpc/peer_object.c
net/sched/act_mirred.c
net/sched/sch_ets.c
net/sched/sch_fq_pie.c
net/smc/af_smc.c
net/smc/smc_close.c
net/smc/smc_core.c
net/sunrpc/xprtsock.c
net/tipc/crypto.c
net/tipc/link.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/util.c
net/xdp/xsk_buff_pool.c
samples/Kconfig
samples/Makefile
samples/bpf/hbm_kern.h
samples/bpf/xdp_redirect_cpu_user.c
samples/bpf/xdp_sample_user.c
samples/ftrace/Makefile
samples/ftrace/ftrace-direct-multi-modify.c [new file with mode: 0644]
samples/ftrace/ftrace-direct-multi.c
scripts/mod/devicetable-offsets.c
scripts/mod/file2alias.c
security/selinux/ss/hashtab.c
sound/core/control_compat.c
sound/core/oss/pcm_oss.c
sound/hda/intel-dsp-config.c
sound/pci/cmipci.c
sound/pci/ctxfi/ctamixer.c
sound/pci/ctxfi/ctdaio.c
sound/pci/ctxfi/ctresource.c
sound/pci/ctxfi/ctresource.h
sound/pci/ctxfi/ctsrc.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/pci-acp6x.c
sound/soc/codecs/cs35l41-spi.c
sound/soc/codecs/cs35l41.c
sound/soc/codecs/cs35l41.h
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/rk817_codec.c
sound/soc/codecs/rt1011.c
sound/soc/codecs/rt1011.h
sound/soc/codecs/rt5682-i2c.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682.h
sound/soc/codecs/rt5682s.c
sound/soc/codecs/rt9120.c
sound/soc/codecs/wcd934x.c
sound/soc/codecs/wcd938x.c
sound/soc/codecs/wm_adsp.c
sound/soc/codecs/wsa881x.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/intel/common/soc-acpi-intel-cml-match.c
sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
sound/soc/mediatek/mt8173/mt8173-rt5650.c
sound/soc/qcom/qdsp6/audioreach.h
sound/soc/qcom/qdsp6/q6adm.c
sound/soc/qcom/qdsp6/q6asm-dai.c
sound/soc/qcom/qdsp6/q6prm.c
sound/soc/qcom/qdsp6/q6routing.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/sh/rcar/dma.c
sound/soc/soc-acpi.c
sound/soc/soc-dapm.c
sound/soc/soc-topology.c
sound/soc/sof/Kconfig
sound/soc/sof/control.c
sound/soc/sof/intel/hda-bus.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda.c
sound/soc/stm/stm32_i2s.c
sound/soc/tegra/tegra186_dspk.c
sound/soc/tegra/tegra210_admaif.c
sound/soc/tegra/tegra210_adx.c
sound/soc/tegra/tegra210_ahub.c
sound/soc/tegra/tegra210_amx.c
sound/soc/tegra/tegra210_dmic.c
sound/soc/tegra/tegra210_i2s.c
sound/soc/tegra/tegra210_mixer.c
sound/soc/tegra/tegra210_mvc.c
sound/soc/tegra/tegra210_sfc.c
sound/usb/mixer_quirks.c
sound/usb/pcm.c
sound/xen/xen_snd_front.c
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/bpf/resolve_btfids/main.c
tools/bpf/runqslower/Makefile
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-libpython-version.c [deleted file]
tools/include/linux/debug_locks.h [deleted file]
tools/include/linux/hardirq.h [deleted file]
tools/include/linux/irqflags.h [deleted file]
tools/include/linux/kernel.h
tools/include/linux/lockdep.h [deleted file]
tools/include/linux/math.h [new file with mode: 0644]
tools/include/linux/proc_fs.h [deleted file]
tools/include/linux/spinlock.h
tools/include/linux/stacktrace.h [deleted file]
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/lib/bpf/bpf_gen_internal.h
tools/lib/bpf/gen_loader.c
tools/lib/bpf/libbpf.c
tools/objtool/elf.c
tools/objtool/objtool.c
tools/perf/Makefile.config
tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/builtin-inject.c
tools/perf/builtin-report.c
tools/perf/tests/event_update.c
tools/perf/tests/expr.c
tools/perf/tests/parse-metric.c
tools/perf/tests/sample-parsing.c
tools/perf/tests/wp.c
tools/perf/ui/hist.c
tools/perf/util/arm-spe.c
tools/perf/util/bpf_skel/bperf.h [deleted file]
tools/perf/util/bpf_skel/bperf_follower.bpf.c
tools/perf/util/bpf_skel/bperf_leader.bpf.c
tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
tools/perf/util/event.h
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt.c
tools/perf/util/parse-events.c
tools/perf/util/perf_regs.c
tools/perf/util/python.c
tools/perf/util/smt.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/util.c
tools/perf/util/util.h
tools/power/acpi/Makefile.config
tools/power/acpi/Makefile.rules
tools/testing/radix-tree/linux/lockdep.h
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/prog_tests/helper_restricted.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_helper_restricted.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/helper_restricted.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/map_in_map.c
tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
tools/testing/selftests/damon/.gitignore [new file with mode: 0644]
tools/testing/selftests/damon/Makefile
tools/testing/selftests/damon/_debugfs_common.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_attrs.sh
tools/testing/selftests/damon/debugfs_empty_targets.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_huge_count_read_write.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_schemes.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_target_ids.sh [new file with mode: 0644]
tools/testing/selftests/damon/huge_count_read_write.c [new file with mode: 0644]
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/gpio/gpio-mockup-cdev.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/access_tracking_perf_test.c
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/perf_test_util.h
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/kvm_create_max_vcpus.c
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/elf.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/perf_test_util.c
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/memslot_modification_stress_test.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
tools/testing/selftests/kvm/x86_64/userspace_io_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/forwarding/config
tools/testing/selftests/net/forwarding/tc_actions.sh
tools/testing/selftests/net/gre_gso.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/conntrack_vrf.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_concat_range.sh
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/netfilter/nft_queue.sh
tools/testing/selftests/netfilter/nft_zones_many.sh
tools/testing/selftests/tc-testing/config
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
tools/testing/selftests/tc-testing/tdc.py
tools/testing/selftests/tc-testing/tdc.sh
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/debug.config
tools/testing/selftests/wireguard/qemu/kernel.config
virt/kvm/kvm_main.c

index 14314e3c5d5e230374b34cd800dc63487ee906f7..b344067e0acb665b66bb7a299cba69fdf15a82fd 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -71,6 +71,9 @@ Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
 Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
+Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
+Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
+Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
@@ -123,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
+Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
index bd9a4901fe46d7856d1a1cee33ccd78738cad29e..9f73253ea35324231f2e90f1eab75e438ba63654 100644 (file)
@@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions
     :alt:   disk-states-8.dot
     :align: center
 
-.. kernel-figure:: node-states-8.dot
-    :alt:   node-states-8.dot
+.. kernel-figure:: peer-states-8.dot
+    :alt:   peer-states-8.dot
     :align: center
similarity index 71%
rename from Documentation/admin-guide/blockdev/drbd/node-states-8.dot
rename to Documentation/admin-guide/blockdev/drbd/peer-states-8.dot
index bfa54e1f8016a183df30fe2523f77848eaf4d2eb..6dc3954954d6afef4e2b64079e99f918f3eb71f8 100644 (file)
@@ -1,8 +1,3 @@
-digraph node_states {
-       Secondary -> Primary           [ label = "ioctl_set_state()" ]
-       Primary   -> Secondary         [ label = "ioctl_set_state()" ]
-}
-
 digraph peer_states {
        Secondary -> Primary           [ label = "recv state packet" ]
        Primary   -> Secondary         [ label = "recv state packet" ]
index 6721a80a2d4fbfd48328a4fe2ca18d892a311808..475eb0e81e4ae765ebcf3f0325bdbc68e66a7131 100644 (file)
@@ -1520,15 +1520,15 @@ This sysfs attribute controls the keyboard "face" that will be shown on the
 Lenovo X1 Carbon 2nd gen (2014)'s adaptive keyboard. The value can be read
 and set.
 
-- 1 = Home mode
-- 2 = Web-browser mode
-- 3 = Web-conference mode
-- 4 = Function mode
-- 5 = Layflat mode
+- 0 = Home mode
+- 1 = Web-browser mode
+- 2 = Web-conference mode
+- 3 = Function mode
+- 4 = Layflat mode
 
 For more details about which buttons will appear depending on the mode, please
 review the laptop's user guide:
-http://www.lenovo.com/shop/americas/content/user_guides/x1carbon_2_ug_en.pdf
+https://download.lenovo.com/ibmdl/pub/pc/pccbbs/mobiles_pdf/x1carbon_2_ug_en.pdf
 
 Battery charge control
 ----------------------
index 426162009ce9984445519c46f53bcb366456f607..0e486f41185ef35a6389283e1cdc7c4ed90cf30a 100644 (file)
@@ -1099,7 +1099,7 @@ task_delayacct
 ===============
 
 Enables/disables task delay accounting (see
-:doc:`accounting/delay-accounting.rst`). Enabling this feature incurs
+Documentation/accounting/delay-accounting.rst. Enabling this feature incurs
 a small amount of overhead in the scheduler but is useful for debugging
 and performance tuning. It is required by some tools such as iotop.
 
index 8323c79d321bbc1edddcc0cb710d94d617cb139b..9485a5a2e2e970aec59d0ec81e07090322b3fcec 100644 (file)
@@ -104,6 +104,8 @@ Discovery family
 
                 Not supported by the Linux kernel.
 
+  Homepage:
+        https://web.archive.org/web/20110924171043/http://www.marvell.com/embedded-processors/discovery-innovation/
   Core:
        Feroceon 88fr571-vd ARMv5 compatible
 
@@ -120,6 +122,7 @@ EBU Armada family
         - 88F6707
         - 88F6W11
 
+    - Product infos:   https://web.archive.org/web/20141002083258/http://www.marvell.com/embedded-processors/armada-370/
     - Product Brief:   https://web.archive.org/web/20121115063038/http://www.marvell.com/embedded-processors/armada-300/assets/Marvell_ARMADA_370_SoC.pdf
     - Hardware Spec:   https://web.archive.org/web/20140617183747/http://www.marvell.com/embedded-processors/armada-300/assets/ARMADA370-datasheet.pdf
     - Functional Spec: https://web.archive.org/web/20140617183701/http://www.marvell.com/embedded-processors/armada-300/assets/ARMADA370-FunctionalSpec-datasheet.pdf
@@ -127,9 +130,29 @@ EBU Armada family
   Core:
        Sheeva ARMv7 compatible PJ4B
 
+  Armada XP Flavors:
+        - MV78230
+        - MV78260
+        - MV78460
+
+    NOTE:
+       not to be confused with the non-SMP 78xx0 SoCs
+
+    - Product infos:   https://web.archive.org/web/20150101215721/http://www.marvell.com/embedded-processors/armada-xp/
+    - Product Brief:   https://web.archive.org/web/20121021173528/http://www.marvell.com/embedded-processors/armada-xp/assets/Marvell-ArmadaXP-SoC-product%20brief.pdf
+    - Functional Spec: https://web.archive.org/web/20180829171131/http://www.marvell.com/embedded-processors/armada-xp/assets/ARMADA-XP-Functional-SpecDatasheet.pdf
+    - Hardware Specs:
+        - https://web.archive.org/web/20141127013651/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78230_OS.PDF
+        - https://web.archive.org/web/20141222000224/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78260_OS.PDF
+        - https://web.archive.org/web/20141222000230/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78460_OS.PDF
+
+  Core:
+       Sheeva ARMv7 compatible Dual-core or Quad-core PJ4B-MP
+
   Armada 375 Flavors:
        - 88F6720
 
+    - Product infos: https://web.archive.org/web/20140108032402/http://www.marvell.com/embedded-processors/armada-375/
     - Product Brief: https://web.archive.org/web/20131216023516/http://www.marvell.com/embedded-processors/armada-300/assets/ARMADA_375_SoC-01_product_brief.pdf
 
   Core:
@@ -162,29 +185,6 @@ EBU Armada family
   Core:
        ARM Cortex-A9
 
-  Armada XP Flavors:
-        - MV78230
-        - MV78260
-        - MV78460
-
-    NOTE:
-       not to be confused with the non-SMP 78xx0 SoCs
-
-    Product Brief:
-       https://web.archive.org/web/20121021173528/http://www.marvell.com/embedded-processors/armada-xp/assets/Marvell-ArmadaXP-SoC-product%20brief.pdf
-
-    Functional Spec:
-       https://web.archive.org/web/20180829171131/http://www.marvell.com/embedded-processors/armada-xp/assets/ARMADA-XP-Functional-SpecDatasheet.pdf
-
-    - Hardware Specs:
-
-        - https://web.archive.org/web/20141127013651/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78230_OS.PDF
-        - https://web.archive.org/web/20141222000224/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78260_OS.PDF
-        - https://web.archive.org/web/20141222000230/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78460_OS.PDF
-
-  Core:
-       Sheeva ARMv7 compatible Dual-core or Quad-core PJ4B-MP
-
   Linux kernel mach directory:
        arch/arm/mach-mvebu
   Linux kernel plat directory:
@@ -436,7 +436,7 @@ Berlin family (Multimedia Solutions)
   - Flavors:
        - 88DE3010, Armada 1000 (no Linux support)
                - Core:         Marvell PJ1 (ARMv5TE), Dual-core
-               - Product Brief:        http://www.marvell.com.cn/digital-entertainment/assets/armada_1000_pb.pdf
+               - Product Brief:        https://web.archive.org/web/20131103162620/http://www.marvell.com/digital-entertainment/assets/armada_1000_pb.pdf
        - 88DE3005, Armada 1500 Mini
                - Design name:  BG2CD
                - Core:         ARM Cortex-A9, PL310 L2CC
index f127666ea3a81659daf3bba59c4c64c38bb6ff20..e5dad2e40aa8937b100cfc7def72bf371b89b72a 100644 (file)
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
 virtual address size configured by the kernel. For example, with a
 virtual address size of 48, the PAC is 7 bits wide.
 
-Recent versions of GCC can compile code with APIAKey-based return
-address protection when passed the -msign-return-address option. This
-uses instructions in the HINT space (unless -march=armv8.3-a or higher
-is also passed), and such code can run on systems without the pointer
-authentication extension.
+When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
+with HINT space pointer authentication instructions protecting
+function returns. Kernels built with this option will work on hardware
+with or without pointer authentication support.
 
 In addition to exec(), keys can also be reinitialized to random values
 using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
index 37f273a7e8b65c74f3bd7ba75aa4cb8f2750f547..610450f59e058222d392d1ec0518bd4117f99e3c 100644 (file)
@@ -15,7 +15,7 @@ that goes into great technical depth about the BPF Architecture.
 libbpf
 ======
 
-Documentation/bpf/libbpf/libbpf.rst is a userspace library for loading and interacting with bpf programs.
+Documentation/bpf/libbpf/index.rst is a userspace library for loading and interacting with bpf programs.
 
 BPF Type Format (BTF)
 =====================
index 17f7cee569879e5d018e8b38c9317d4b88a714d0..76e5eb5cb62b045e27079d0bf3c1675759484847 100644 (file)
@@ -249,11 +249,16 @@ except ImportError:
 
 html_static_path = ['sphinx-static']
 
-html_context = {
-    'css_files': [
-        '_static/theme_overrides.css',
-    ],
-}
+html_css_files = [
+    'theme_overrides.css',
+]
+
+if major <= 1 and minor < 8:
+    html_context = {
+        'css_files': [
+            '_static/theme_overrides.css',
+        ],
+    }
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
index 33cb90bd1d8f9fac3b21cdb8190b768c30980224..4ceef8e7217c38fc3e07606ce24b05fbed57d71d 100644 (file)
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
 The third argument is a struct cpufreq_freqs with the following
 values:
 
-=====  ===========================
-cpu    number of the affected CPU
+====== ======================================
+policy a pointer to the struct cpufreq_policy
 old    old frequency
 new    new frequency
 flags  flags of the cpufreq driver
-=====  ===========================
+====== ======================================
 
 3. CPUFreq Table Generation with Operating Performance Point (OPP)
 ==================================================================
index 29b9447f3b84afc92edf821718ea26a05be8f0e4..fe0c89edf7c10fcec94189ea04673a4096a69011 100644 (file)
@@ -17,9 +17,10 @@ properties:
     oneOf:
       - enum:
           - fsl,imx7ulp-lpi2c
-          - fsl,imx8qm-lpi2c
       - items:
-          - const: fsl,imx8qxp-lpi2c
+          - enum:
+              - fsl,imx8qxp-lpi2c
+              - fsl,imx8qm-lpi2c
           - const: fsl,imx7ulp-lpi2c
 
   reg:
index c65921e66dc1f4d0f42101c67ebf06488fa5cf12..81c87295912cec047ac6abcdb64e61997ad826ff 100644 (file)
@@ -136,7 +136,7 @@ examples:
         samsung,syscon-phandle = <&pmu_system_controller>;
 
         /* NTC thermistor is a hwmon device */
-        ncp15wb473 {
+        thermistor {
             compatible = "murata,ncp15wb473";
             pullup-uv = <1800000>;
             pullup-ohm = <47000>;
index 060a309ff8e7c757cddc59e083c00a8194db4868..dbe7ecc19ccb94236b5f991a585e127533ccb4ba 100644 (file)
@@ -142,7 +142,7 @@ examples:
         down {
             label = "GPIO Key DOWN";
             linux,code = <108>;
-            interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+            interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
         };
     };
 
index 877183cf42787d5760c052ad2fb549d30464d0e3..1ef849dc74d7ee3c2110de419c514c8acd2fd1af 100644 (file)
@@ -79,6 +79,8 @@ properties:
 
             properties:
               data-lanes:
+                description:
+                  Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines.
                 items:
                   minItems: 1
                   maxItems: 4
@@ -91,18 +93,6 @@ properties:
             required:
               - data-lanes
 
-            allOf:
-              - if:
-                  properties:
-                    compatible:
-                      contains:
-                        const: fsl,imx7-mipi-csi2
-                then:
-                  properties:
-                    data-lanes:
-                      items:
-                        maxItems: 2
-
       port@1:
         $ref: /schemas/graph.yaml#/properties/port
         description:
index 2766fe45bb98b5307065ea2d79c5b9f2b13d6156..ee42328a109dca28f89f3942f8068140172d5076 100644 (file)
@@ -91,6 +91,14 @@ properties:
       compensate for the board being designed with the lanes
       swapped.
 
+  enet-phy-lane-no-swap:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      If set, indicates that PHY will disable swap of the
+      TX/RX lanes. This property allows the PHY to work correcly after
+      e.g. wrong bootstrap configuration caused by issues in PCB
+      layout design.
+
   eee-broken-100tx:
     $ref: /schemas/types.yaml#/definitions/flag
     description:
index 04d5654efb38070b482a41a8d593cec115b115d5..79906519c6522c34aa93d1710fb386371ac78327 100644 (file)
@@ -29,7 +29,7 @@ properties:
           - PHY_TYPE_PCIE
           - PHY_TYPE_SATA
           - PHY_TYPE_SGMII
-          - PHY_TYPE_USB
+          - PHY_TYPE_USB3
       - description: The PHY instance
         minimum: 0
         maximum: 1 # for DP, SATA or USB
index 06eca6667f67899d0cfe8b10634957b3a1220ed5..8367a1fd40571c8626ec5ad5149272b5a857c1d6 100644 (file)
@@ -105,7 +105,7 @@ examples:
           reg = <0x65>;
           interrupt-parent = <&gpio1>;
           interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
-          ti,watchdog-timer = <0>;
+          ti,watchdog-timeout-ms = <0>;
           ti,sc-ocp-limit-microamp = <2000000>;
           ti,sc-ovp-limit-microvolt = <17800000>;
           monitored-battery = <&bat>;
index 0e6249d7c1330f7e2cfc916b7c1cabbd9c9e5a4e..5e172e9462b99236f8b4b76eb976ec939444ecc4 100644 (file)
@@ -19,6 +19,9 @@ properties:
   clocks:
     maxItems: 1
 
+  interrupts:
+    maxItems: 1
+
   "#sound-dai-cells":
     const: 0
 
index 7f987e79337c89640f066fb868f7f2a3458b279d..52a78a2e362e0f2e83418bf1c881020140171b37 100644 (file)
@@ -33,6 +33,7 @@ properties:
               - rockchip,rk3328-spi
               - rockchip,rk3368-spi
               - rockchip,rk3399-spi
+              - rockchip,rk3568-spi
               - rockchip,rv1126-spi
           - const: rockchip,rk3066-spi
 
index ec3e71f56009f9c66edb9b6e969640de17b9d657..e445cb146efebe1080c74dabf41a29bc1d9057f7 100644 (file)
@@ -27,7 +27,7 @@ Sphinx Install
 ==============
 
 The ReST markups currently used by the Documentation/ files are meant to be
-built with ``Sphinx`` version 1.3 or higher.
+built with ``Sphinx`` version 1.7 or higher.
 
 There's a script that checks for the Sphinx requirements. Please see
 :ref:`sphinx-pre-install` for further details.
@@ -43,10 +43,6 @@ or ``virtualenv``, depending on how your distribution packaged Python 3.
 
 .. note::
 
-   #) Sphinx versions below 1.5 don't work properly with Python's
-      docutils version 0.13.1 or higher. So, if you're willing to use
-      those versions, you should run ``pip install 'docutils==0.12'``.
-
    #) It is recommended to use the RTD theme for html output. Depending
       on the Sphinx version, it should be installed separately,
       with ``pip install sphinx_rtd_theme``.
@@ -55,13 +51,13 @@ or ``virtualenv``, depending on how your distribution packaged Python 3.
       those expressions are written using LaTeX notation. It needs texlive
       installed with amsfonts and amsmath in order to evaluate them.
 
-In summary, if you want to install Sphinx version 1.7.9, you should do::
+In summary, if you want to install Sphinx version 2.4.4, you should do::
 
-       $ virtualenv sphinx_1.7.9
-       $ . sphinx_1.7.9/bin/activate
-       (sphinx_1.7.9) $ pip install -r Documentation/sphinx/requirements.txt
+       $ virtualenv sphinx_2.4.4
+       $ . sphinx_2.4.4/bin/activate
+       (sphinx_2.4.4) $ pip install -r Documentation/sphinx/requirements.txt
 
-After running ``. sphinx_1.7.9/bin/activate``, the prompt will change,
+After running ``. sphinx_2.4.4/bin/activate``, the prompt will change,
 in order to indicate that you're using the new environment. If you
 open a new shell, you need to rerun this command to enter again at
 the virtual environment before building the documentation.
@@ -81,7 +77,7 @@ output.
 PDF and LaTeX builds
 --------------------
 
-Such builds are currently supported only with Sphinx versions 1.4 and higher.
+Such builds are currently supported only with Sphinx versions 2.4 and higher.
 
 For PDF and LaTeX output, you'll also need ``XeLaTeX`` version 3.14159265.
 
@@ -104,8 +100,8 @@ command line options for your distro::
        You should run:
 
                sudo dnf install -y texlive-luatex85
-               /usr/bin/virtualenv sphinx_1.7.9
-               . sphinx_1.7.9/bin/activate
+               /usr/bin/virtualenv sphinx_2.4.4
+               . sphinx_2.4.4/bin/activate
                pip install -r Documentation/sphinx/requirements.txt
 
        Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
index 681c6a492bc0cb51f5d2fb95e73245d5d9c4361d..4f490278d22fca8a27533bb16e5411ea42600f67 100644 (file)
@@ -35,7 +35,7 @@ This document describes only the kernel module and the interactions
 required with any user-space program.  Subsequent text refers to this
 as the "automount daemon" or simply "the daemon".
 
-"autofs" is a Linux kernel module with provides the "autofs"
+"autofs" is a Linux kernel module which provides the "autofs"
 filesystem type.  Several "autofs" filesystems can be mounted and they
 can each be managed separately, or all managed by the same daemon.
 
index a1326157d53ff40a5dcacf50f3301668774990fe..b0d354fd80666199b0f4f35deaebf06f015935fb 100644 (file)
@@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon)
 --------------------------------
 
 ksmbd.mountd is userspace process to, transfer user account and password that
-are registered using ksmbd.adduser(part of utils for user space). Further it
+are registered using ksmbd.adduser (part of utils for user space). Further it
 allows sharing information parameters that parsed from smb.conf to ksmbd in
 kernel. For the execution part it has a daemon which is continuously running
 and connected to the kernel interface using netlink socket, it waits for the
-requests(dcerpc and share/user info). It handles RPC calls (at a minimum few
+requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
 dozen) that are most important for file server from NetShareEnum and
 NetServerGetInfo. Complete DCE/RPC response is prepared from the user space
 and passed over to the associated kernel thread for the client.
@@ -154,11 +154,11 @@ Each layer
 1. Enable all component prints
        # sudo ksmbd.control -d "all"
 
-2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma)
+2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
        # sudo ksmbd.control -d "smb"
 
-3. Show what prints are enable.
-       # cat/sys/class/ksmbd-control/debug
+3. Show what prints are enabled.
+       # cat /sys/class/ksmbd-control/debug
          [smb] auth vfs oplock ipc conn [rdma]
 
 4. Disable prints:
index bb68d39f03b789c0a78655ab009b93a24acee862..375baca7edcdc299628c044afe3a7ab8e79c850c 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 =================================
-NETWORK FILESYSTEM HELPER LIBRARY
+Network Filesystem Helper Library
 =================================
 
 .. Contents:
@@ -37,22 +37,22 @@ into a common call framework.
 
 The following services are provided:
 
- * Handles transparent huge pages (THPs).
+ * Handle folios that span multiple pages.
 
- * Insulates the netfs from VM interface changes.
+ * Insulate the netfs from VM interface changes.
 
- * Allows the netfs to arbitrarily split reads up into pieces, even ones that
-   don't match page sizes or page alignments and that may cross pages.
+ * Allow the netfs to arbitrarily split reads up into pieces, even ones that
+   don't match folio sizes or folio alignments and that may cross folios.
 
- * Allows the netfs to expand a readahead request in both directions to meet
-   its needs.
+ * Allow the netfs to expand a readahead request in both directions to meet its
+   needs.
 
- * Allows the netfs to partially fulfil a read, which will then be resubmitted.
+ * Allow the netfs to partially fulfil a read, which will then be resubmitted.
 
- * Handles local caching, allowing cached data and server-read data to be
+ * Handle local caching, allowing cached data and server-read data to be
    interleaved for a single request.
 
- * Handles clearing of bufferage that aren't on the server.
+ * Handle clearing of bufferage that aren't on the server.
 
  * Handle retrying of reads that failed, switching reads from the cache to the
    server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions
 
 Three read helpers are provided::
 
* void netfs_readahead(struct readahead_control *ractl,
-                       const struct netfs_read_request_ops *ops,
-                       void *netfs_priv);``
* int netfs_readpage(struct file *file,
-                     struct page *page,
-                     const struct netfs_read_request_ops *ops,
-                     void *netfs_priv);
* int netfs_write_begin(struct file *file,
-                        struct address_space *mapping,
-                        loff_t pos,
-                        unsigned int len,
-                        unsigned int flags,
-                        struct page **_page,
-                        void **_fsdata,
-                        const struct netfs_read_request_ops *ops,
-                        void *netfs_priv);
      void netfs_readahead(struct readahead_control *ractl,
+                            const struct netfs_read_request_ops *ops,
+                            void *netfs_priv);
      int netfs_readpage(struct file *file,
+                          struct folio *folio,
+                          const struct netfs_read_request_ops *ops,
+                          void *netfs_priv);
      int netfs_write_begin(struct file *file,
+                             struct address_space *mapping,
+                             loff_t pos,
+                             unsigned int len,
+                             unsigned int flags,
+                             struct folio **_folio,
+                             void **_fsdata,
+                             const struct netfs_read_request_ops *ops,
+                             void *netfs_priv);
 
 Each corresponds to a VM operation, with the addition of a couple of parameters
 for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
 For ->readahead() and ->readpage(), the network filesystem should just jump
 into the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.
 
 The helpers manage the read request, calling back into the network filesystem
 through the suppplied table of operations.  Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
                void (*issue_op)(struct netfs_read_subrequest *subreq);
                bool (*is_still_valid)(struct netfs_read_request *rreq);
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-                                        struct page *page, void **_fsdata);
+                                        struct folio *folio, void **_fsdata);
                void (*done)(struct netfs_read_request *rreq);
                void (*cleanup)(struct address_space *mapping, void *netfs_priv);
        };
@@ -313,13 +313,14 @@ The operations are as follows:
 
    There is no return value; the netfs_subreq_terminated() function should be
    called to indicate whether or not the operation succeeded and how much data
-   it transferred.  The filesystem also should not deal with setting pages
+   it transferred.  The filesystem also should not deal with setting folios
    uptodate, unlocking them or dropping their refs - the helpers need to deal
    with this as they have to coordinate with copying to the local cache.
 
-   Note that the helpers have the pages locked, but not pinned.  It is possible
-   to use the ITER_XARRAY iov iterator to refer to the range of the inode that
-   is being operated upon without the need to allocate large bvec tables.
+   Note that the helpers have the folios locked, but not pinned.  It is
+   possible to use the ITER_XARRAY iov iterator to refer to the range of the
+   inode that is being operated upon without the need to allocate large bvec
+   tables.
 
  * ``is_still_valid()``
 
@@ -330,15 +331,15 @@ The operations are as follows:
  * ``check_write_begin()``
 
    [Optional] This is called from the netfs_write_begin() helper once it has
-   allocated/grabbed the page to be modified to allow the filesystem to flush
+   allocated/grabbed the folio to be modified to allow the filesystem to flush
    conflicting state before allowing it to be modified.
 
-   It should return 0 if everything is now fine, -EAGAIN if the page should be
+   It should return 0 if everything is now fine, -EAGAIN if the folio should be
    regrabbed and any other error code to abort the operation.
 
  * ``done``
 
-   [Optional] This is called after the pages in the request have all been
+   [Optional] This is called after the folios in the request have all been
    unlocked (and marked uptodate if applicable).
 
  * ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
      * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
        end of the slice instead of reissuing.
 
- * Once the data is read, the pages that have been fully read/cleared:
+ * Once the data is read, the folios that have been fully read/cleared:
 
    * Will be marked uptodate.
 
@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:
 
    * Unlocked
 
- * Any pages that need writing to the cache will then have DIO writes issued.
+ * Any folios that need writing to the cache will then have DIO writes issued.
 
  * Synchronous operations will wait for reading to be complete.
 
- * Writes to the cache will proceed asynchronously and the pages will have the
+ * Writes to the cache will proceed asynchronously and the folios will have the
    PG_fscache mark removed when that completes.
 
  * The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
                            netfs_io_terminated_t term_func,
                            void *term_func_priv);
 
+               int (*prepare_write)(struct netfs_cache_resources *cres,
+                                    loff_t *_start, size_t *_len, loff_t i_size);
+
                int (*write)(struct netfs_cache_resources *cres,
                             loff_t start_pos,
                             struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
    indicating whether the termination is definitely happening in the caller's
    context.
 
+ * ``prepare_write()``
+
+   [Required] Called to adjust a write to the cache and check that there is
+   sufficient space in the cache.  The start and length values indicate the
+   size of the write that netfslib is proposing, and this can be adjusted by
+   the cache to respect DIO boundaries.  The file size is passed for
+   information.
+
  * ``write()``
 
    [Required] Called to write to the cache.  The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
 there isn't a read request structure as well, such as writing dirty data to the
 cache.
 
+
+API Function Reference
+======================
+
 .. kernel-doc:: include/linux/netfs.h
+.. kernel-doc:: fs/netfs/read_helper.c
index 9e07e6bbe6a392949686611ff6c1dec5d76e2091..00d8e17d0acabca6e68e86ad7d8a182b821a0887 100644 (file)
@@ -36,6 +36,8 @@ Key to symbols
 
 =============== =============================================================
 S               Start condition
+Sr              Repeated start condition, used to switch from write to
+                read mode.
 P               Stop condition
 Rd/Wr (1 bit)   Read/Write bit. Rd equals 1, Wr equals 0.
 A, NA (1 bit)   Acknowledge (ACK) and Not Acknowledge (NACK) bit
@@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data()
 This reads a single byte from a device, from a designated register.
 The register is specified through the Comm byte::
 
-  S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
+  S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
 
@@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a
 device, from a designated register that is specified through the Comm
 byte. But this time, the data is a complete word (16 bits)::
 
-  S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
+  S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
 
@@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends
 16 bits of data to it, and reads 16 bits of data in return::
 
   S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
-                               S Addr Rd [A] [DataLow] A [DataHigh] NA P
+                              Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
 
@@ -181,7 +183,7 @@ of data is specified by the device in the Count byte.
 ::
 
   S Addr Wr [A] Comm [A]
-             S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
+            Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
 
@@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends
 1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return::
 
   S Addr Wr [A] Comm [A] Count [A] Data [A] ...
-                               S Addr Rd [A] [Count] A [Data] ... A P
+                              Sr Addr Rd [A] [Count] A [Data] ... A P
 
 Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
 
@@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a
 designated register that is specified through the Comm byte::
 
   S Addr Wr [A] Comm [A]
-             S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
+            Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
 
 Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
 
index ddada4a53749364b86a9d11f11d2dcf4b7470ca6..4fd7b70fcde19737652a75a754c1ab6b5d99f46c 100644 (file)
@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
   spin_lock(&p->lock);
   p->count += this_cpu_read(var2);
 
-On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
-which makes the above code fully equivalent. On a PREEMPT_RT kernel
 migrate_disable() ensures that the task is pinned on the current CPU which
 in turn guarantees that the per-CPU access to var1 and var2 are staying on
-the same CPU.
+the same CPU while the task remains preemptible.
 
 The migrate_disable() substitution is not valid for the following
 scenario::
@@ -456,9 +454,8 @@ scenario::
     p = this_cpu_ptr(&var1);
     p->val = func2();
 
-While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
-here migrate_disable() does not protect against reentrancy from a
-preempting task. A correct substitution for this case is::
+This breaks because migrate_disable() does not protect against reentrancy from
+a preempting task. A correct substitution for this case is::
 
   func()
   {
index 95ef56d62077f76f4909383d740946e8becec801..387fda80f05fbbffb3b81ef9e23c1f067aed0d95 100644 (file)
@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER
 
        0: disable any special handling on port reuse. The new
        connection will be delivered to the same real server that was
-       servicing the previous connection. This will effectively
-       disable expire_nodest_conn.
+       servicing the previous connection.
 
        bit 1: enable rescheduling of new connections when it is safe.
        That is, whenever expire_nodest_conn and for TCP sockets, when
index a722eb30e0140dbcac0722275d533538b66c1396..80b13353254a09c4577a096a73ce32b78f68a8dd 100644 (file)
@@ -486,8 +486,8 @@ of packets.
 Drivers are free to use a more permissive configuration than the requested
 configuration. It is expected that drivers should only implement directly the
 most generic mode that can be supported. For example if the hardware can
-support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
-HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
 is more generic (and more useful to applications).
 
 A driver which supports hardware time stamping shall update the struct
index 8a2788afe89b8c95ba3a58dabdf0bd9df2681467..5ac62a7b4b7cda500645f6b1e9cd90df473d8a19 100644 (file)
@@ -84,6 +84,16 @@ CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
 2.2 Registration of performance domains
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
+Registration of 'advanced' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'advanced' EM gets it's name due to the fact that the driver is allowed
+to provide more precised power model. It's not limited to some implemented math
+formula in the framework (like it's in 'simple' EM case). It can better reflect
+the real power measurements performed for each performance state. Thus, this
+registration method should be preferred in case considering EM static power
+(leakage) is important.
+
 Drivers are expected to register performance domains into the EM framework by
 calling the following API::
 
@@ -103,6 +113,18 @@ to: return warning/error, stop working or panic.
 See Section 3. for an example of driver implementing this
 callback, or Section 2.4 for further documentation on this API
 
+Registration of 'simple' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'simple' EM is registered using the framework helper function
+cpufreq_register_em_with_opp(). It implements a power model which is tight to
+math formula::
+
+       Power = C * V^2 * f
+
+The EM which is registered using this method might not reflect correctly the
+physics of a real device, e.g. when static power (leakage) is important.
+
 
 2.3 Accessing performance domains
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -138,6 +160,10 @@ or in Section 2.4
 3. Example driver
 -----------------
 
+The CPUFreq framework supports dedicated callback for registering
+the EM for a given CPU(s) 'policy' object: cpufreq_driver::register_em().
+That callback has to be implemented properly for a given driver,
+because the framework would call it at the right time during setup.
 This section provides a simple example of a CPUFreq driver registering a
 performance domain in the Energy Model framework using the (fake) 'foo'
 protocol. The driver implements an est_power() function to be provided to the
@@ -167,25 +193,22 @@ EM framework::
   20           return 0;
   21   }
   22
-  23   static int foo_cpufreq_init(struct cpufreq_policy *policy)
+  23   static void foo_cpufreq_register_em(struct cpufreq_policy *policy)
   24   {
   25           struct em_data_callback em_cb = EM_DATA_CB(est_power);
   26           struct device *cpu_dev;
-  27           int nr_opp, ret;
+  27           int nr_opp;
   28
   29           cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
   30
-  31           /* Do the actual CPUFreq init work ... */
-  32           ret = do_foo_cpufreq_init(policy);
-  33           if (ret)
-  34                   return ret;
-  35
-  36           /* Find the number of OPPs for this policy */
-  37           nr_opp = foo_get_nr_opp(policy);
+  31           /* Find the number of OPPs for this policy */
+  32           nr_opp = foo_get_nr_opp(policy);
+  33
+  34           /* And register the new performance domain */
+  35           em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
+  36                                       true);
+  37   }
   38
-  39           /* And register the new performance domain */
-  40           em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
-  41                                       true);
-  42
-  43           return 0;
-  44   }
+  39   static struct cpufreq_driver foo_cpufreq_driver = {
+  40           .register_em = foo_cpufreq_register_em,
+  41   };
index e35ab74a0f804b0453e323b91ebacc6feeda0851..cf908d79666e423b27e352e68a093b44395bd019 100644 (file)
@@ -35,6 +35,7 @@ GNU make               3.81             make --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
 bison                  2.0              bison --version
+pahole                 1.16             pahole --version
 util-linux             2.10o            fdformat --version
 kmod                   13               depmod -V
 e2fsprogs              1.41.4           e2fsck -V
@@ -54,7 +55,7 @@ mcelog                 0.6              mcelog --version
 iptables               1.4.2            iptables -V
 openssl & libcrypto    1.0.0            openssl version
 bc                     1.06.95          bc --version
-Sphinx\ [#f1]_        1.3              sphinx-build --version
+Sphinx\ [#f1]_         1.7              sphinx-build --version
 ====================== ===============  ========================================
 
 .. [#f1] Sphinx is needed only to build the Kernel documentation
@@ -108,6 +109,16 @@ Bison
 Since Linux 4.16, the build system generates parsers
 during build.  This requires bison 2.0 or later.
 
+pahole:
+-------
+
+Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system
+generates BTF (BPF Type Format) from DWARF in vmlinux, a bit later from kernel
+modules as well.  This requires pahole v1.16 or later.
+
+It is found in the 'dwarves' or 'pahole' distro packages or from
+https://fedorapeople.org/~acme/dwarves/.
+
 Perl
 ----
 
index a0cc96923ea78c0d5d9b1c5422cb6d345334ade8..6b3aaed66fba105a4c4afdf418af038b086ef2c8 100644 (file)
@@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read
 Documentation/process/submit-checklist.rst
 for a list of items to check before submitting code.  If you are submitting
 a driver, also read Documentation/process/submitting-drivers.rst; for device
-tree binding patches, read Documentation/process/submitting-patches.rst.
+tree binding patches, read
+Documentation/devicetree/bindings/submitting-patches.rst.
 
 This documentation assumes that you're using ``git`` to prepare your patches.
 If you're unfamiliar with ``git``, you would be well-advised to learn how to
@@ -22,8 +23,8 @@ use it, it will make your life as a kernel developer and in general much
 easier.
 
 Some subsystems and maintainer trees have additional information about
-their workflow and expectations, see :ref:`Documentation/process/maintainer
-handbooks <maintainer_handbooks_main>`.
+their workflow and expectations, see
+:ref:`Documentation/process/maintainer-handbooks.rst <maintainer_handbooks_main>`.
 
 Obtain a current source tree
 ----------------------------
index 4e5b26f03d5b162ef7f815f7ee3c380f23b45aa9..b3166c4a78678fa22f63cda44b5431c8b5d88358 100644 (file)
@@ -2442,11 +2442,10 @@ Or this simple script!
   #!/bin/bash
 
   tracefs=`sed -ne 's/^tracefs \(.*\) tracefs.*/\1/p' /proc/mounts`
-  echo nop > $tracefs/tracing/current_tracer
-  echo 0 > $tracefs/tracing/tracing_on
-  echo $$ > $tracefs/tracing/set_ftrace_pid
-  echo function > $tracefs/tracing/current_tracer
-  echo 1 > $tracefs/tracing/tracing_on
+  echo 0 > $tracefs/tracing_on
+  echo $$ > $tracefs/set_ftrace_pid
+  echo function > $tracefs/current_tracer
+  echo 1 > $tracefs/tracing_on
   exec "$@"
 
 
index 0046d75d9a706de49ee54f131d0565c05087bef9..9762452c584c1f128bada232f9de51812b6b6231 100644 (file)
@@ -35,7 +35,7 @@ Installazione Sphinx
 ====================
 
 I marcatori ReST utilizzati nei file in Documentation/ sono pensati per essere
-processati da ``Sphinx`` nella versione 1.3 o superiore.
+processati da ``Sphinx`` nella versione 1.7 o superiore.
 
 Esiste uno script che verifica i requisiti Sphinx. Per ulteriori dettagli
 consultate :ref:`it_sphinx-pre-install`.
@@ -53,11 +53,6 @@ pacchettizzato dalla vostra distribuzione.
 
 .. note::
 
-   #) Le versioni di Sphinx inferiori alla 1.5 non funzionano bene
-      con il pacchetto Python docutils versione 0.13.1 o superiore.
-      Se volete usare queste versioni, allora dovere eseguire
-      ``pip install 'docutils==0.12'``.
-
    #) Viene raccomandato l'uso del tema RTD per la documentazione in HTML.
       A seconda della versione di Sphinx, potrebbe essere necessaria
       l'installazione tramite il comando ``pip install sphinx_rtd_theme``.
@@ -67,13 +62,13 @@ pacchettizzato dalla vostra distribuzione.
       utilizzando LaTeX. Per una corretta interpretazione, è necessario aver
       installato texlive con i pacchetti amdfonts e amsmath.
 
-Riassumendo, se volete installare la versione 1.7.9 di Sphinx dovete eseguire::
+Riassumendo, se volete installare la versione 2.4.4 di Sphinx dovete eseguire::
 
-       $ virtualenv sphinx_1.7.9
-       $ . sphinx_1.7.9/bin/activate
-       (sphinx_1.7.9) $ pip install -r Documentation/sphinx/requirements.txt
+       $ virtualenv sphinx_2.4.4
+       $ . sphinx_2.4.4/bin/activate
+       (sphinx_2.4.4) $ pip install -r Documentation/sphinx/requirements.txt
 
-Dopo aver eseguito ``. sphinx_1.7.9/bin/activate``, il prompt cambierà per
+Dopo aver eseguito ``. sphinx_2.4.4/bin/activate``, il prompt cambierà per
 indicare che state usando il nuovo ambiente. Se aprite un nuova sessione,
 prima di generare la documentazione, dovrete rieseguire questo comando per
 rientrare nell'ambiente virtuale.
@@ -94,7 +89,7 @@ Generazione in PDF e LaTeX
 --------------------------
 
 Al momento, la generazione di questi documenti è supportata solo dalle
-versioni di Sphinx superiori alla 1.4.
+versioni di Sphinx superiori alla 2.4.
 
 Per la generazione di PDF e LaTeX, avrete bisogno anche del pacchetto
 ``XeLaTeX`` nella versione 3.14159265
@@ -119,8 +114,8 @@ l'installazione::
        You should run:
 
                sudo dnf install -y texlive-luatex85
-               /usr/bin/virtualenv sphinx_1.7.9
-               . sphinx_1.7.9/bin/activate
+               /usr/bin/virtualenv sphinx_2.4.4
+               . sphinx_2.4.4/bin/activate
                pip install -r Documentation/sphinx/requirements.txt
 
        Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
index 87d081889bfc19d7a5ec00ec99814d01e314374c..dc7193377b7f958a75a22b06fadd55874a35ff33 100644 (file)
@@ -57,7 +57,7 @@ mcelog                 0.6                mcelog --version
 iptables               1.4.2              iptables -V
 openssl & libcrypto    1.0.0              openssl version
 bc                     1.06.95            bc --version
-Sphinx\ [#f1]_         1.3                sphinx-build --version
+Sphinx\ [#f1]_         1.7                sphinx-build --version
 ====================== =================  ========================================
 
 .. [#f1] Sphinx è necessario solo per produrre la documentazione del Kernel
index 951595c7d599b8b4f63144895bb21e67ec09cba0..23eac67fbc30b837f6eeb1a0e6bfffed0c8639e0 100644 (file)
@@ -26,7 +26,7 @@ reStructuredText文件可能包含包含来自源文件的结构化文档注释
 安装Sphinx
 ==========
 
-Documentation/ 下的ReST文件现在使用sphinx1.3或更高版本构建。
+Documentation/ 下的ReST文件现在使用sphinx1.7或更高版本构建。
 
 这有一个脚本可以检查Sphinx的依赖项。更多详细信息见
 :ref:`sphinx-pre-install_zh` 。
@@ -40,22 +40,19 @@ Documentation/ 下的ReST文件现在使用sphinx1.3或更高版本构建。
 
 .. note::
 
-   #) 低于1.5版本的Sphinx无法与Python的0.13.1或更高版本docutils一起正常工作。
-      如果您想使用这些版本,那么应该运行 ``pip install 'docutils==0.12'`` 。
-
    #) html输出建议使用RTD主题。根据Sphinx版本的不同,它应该用
       ``pip install sphinx_rtd_theme`` 单独安装。
 
    #) 一些ReST页面包含数学表达式。由于Sphinx的工作方式,这些表达式是使用 LaTeX
       编写的。它需要安装amsfonts和amsmath宏包,以便显示。
 
-总之,如您要安装Sphinx 1.7.9版本,应执行::
+总之,如您要安装Sphinx 2.4.4版本,应执行::
 
-       $ virtualenv sphinx_1.7.9
-       $ . sphinx_1.7.9/bin/activate
-       (sphinx_1.7.9) $ pip install -r Documentation/sphinx/requirements.txt
+       $ virtualenv sphinx_2.4.4
+       $ . sphinx_2.4.4/bin/activate
+       (sphinx_2.4.4) $ pip install -r Documentation/sphinx/requirements.txt
 
-在运行 ``. sphinx_1.7.9/bin/activate`` 之后,提示符将变化,以指示您正在使用新
+在运行 ``. sphinx_2.4.4/bin/activate`` 之后,提示符将变化,以指示您正在使用新
 环境。如果您打开了一个新的shell,那么在构建文档之前,您需要重新运行此命令以再
 次进入虚拟环境中。
 
@@ -71,7 +68,7 @@ Documentation/ 下的ReST文件现在使用sphinx1.3或更高版本构建。
 PDF和LaTeX构建
 --------------
 
-目前只有Sphinx 1.4及更高版本才支持这种构建。
+目前只有Sphinx 2.4及更高版本才支持这种构建。
 
 对于PDF和LaTeX输出,还需要 ``XeLaTeX`` 3.14159265版本。(译注:此版本号真实
 存在)
@@ -93,8 +90,8 @@ PDF和LaTeX构建
        You should run:
 
                sudo dnf install -y texlive-luatex85
-               /usr/bin/virtualenv sphinx_1.7.9
-               . sphinx_1.7.9/bin/activate
+               /usr/bin/virtualenv sphinx_2.4.4
+               . sphinx_2.4.4/bin/activate
                pip install -r Documentation/sphinx/requirements.txt
 
        Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
index c6a5bb28579724764a8e1db4dc7ff2c07446c3a6..8053ae4743280b993bf46dc5c0372015cd298151 100644 (file)
@@ -36,14 +36,14 @@ Linux内核管理风格
 每个人都认为管理者做决定,而且决策很重要。决定越大越痛苦,管理者就必须越高级。
 这很明显,但事实并非如此。
 
¸¸æ\88\8fç\9a\84å\90\8då­\97是 **避免** 做出决定。尤其是,如果有人告诉你“选择(a)或(b),
\9c\80é\87\8dè¦\81ç\9a\84是 **避免** 做出决定。尤其是,如果有人告诉你“选择(a)或(b),
 我们真的需要你来做决定”,你就是陷入麻烦的管理者。你管理的人比你更了解细节,
 所以如果他们来找你做技术决策,你完蛋了。你显然没有能力为他们做这个决定。
 
 (推论:如果你管理的人不比你更了解细节,你也会被搞砸,尽管原因完全不同。
 也就是说,你的工作是错的,他们应该管理你的才智)
 
\89\80以游æ\88\8fç\9a\84å\90\8då­\97是 **避免** 做出决定,至少是那些大而痛苦的决定。做一些小的
\89\80以æ\9c\80é\87\8dè¦\81ç\9a\84是 **避免** 做出决定,至少是那些大而痛苦的决定。做一些小的
 和非结果性的决定是很好的,并且使您看起来好像知道自己在做什么,所以内核管理者
 需要做的是将那些大的和痛苦的决定变成那些没有人真正关心的小事情。
 
index 7a2345ce85213cd03ef4aca3735a26cc1677d018..13f9a84a617e309f0f7be5a682c5ccb1a5a824f1 100644 (file)
@@ -2263,6 +2263,15 @@ L:       linux-iio@vger.kernel.org
 S:     Maintained
 F:     drivers/counter/microchip-tcb-capture.c
 
+ARM/MILBEAUT ARCHITECTURE
+M:     Taichi Sugaya <sugaya.taichi@socionext.com>
+M:     Takao Orito <orito.takao@socionext.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/boot/dts/milbeaut*
+F:     arch/arm/mach-milbeaut/
+N:     milbeaut
+
 ARM/MIOA701 MACHINE SUPPORT
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2729,10 +2738,11 @@ S:      Maintained
 F:     drivers/memory/*emif*
 
 ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M:     Nishanth Menon <nm@ti.com>
 M:     Santosh Shilimkar <ssantosh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
 F:     arch/arm/boot/dts/keystone-*
 F:     arch/arm/mach-keystone/
 
@@ -3570,13 +3580,14 @@ L:      netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/b44.*
 
-BROADCOM B53 ETHERNET SWITCH DRIVER
+BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     netdev@vger.kernel.org
 L:     openwrt-devel@lists.openwrt.org (subscribers-only)
 S:     Supported
 F:     Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
 F:     drivers/net/dsa/b53/*
+F:     drivers/net/dsa/bcm_sf2*
 F:     include/linux/dsa/brcm.h
 F:     include/linux/platform_data/b53.h
 
@@ -3733,7 +3744,7 @@ F:        drivers/scsi/bnx2i/
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:     Ariel Elior <aelior@marvell.com>
 M:     Sudarsana Kalluru <skalluru@marvell.com>
-M:     GR-everest-linux-l2@marvell.com
+M:     Manish Chopra <manishc@marvell.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
@@ -9318,7 +9329,6 @@ S:        Maintained
 F:     drivers/iio/pressure/dps310.c
 
 INFINIBAND SUBSYSTEM
-M:     Doug Ledford <dledford@redhat.com>
 M:     Jason Gunthorpe <jgg@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -10269,9 +10279,9 @@ F:      lib/Kconfig.kcsan
 F:     scripts/Makefile.kcsan
 
 KDUMP
-M:     Dave Young <dyoung@redhat.com>
 M:     Baoquan He <bhe@redhat.com>
 R:     Vivek Goyal <vgoyal@redhat.com>
+R:     Dave Young <dyoung@redhat.com>
 L:     kexec@lists.infradead.org
 S:     Maintained
 W:     http://lse.sourceforge.net/kdump/
@@ -10445,7 +10455,7 @@ F:      arch/riscv/include/uapi/asm/kvm*
 F:     arch/riscv/kvm/
 
 KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
-M:     Christian Borntraeger <borntraeger@de.ibm.com>
+M:     Christian Borntraeger <borntraeger@linux.ibm.com>
 M:     Janosch Frank <frankja@linux.ibm.com>
 R:     David Hildenbrand <david@redhat.com>
 R:     Claudio Imbrenda <imbrenda@linux.ibm.com>
@@ -12169,8 +12179,8 @@ F:      drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:     include/linux/mlx5/mlx5_ifc_fpga.h
 
 MELLANOX ETHERNET SWITCH DRIVERS
-M:     Jiri Pirko <jiri@nvidia.com>
 M:     Ido Schimmel <idosch@nvidia.com>
+M:     Petr Machata <petrm@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -15593,7 +15603,7 @@ F:      drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
 M:     Ariel Elior <aelior@marvell.com>
-M:     GR-everest-linux-l2@marvell.com
+M:     Manish Chopra <manishc@marvell.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/qlogic/qed/
@@ -15760,6 +15770,15 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/net/qcom,ethqos.txt
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
 
+QUALCOMM FASTRPC DRIVER
+M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M:     Amol Maheshwari <amahesh@qti.qualcomm.com>
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
+F:     drivers/misc/fastrpc.c
+F:     include/uapi/misc/fastrpc.h
+
 QUALCOMM GENERIC INTERFACE I2C DRIVER
 M:     Akash Asthana <akashast@codeaurora.org>
 M:     Mukesh Savaliya <msavaliy@codeaurora.org>
@@ -15968,6 +15987,7 @@ F:      arch/mips/generic/board-ranchu.c
 
 RANDOM NUMBER DRIVER
 M:     "Theodore Ts'o" <tytso@mit.edu>
+M:     Jason A. Donenfeld <Jason@zx2c4.com>
 S:     Maintained
 F:     drivers/char/random.c
 
@@ -16490,6 +16510,12 @@ T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
 F:     drivers/media/platform/sunxi/sun8i-rotate/
 
+RPMSG TTY DRIVER
+M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
+L:     linux-remoteproc@vger.kernel.org
+S:     Maintained
+F:     drivers/tty/rpmsg_tty.c
+
 RTL2830 MEDIA DRIVER
 M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
@@ -16573,7 +16599,7 @@ F:      drivers/video/fbdev/savage/
 S390
 M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
-M:     Christian Borntraeger <borntraeger@de.ibm.com>
+M:     Christian Borntraeger <borntraeger@linux.ibm.com>
 R:     Alexander Gordeev <agordeev@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
@@ -16611,8 +16637,8 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
-M:     Julian Wiedmann <jwi@linux.ibm.com>
-M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Alexandra Winter <wintera@linux.ibm.com>
+M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -16622,8 +16648,8 @@ F:      include/net/iucv/
 F:     net/iucv/
 
 S390 NETWORK DRIVERS
-M:     Julian Wiedmann <jwi@linux.ibm.com>
-M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Alexandra Winter <wintera@linux.ibm.com>
+M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -18483,6 +18509,7 @@ F:      include/uapi/linux/pkt_sched.h
 F:     include/uapi/linux/tc_act/
 F:     include/uapi/linux/tc_ematch/
 F:     net/sched/
+F:     tools/testing/selftests/tc-testing
 
 TC90522 MEDIA DRIVER
 M:     Akihiro Tsukada <tskd08@gmail.com>
@@ -19031,11 +19058,12 @@ F:    drivers/mmc/host/tifm_sd.c
 F:     include/linux/tifm.h
 
 TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
+M:     Nishanth Menon <nm@ti.com>
 M:     Santosh Shilimkar <ssantosh@kernel.org>
 L:     linux-kernel@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
 F:     drivers/soc/ti/*
 
 TI LM49xxx FAMILY ASoC CODEC DRIVERS
@@ -20317,7 +20345,8 @@ F:      arch/x86/include/asm/vmware.h
 F:     arch/x86/kernel/cpu/vmware.c
 
 VMWARE PVRDMA DRIVER
-M:     Adit Ranadive <aditr@vmware.com>
+M:     Bryan Tan <bryantan@vmware.com>
+M:     Vishnu Dasa <vdasa@vmware.com>
 M:     VMware PV-Drivers <pv-drivers@vmware.com>
 L:     linux-rdma@vger.kernel.org
 S:     Maintained
index 9e12c14ea0fbb231122ab4c548fa3909b1718a3b..765115c99655fa4f79382914a1eebc0403b3735d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Trick or Treat
+EXTRAVERSION = -rc5
+NAME = Gobble Gobble
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG)      := -fstack-protector-strong
 KBUILD_CFLAGS += $(stackp-flags-y)
 
 KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%)
 
 ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += -Qunused-arguments
@@ -1374,17 +1374,17 @@ endif
 
 ifneq ($(dtstree),)
 
-%.dtb: dt_binding_check include/config/kernel.release scripts_dtc
-       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtb: include/config/kernel.release scripts_dtc
+       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
-%.dtbo: dt_binding_check include/config/kernel.release scripts_dtc
-       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtbo: include/config/kernel.release scripts_dtc
+       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
 PHONY += dtbs dtbs_install dtbs_check
 dtbs: include/config/kernel.release scripts_dtc
        $(Q)$(MAKE) $(build)=$(dtstree)
 
-ifneq ($(filter dtbs_check %.dtb %.dtbo, $(MAKECMDGOALS)),)
+ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),)
 export CHECK_DTBS=y
 dtbs: dt_binding_check
 endif
index 26b8ed11639da464ef30f035a960302eaf162f1e..d3c4ab249e9c2758d2bc917556b2039a4e16c5c9 100644 (file)
@@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
          and vice-versa 32-bit applications to call 64-bit mmap().
          Required for applications doing different bitness syscalls.
 
+config PAGE_SIZE_LESS_THAN_64KB
+       def_bool y
+       depends on !ARM64_64K_PAGES
+       depends on !IA64_PAGE_SIZE_64KB
+       depends on !PAGE_SIZE_64KB
+       depends on !PARISC_PAGE_SIZE_64KB
+       depends on !PPC_64K_PAGES
+       depends on !PPC_256K_PAGES
+       depends on !PAGE_SIZE_256KB
+
 # This allows to use a set of generic functions to determine mmap base
 # address by giving priority to top-down scheme only if the process
 # is not in legacy mode (compat task, unlimited stack size or
index e4a041cd57154b886faad443802514c0bfbdda48..ca5a32228cd6e53d1f4dd04100b86f7514fd128a 100644 (file)
 556    common  landlock_restrict_self          sys_landlock_restrict_self
 # 557 reserved for memfd_secret
 558    common  process_mrelease                sys_process_mrelease
+559    common  futex_waitv                     sys_futex_waitv
index e8c2c7469e107fab567310d94c6fb133e4bfef81..e201b4b1655afee13e11f564af8d49c6e806b660 100644 (file)
@@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
 void dma_cache_inv(phys_addr_t start, unsigned long sz);
index f0f9e8bec83acfa6ca3108664f8f815e0715bd34..c2724d986fa016ee5669683a0f146e65d79b4973 100644 (file)
@@ -1463,6 +1463,7 @@ config HIGHMEM
        bool "High Memory Support"
        depends on MMU
        select KMAP_LOCAL
+       select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
        help
          The address space of ARM processors is only 4 Gigabytes large
          and it has to accommodate user address space, kernel address
index 3b60297af7f60b09f4c625e121f7e2d938eb9b58..9e01dbca4a011f2794154bbfe4de89ba022d3834 100644 (file)
                        #address-cells = <3>;
                        #interrupt-cells = <1>;
                        #size-cells = <2>;
-                       interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+                       interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "pcie", "msi";
                        interrupt-map-mask = <0x0 0x0 0x0 0x7>;
                        interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
+                                                       IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &gicv2 GIC_SPI 144
+                                                       IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &gicv2 GIC_SPI 145
+                                                       IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &gicv2 GIC_SPI 146
                                                        IRQ_TYPE_LEVEL_HIGH>;
                        msi-controller;
                        msi-parent = <&pcie0>;
index d4f355015e3cab0aca29849d15441864159fc64c..f69d2af3c1fa4888f44bbab253366f861906bf4e 100644 (file)
 
                        gpio-controller;
                        #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                };
 
                pcie0: pcie@12000 {
        i2c0: i2c@18009000 {
                compatible = "brcm,iproc-i2c";
                reg = <0x18009000 0x50>;
-               interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clock-frequency = <100000>;
index e68fb879e4f9d5a86b3dc0fbc0d7c8c1e546ed53..5e56288e343bb6fbb9eff102180f2f3d93b6b959 100644 (file)
@@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  */
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *folio);
 
 #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
 static inline void flush_kernel_vmap_range(void *addr, int size)
index fc2608b18a0d048c62aed905f4f76da438f94426..18f01190dcfd423d3e8f9ac23408f26af1b729c8 100644 (file)
@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
 u32 socfpga_sdram_self_refresh(u32 sdr_base);
 extern unsigned int socfpga_sdram_self_refresh_sz;
 
-extern char secondary_trampoline, secondary_trampoline_end;
+extern char secondary_trampoline[], secondary_trampoline_end[];
 
 extern unsigned long socfpga_cpu1start_addr;
 
index fbb80b883e5dd2606ee0c8130e242e42439d8468..201191cf68f324ca970e93162d46500214b449a5 100644 (file)
 
 static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-       int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+       int trampoline_size = secondary_trampoline_end - secondary_trampoline;
 
        if (socfpga_cpu1start_addr) {
                /* This will put CPU #1 into reset. */
                writel(RSTMGR_MPUMODRST_CPU1,
                       rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
 
-               memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+               memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
 
                writel(__pa_symbol(secondary_startup),
                       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-       int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+       int trampoline_size = secondary_trampoline_end - secondary_trampoline;
 
        if (socfpga_cpu1start_addr) {
                writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
                       SOCFPGA_A10_RSTMGR_MODMPURST);
-               memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+               memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
 
                writel(__pa_symbol(secondary_startup),
                       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
index fc8b2bb06ffe83b135daa2c041f6ddc6904c7070..e22c9433d5e0b908d64d0ceeb833b7b9313a9dec 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright The Asahi Linux Contributors
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
                        port00: pci@0,0 {
                                device_type = "pci";
                                reg = <0x0 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 152 0>;
+                               reset-gpios = <&pinctrl_ap 152 GPIO_ACTIVE_LOW>;
                                max-link-speed = <2>;
 
                                #address-cells = <3>;
                        port01: pci@1,0 {
                                device_type = "pci";
                                reg = <0x800 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 153 0>;
+                               reset-gpios = <&pinctrl_ap 153 GPIO_ACTIVE_LOW>;
                                max-link-speed = <2>;
 
                                #address-cells = <3>;
                        port02: pci@2,0 {
                                device_type = "pci";
                                reg = <0x1000 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 33 0>;
+                               reset-gpios = <&pinctrl_ap 33 GPIO_ACTIVE_LOW>;
                                max-link-speed = <1>;
 
                                #address-cells = <3>;
index 3e4727344b4ac5688449e90921bb61972315276f..a960c0bc2dba2ab0277aa7cfaf224289b2291858 100644 (file)
                        pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
                        phys = <&ufs_0_phy>;
                        phy-names = "ufs-phy";
-                       samsung,sysreg = <&syscon_fsys2>;
-                       samsung,ufs-shareability-reg-offset = <0x710>;
+                       samsung,sysreg = <&syscon_fsys2 0x710>;
                        status = "disabled";
                };
        };
index 347b0cc68f07195e9e00c791333e08ccab4c4eee..1494cfa8639bec0ca1bcb0f2ee0fde30d2910858 100644 (file)
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 
+/*
+ * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
+ * "return address pointer" which can be used to uniquely identify a return
+ * address which has been overwritten.
+ *
+ * On arm64 we use the address of the caller's frame record, which remains the
+ * same for the lifetime of the instrumented function, unlike the return
+ * address in the LR.
+ */
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #else
index a39fcf318c774df52e72ceebbdf139756328c1a0..01d47c5886dc43a6925116e3ba38c0a52aaf5657 100644 (file)
@@ -91,7 +91,7 @@
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1           ((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1           ((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI            (1 << 20)
 #define TCR_EL2_PS_SHIFT       16
 #define TCR_EL2_PS_MASK                (7 << TCR_EL2_PS_SHIFT)
 #define CPTR_EL2_TFP_SHIFT 10
 
 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TCPAC (1U << 31)
 #define CPTR_EL2_TAM   (1 << 30)
 #define CPTR_EL2_TTA   (1 << 20)
 #define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
index 8433a2058eb15f00a8275156d534d8d65ed702c8..237224484d0f6f114af52b6564c6a014ba235de6 100644 (file)
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-       VM_BUG_ON(mm != &init_mm);
+       VM_BUG_ON(mm && mm != &init_mm);
        __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
 }
 
index a4e046ef4568e207077738a6a961979ba551190b..6564a01cc085a951d772e05cb845028e352550c4 100644 (file)
@@ -47,9 +47,6 @@ struct stack_info {
  * @prev_type:   The type of stack this frame record was on, or a synthetic
  *               value of STACK_TYPE_UNKNOWN. This is used to detect a
  *               transition from one stack to another.
- *
- * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
- *               replacement lr value in the ftrace graph stack.
  */
 struct stackframe {
        unsigned long fp;
@@ -57,9 +54,6 @@ struct stackframe {
        DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
        unsigned long prev_fp;
        enum stack_type prev_type;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       int graph;
-#endif
 #ifdef CONFIG_KRETPROBES
        struct llist_node *kr_cur;
 #endif
index 6e2e0b7031aba0961c5feab9c4c9815f50cbead1..3a5ff5e2058630e0c4fb1429bd2959a47b917380 100644 (file)
@@ -281,12 +281,22 @@ do {                                                                      \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_get_user(x, ptr, err)                                    \
 do {                                                                   \
+       __typeof__(*(ptr)) __user *__rgu_ptr = (ptr);                   \
+       __typeof__(x) __rgu_val;                                        \
        __chk_user_ptr(ptr);                                            \
+                                                                       \
        uaccess_ttbr0_enable();                                         \
-       __raw_get_mem("ldtr", x, ptr, err);                             \
+       __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);               \
        uaccess_ttbr0_disable();                                        \
+                                                                       \
+       (x) = __rgu_val;                                                \
 } while (0)
 
 #define __get_user_error(x, ptr, err)                                  \
@@ -310,14 +320,22 @@ do {                                                                      \
 
 #define get_user       __get_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __get_kernel_nofault(dst, src, type, err_label)                        \
 do {                                                                   \
+       __typeof__(dst) __gkn_dst = (dst);                              \
+       __typeof__(src) __gkn_src = (src);                              \
        int __gkn_err = 0;                                              \
                                                                        \
        __uaccess_enable_tco_async();                                   \
-       __raw_get_mem("ldr", *((type *)(dst)),                          \
-                     (__force type *)(src), __gkn_err);                \
+       __raw_get_mem("ldr", *((type *)(__gkn_dst)),                    \
+                     (__force type *)(__gkn_src), __gkn_err);          \
        __uaccess_disable_tco_async();                                  \
+                                                                       \
        if (unlikely(__gkn_err))                                        \
                goto err_label;                                         \
 } while (0)
@@ -351,11 +369,19 @@ do {                                                                      \
        }                                                               \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_put_user(x, ptr, err)                                    \
 do {                                                                   \
-       __chk_user_ptr(ptr);                                            \
+       __typeof__(*(ptr)) __user *__rpu_ptr = (ptr);                   \
+       __typeof__(*(ptr)) __rpu_val = (x);                             \
+       __chk_user_ptr(__rpu_ptr);                                      \
+                                                                       \
        uaccess_ttbr0_enable();                                         \
-       __raw_put_mem("sttr", x, ptr, err);                             \
+       __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);               \
        uaccess_ttbr0_disable();                                        \
 } while (0)
 
@@ -380,14 +406,22 @@ do {                                                                      \
 
 #define put_user       __put_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __put_kernel_nofault(dst, src, type, err_label)                        \
 do {                                                                   \
+       __typeof__(dst) __pkn_dst = (dst);                              \
+       __typeof__(src) __pkn_src = (src);                              \
        int __pkn_err = 0;                                              \
                                                                        \
        __uaccess_enable_tco_async();                                   \
-       __raw_put_mem("str", *((type *)(src)),                          \
-                     (__force type *)(dst), __pkn_err);                \
+       __raw_put_mem("str", *((type *)(__pkn_src)),                    \
+                     (__force type *)(__pkn_dst), __pkn_err);          \
        __uaccess_disable_tco_async();                                  \
+                                                                       \
        if (unlikely(__pkn_err))                                        \
                goto err_label;                                         \
 } while(0)
index b3e4f9a088b1a76118205f0065fda2cc6859aa2f..8cf970d219f5d896a7805c446e9854dadf7d0cd9 100644 (file)
        .endm
 
 SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+       BTI_C
+#endif
        ftrace_regs_entry       1
        b       ftrace_common
 SYM_CODE_END(ftrace_regs_caller)
 
 SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+       BTI_C
+#endif
        ftrace_regs_entry       0
        b       ftrace_common
 SYM_CODE_END(ftrace_caller)
index fc62dfe73f933a157ddf968217529de1e232f6c5..4506c4a90ac10fbe7910da0d95e9bdaf50ac3945 100644 (file)
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
  * on the way back to parent. For this purpose, this function is called
  * in _mcount() or ftrace_caller() to replace return address (*parent) on
  * the call stack to return_to_handler.
- *
- * Note that @frame_pointer is used only for sanity check later.
  */
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                           unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
         */
        old = *parent;
 
-       if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+       if (!function_graph_enter(old, self_addr, frame_pointer,
+           (void *)frame_pointer)) {
                *parent = return_hooker;
+       }
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
index 1038494135c8cef847829ebcf43b6fa58d596770..6fb31c117ebe08cab0898cd9a8ca552e3c4a7026 100644 (file)
@@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
        if (rc)
                return rc;
        kimage->arch.ttbr1 = __pa(trans_pgd);
-       kimage->arch.zero_page = __pa(empty_zero_page);
+       kimage->arch.zero_page = __pa_symbol(empty_zero_page);
 
        reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
        memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
index c30624fff6acd632f579b1bf9b9bbee2f8726b33..94f83cd44e507644f12ea774d94a7606a60eacb2 100644 (file)
@@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
 {
        frame->fp = fp;
        frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       frame->graph = 0;
-#endif
 #ifdef CONFIG_KRETPROBES
        frame->kr_cur = NULL;
 #endif
@@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
        frame->prev_fp = fp;
        frame->prev_type = info.type;
 
+       frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
-               (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
-               struct ftrace_ret_stack *ret_stack;
+               (frame->pc == (unsigned long)return_to_handler)) {
+               unsigned long orig_pc;
                /*
                 * This is a case where function graph tracer has
                 * modified a return address (LR) in a stack frame
                 * to hook a function return.
                 * So replace it to an original value.
                 */
-               ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
-               if (WARN_ON_ONCE(!ret_stack))
+               orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
+                                               (void *)frame->fp);
+               if (WARN_ON_ONCE(frame->pc == orig_pc))
                        return -EINVAL;
-               frame->pc = ret_stack->ret;
+               frame->pc = orig_pc;
        }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #ifdef CONFIG_KRETPROBES
@@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
                frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
 #endif
 
-       frame->pc = ptrauth_strip_insn_pac(frame->pc);
-
        return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);
index 2f03cbfefe6764353e626a4ebcfcf4a2277ce9ca..e4727dc771bf3a75dbce8384f23313cf623c7c36 100644 (file)
@@ -223,7 +223,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
-               r = num_online_cpus();
+               /*
+                * ARM64 treats KVM_CAP_NR_CPUS differently from all other
+                * architectures, as it does not always bound it to
+                * KVM_CAP_MAX_VCPUS. It should not matter much because
+                * this is just an advisory value.
+                */
+               r = min_t(unsigned int, num_online_cpus(),
+                         kvm_arm_default_max_vcpus());
                break;
        case KVM_CAP_MAX_VCPUS:
        case KVM_CAP_MAX_VCPU_ID:
index 7a0af1d39303cd580a097e42b14fc287f6ff0c92..96c5f3fb78389ef8be01890df1d8c8fa45d1c6e2 100644 (file)
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  */
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+       /*
+        * Save PSTATE early so that we can evaluate the vcpu mode
+        * early on.
+        */
+       vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+       /*
+        * Check whether we want to repaint the state one way or
+        * another.
+        */
+       early_exit_filter(vcpu, exit_code);
+
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
index de7e14c862e6c9b5415df6a7daf815142a302a16..7ecca8b078519fd315c92cbc7cc059c0d2269bed 100644 (file)
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
        ctxt->regs.pc                   = read_sysreg_el2(SYS_ELR);
-       ctxt->regs.pstate               = read_sysreg_el2(SYS_SPSR);
+       /*
+        * Guest PSTATE gets saved at guest fixup time in all
+        * cases. We still need to handle the nVHE host side here.
+        */
+       if (!has_vhe() && ctxt->__hyp_running_vcpu)
+               ctxt->regs.pstate       = read_sysreg_el2(SYS_SPSR);
 
        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
index c0e3fed26d93068bb1953fed35be5e076f42b096..d13115a124341601f6d0f3367dc5d7c6f7095806 100644 (file)
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  * Returns false if the guest ran in AArch32 when it shouldn't have, and
  * thus should exit to the host, or true if a the guest run loop can continue.
  */
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
                vcpu->arch.target = -1;
                *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
                *exit_code |= ARM_EXCEPTION_IL;
-               return false;
        }
-
-       return true;
 }
 
 /* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu);
 
-               if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
-                       break;
-
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
 
index 5a2cb5d9bc4b22a55e7afb591962b741619c8446..fbb26b93c347738ce85f27e090ddff776c08e16d 100644 (file)
@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
        return hyp_exit_handlers;
 }
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
 /* Switch to the guest for VHE systems running in EL2 */
 static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
index e5fbf8653a2158825d660b4e8332f0baab836cda..2020af88b6361df9570f878d08512f08563d2362 100644 (file)
@@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs)
 
 asmlinkage void do_trap_fpe(struct pt_regs *regs)
 {
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
        return fpu_fpe(regs);
 #else
        do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
@@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs)
 
 asmlinkage void do_trap_priv(struct pt_regs *regs)
 {
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
        if (user_mode(regs) && fpu_libc_helper(regs))
                return;
 #endif
diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h
deleted file mode 100644 (file)
index ee6c614..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Timer support for Hexagon
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _ASM_TIMER_REGS_H
-#define _ASM_TIMER_REGS_H
-
-/*  This stuff should go into a platform specific file  */
-#define TCX0_CLK_RATE          19200
-#define TIMER_ENABLE           0
-#define TIMER_CLR_ON_MATCH     1
-
-/*
- * 8x50 HDD Specs 5-8.  Simulator co-sim not fixed until
- * release 1.1, and then it's "adjustable" and probably not defaulted.
- */
-#define RTOS_TIMER_INT         3
-#ifdef CONFIG_HEXAGON_COMET
-#define RTOS_TIMER_REGS_ADDR   0xAB000000UL
-#endif
-#define SLEEP_CLK_RATE         32000
-
-#endif
index 8d4ec76fceb458ab3f21e4fa858fae264d1ef166..dfe69e118b2beb238aaac1700f2622530f077eee 100644 (file)
@@ -7,11 +7,10 @@
 #define _ASM_TIMEX_H
 
 #include <asm-generic/timex.h>
-#include <asm/timer-regs.h>
 #include <asm/hexagon_vm.h>
 
 /* Using TCX0 as our clock.  CLOCK_TICK_RATE scheduled to be removed. */
-#define CLOCK_TICK_RATE              TCX0_CLK_RATE
+#define CLOCK_TICK_RATE              19200
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
diff --git a/arch/hexagon/kernel/.gitignore b/arch/hexagon/kernel/.gitignore
new file mode 100644 (file)
index 0000000..c5f676c
--- /dev/null
@@ -0,0 +1 @@
+vmlinux.lds
index feffe527ac92939311fe61ee1d247572752ce41d..febc95714d756de139f526ee0771c5da8773ed16 100644 (file)
 #include <linux/of_irq.h>
 #include <linux/module.h>
 
-#include <asm/timer-regs.h>
 #include <asm/hexagon_vm.h>
 
+#define TIMER_ENABLE           BIT(0)
+
 /*
  * For the clocksource we need:
  *     pcycle frequency (600MHz)
@@ -33,6 +34,13 @@ cycles_t     pcycle_freq_mhz;
 cycles_t       thread_freq_mhz;
 cycles_t       sleep_clk_freq;
 
+/*
+ * 8x50 HDD Specs 5-8.  Simulator co-sim not fixed until
+ * release 1.1, and then it's "adjustable" and probably not defaulted.
+ */
+#define RTOS_TIMER_INT         3
+#define RTOS_TIMER_REGS_ADDR   0xAB000000UL
+
 static struct resource rtos_timer_resources[] = {
        {
                .start  = RTOS_TIMER_REGS_ADDR,
@@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt)
        iowrite32(0, &rtos_timer->clear);
 
        iowrite32(delta, &rtos_timer->match);
-       iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
+       iowrite32(TIMER_ENABLE, &rtos_timer->enable);
        return 0;
 }
 
index d35d69d6588c4a182106f4951be34c4826f99211..55f75392857b00717bc4999bd491319743b8a5a2 100644 (file)
@@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len)
                *dst++ = *src;
 
 }
+EXPORT_SYMBOL(__raw_readsw);
 
 /*
  * __raw_writesw - read words a short at a time
@@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_writesw);
 
 /*  Pretty sure len is pre-adjusted for the length of the access already */
 void __raw_readsl(const void __iomem *addr, void *data, int len)
@@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_readsl);
 
 void __raw_writesl(void __iomem *addr, const void *data, int len)
 {
@@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len)
 
 
 }
+EXPORT_SYMBOL(__raw_writesl);
index 6fea1844fb952783bbebcf358067eb0beb378f2c..707ae121f6d39173d5a8aa773ffdd2693077ef16 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index 8ab46625ddd32d880eaaf604962103c386bf7889..1ac55e7b47f01cc9ac5427a0eac6854eb961ca48 100644 (file)
@@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)                __flush_page_to_ram(page_address(page))
-void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)      do { } while (0)
 #define flush_icache_page(vma, page)   __flush_page_to_ram(page_address(page))
index 7976dff8f879d6f207c7cf3d97973ead1bc28aa3..45bc32a41b903c638c7353e1dd5b07cd0575183f 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index 99058a6da956da2f4c10ace2fc45cc6e7ff19cef..34d6458340b0f79097a7e0f17326012a7823c042 100644 (file)
@@ -1145,7 +1145,7 @@ asmlinkage void set_esp0(unsigned long ssp)
  */
 asmlinkage void fpsp040_die(void)
 {
-       force_fatal_sig(SIGSEGV);
+       force_exit_sig(SIGSEGV);
 }
 
 #ifdef CONFIG_M68KFPU_EMU
index 6b0e11362bd2ebfff18b42045e88c371f5a171a0..2204bde3ce4a041ffe594026104d1f01640cc3c9 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index de60ad190057643c19f95b419496ebc3daf665cc..0215dc1529e9ae69bb938d296b482ffe61365227 100644 (file)
@@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT
 config PGTABLE_LEVELS
        int
        default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
-       default 3 if 64BIT && !PAGE_SIZE_64KB
+       default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
        default 2
 
 config MIPS_AUTO_PFN_OFFSET
index 5a3e325275d0da2b8ed2eef8182b63ddb5ccfa88..1c91064cb448b0ce255584e4702b607826e781c1 100644 (file)
@@ -381,6 +381,12 @@ void clk_disable(struct clk *clk)
 
 EXPORT_SYMBOL(clk_disable);
 
+struct clk *clk_get_parent(struct clk *clk)
+{
+       return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
 unsigned long clk_get_rate(struct clk *clk)
 {
        if (!clk)
index 2861a05c2e0c046cacc665c4ce5c89d92309e0f7..f27cf31b41401423b9ef51f703fcf03434fa7d3d 100644 (file)
@@ -52,7 +52,7 @@ endif
 
 vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
 
-vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
+vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o
 
 targets := $(notdir $(vmlinuzobjs-y))
 
index a3aa22c77cadc2507eaff4b067cd34ff62ab1618..a07a5edbcda780f7d22d51833356ee6109da8404 100644 (file)
@@ -75,7 +75,7 @@ static unsigned int __init gen_fdt_mem_array(
 __init int yamon_dt_append_memory(void *fdt,
                                  const struct yamon_mem_region *regions)
 {
-       unsigned long phys_memsize, memsize;
+       unsigned long phys_memsize = 0, memsize;
        __be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
        unsigned int mem_entries;
        int i, err, mem_off;
index f207388541d509429be24ffdad57c3b63aafb4b0..b3dc9c589442a8c3815b7a068b18dde418b1faec 100644 (file)
@@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
                SetPageDcacheDirty(page);
 }
 
-void flush_dcache_folio(struct folio *folio);
-
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)      do { } while (0)
 
index ac0e2cfc6d57a35cb6f3d1e25e673a54ef10f176..24a529c6c4be58166a449c7c58fcc089eb6477f0 100644 (file)
@@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
 
 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 {
-       decode_configs(c);
-
        /* All Loongson processors covered here define ExcCode 16 as GSExc. */
        c->options |= MIPS_CPU_GSEXCEX;
 
@@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
                panic("Unknown Loongson Processor ID!");
                break;
        }
+
+       decode_configs(c);
 }
 #else
 static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
index 376a6e2676e9e2f128b10b6607f99a1dc6d45223..9f47a889b047e98ef1f6172aa2493180972bd1b2 100644 (file)
@@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_puts(m, " tx39_cache");
        if (cpu_has_octeon_cache)
                seq_puts(m, " octeon_cache");
-       if (cpu_has_fpu)
+       if (raw_cpu_has_fpu)
                seq_puts(m, " fpu");
        if (cpu_has_32fpr)
                seq_puts(m, " 32fpr");
index 70e32de2bcaa1eecaf8f7c084b88d49c7d3f4f25..72d02d363f36fe43d1d18806260a525868b09bfa 100644 (file)
 446    n32     landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    n32     process_mrelease                sys_process_mrelease
+449    n32     futex_waitv                     sys_futex_waitv
index 1ca7bc337932bcae3e58cb8c07694096ecdc5592..e2c481fcede6bd11a5bbd4b43c02b780be0de10b 100644 (file)
 446    n64     landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    n64     process_mrelease                sys_process_mrelease
+449    n64     futex_waitv                     sys_futex_waitv
index a61c35edaa74ceff698a4d0caaa39170cf5d3093..3714c97b2643962db70fa56f820c429524f7a661 100644 (file)
 446    o32     landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    o32     process_mrelease                sys_process_mrelease
+449    o32     futex_waitv                     sys_futex_waitv
index 562aa878b26698bc899a0532b7ca5be81c6b33fe..aa20d074d38836ed3482cc0cb6915d92fb4a98d6 100644 (file)
@@ -1067,7 +1067,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
-               r = num_online_cpus();
+               r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
index dd819e31fcbbfa3f51c6e29867082955e0bfe8af..4916cccf378fdd1d446f6ed23420828133e73563 100644 (file)
@@ -158,6 +158,12 @@ void clk_deactivate(struct clk *clk)
 }
 EXPORT_SYMBOL(clk_deactivate);
 
+struct clk *clk_get_parent(struct clk *clk)
+{
+       return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
 static inline u32 get_counter_resolution(void)
 {
        u32 res;
index 6f3a7b07294b8340707f01e0f22491a04a1d0e71..a37fe20818eb9c02d971cd42d7de3c69628b6ca2 100644 (file)
@@ -98,7 +98,7 @@ do {                                                          \
 #define emit(...) __emit(__VA_ARGS__)
 
 /* Workaround for R10000 ll/sc errata */
-#ifdef CONFIG_WAR_R10000
+#ifdef CONFIG_WAR_R10000_LLSC
 #define LLSC_beqz      beqzl
 #else
 #define LLSC_beqz      beqz
index 3fc0bb7d6487cc6b350a9d695878fce0b3ffb90b..c2a222ebfa2af1439b96d6774e32d6734c8f42c6 100644 (file)
@@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, void *src, int len);
 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
index 1999561b22aa500eaee03aeb3508ed21bf55f86b..d0b71dd7128724efef08ab753ddbc362a0a3041d 100644 (file)
@@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
        unsigned long pfn);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
index 8db4af4879d02f63c84a72e9304a71586bf70f7c..82d77f4b0d0832b92891899aff9f1b797114d37c 100644 (file)
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #
 
+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
 KBUILD_IMAGE := vmlinuz
+endif
 
 NM             = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
index d6fd8fa7ed8c9deb536025080fcc49cb1ca9e21c..53061cb2cf7f09bda6b3349a32c2296ad65f5abe 100644 (file)
@@ -231,6 +231,7 @@ CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_FONTS=y
+CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_MEMORY_INIT=y
index d2daeac2b21757cdf62c3c13bf175a58dabc5a44..1b8fd80cbe7f89ad43a89beef21bdadb813b56dd 100644 (file)
@@ -1,7 +1,9 @@
 CONFIG_LOCALVERSION="-64bit"
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZ4=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BINFMT_MISC=m
 # CONFIG_COMPACTION is not set
+CONFIG_MEMORY_FAILURE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
 CONFIG_SCSI_SRP_ATTRS=y
 CONFIG_ISCSI_BOOT_SYSFS=y
 CONFIG_SCSI_MPT2SAS=y
-CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_LASI700=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_ZALON=y
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_DH=y
 CONFIG_ATA=y
+CONFIG_SATA_SIL=y
+CONFIG_SATA_SIS=y
+CONFIG_SATA_VIA=y
 CONFIG_PATA_NS87415=y
 CONFIG_PATA_SIL680=y
 CONFIG_ATA_GENERIC=y
@@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_RAID=m
 CONFIG_DM_UEVENT=y
+CONFIG_DM_AUDIT=y
 CONFIG_FUSION=y
 CONFIG_FUSION_SPI=y
 CONFIG_FUSION_SAS=y
@@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
 CONFIG_FB_MATROX_I2C=y
 CONFIG_FB_MATROX_MAVEN=y
 CONFIG_FB_RADEON=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_CLUT224 is not set
 CONFIG_HIDRAW=y
 CONFIG_HID_PID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_UIO=y
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_AEC=m
index 7085df07970299be04fd763dd8d3d958feed37d9..6d13ae236fcb073848bc851dbf74d6ab68bc198b 100644 (file)
@@ -3,38 +3,19 @@
  * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
  * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
  * Copyright (C) 1999 SuSE GmbH
+ * Copyright (C) 2021 Helge Deller <deller@gmx.de>
  */
 
 #ifndef _PARISC_ASSEMBLY_H
 #define _PARISC_ASSEMBLY_H
 
-#define CALLEE_FLOAT_FRAME_SIZE        80
-
 #ifdef CONFIG_64BIT
-#define LDREG  ldd
-#define STREG  std
-#define LDREGX  ldd,s
-#define LDREGM ldd,mb
-#define STREGM std,ma
-#define SHRREG shrd
-#define SHLREG shld
-#define ANDCM   andcm,*
-#define        COND(x) * ## x
 #define RP_OFFSET      16
 #define FRAME_SIZE     128
 #define CALLEE_REG_FRAME_SIZE  144
 #define REG_SZ         8
 #define ASM_ULONG_INSN .dword
 #else  /* CONFIG_64BIT */
-#define LDREG  ldw
-#define STREG  stw
-#define LDREGX  ldwx,s
-#define LDREGM ldwm
-#define STREGM stwm
-#define SHRREG shr
-#define SHLREG shlw
-#define ANDCM   andcm
-#define COND(x)        x
 #define RP_OFFSET      20
 #define FRAME_SIZE     64
 #define CALLEE_REG_FRAME_SIZE  128
@@ -45,6 +26,7 @@
 /* Frame alignment for 32- and 64-bit */
 #define FRAME_ALIGN     64
 
+#define CALLEE_FLOAT_FRAME_SIZE        80
 #define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
 
 #ifdef CONFIG_PA20
 
 #ifdef __ASSEMBLY__
 
+#ifdef CONFIG_64BIT
+#define LDREG  ldd
+#define STREG  std
+#define LDREGX  ldd,s
+#define LDREGM ldd,mb
+#define STREGM std,ma
+#define SHRREG shrd
+#define SHLREG shld
+#define ANDCM   andcm,*
+#define        COND(x) * ## x
+#else  /* CONFIG_64BIT */
+#define LDREG  ldw
+#define STREG  stw
+#define LDREGX  ldwx,s
+#define LDREGM ldwm
+#define STREGM stwm
+#define SHRREG shr
+#define SHLREG shlw
+#define ANDCM   andcm
+#define COND(x)        x
+#endif
+
 #ifdef CONFIG_64BIT
 /* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so
  * work around that for now... */
        extrd,u \r, 63-(\sa), 64-(\sa), \t
        .endm
 
+       /* Extract unsigned for 32- and 64-bit
+        * The extru instruction leaves the most significant 32 bits of the
+        * target register in an undefined state on PA 2.0 systems. */
+       .macro extru_safe r, p, len, t
+#ifdef CONFIG_64BIT
+       extrd,u \r, 32+(\p), \len, \t
+#else
+       extru   \r, \p, \len, \t
+#endif
+       .endm
+
        /* load 32-bit 'value' into 'reg' compensating for the ldil
         * sign-extension when running in wide mode.
         * WARNING!! neither 'value' nor 'reg' can be expressions
index da0cd4b3a28f29eb944c5c69adaaef7df82628d9..859b8a34adcfbc499422ee2af1789d7892aae983 100644 (file)
@@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 #define flush_dcache_mmap_lock(mapping)                xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)      xa_unlock_irq(&mapping->i_pages)
index 7efb1aa2f7f8508a5c304634aad34d3d68e81201..af2a598bc0f819cc912129cbdecd56b642f663ab 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <linux/stringify.h>
 #include <asm/assembly.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
index 4b9e3d707571be6f4d375fa89bdcd22bd80aa37b..2b3010ade00e7baa65f77e14e01da1526a490028 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _ASM_PARISC_RT_SIGFRAME_H
 #define _ASM_PARISC_RT_SIGFRAME_H
 
-#define SIGRETURN_TRAMP 3
+#define SIGRETURN_TRAMP 4
 #define SIGRESTARTBLOCK_TRAMP 5 
 #define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)
 
index 056d588befdd63671fbaf5a2e3f1afc498d9f8b0..70d3cffb02515c7e8f99a1f493994fca16ce648b 100644 (file)
@@ -39,6 +39,7 @@ verify "$3"
 if [ -n "${INSTALLKERNEL}" ]; then
   if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
   if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
 fi
 
 # Default install
index 88c188a965d86f23f5df390c831cdd601c574aee..6e9cdb26986282466a9ec5ba62e0613f9129de9f 100644 (file)
         */
        .macro          L2_ptep pmd,pte,index,va,fault
 #if CONFIG_PGTABLE_LEVELS == 3
-       extru           \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+       extru_safe      \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
-# if defined(CONFIG_64BIT)
-       extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
-  #else
-  # if PAGE_SIZE > 4096
-       extru           \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
-  # else
-       extru           \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
-  # endif
-# endif
+       extru_safe      \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #endif
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
 #if CONFIG_PGTABLE_LEVELS < 3
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
        dep             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        SHLREG          \pmd,PxD_VALUE_SHIFT,\pmd
-       extru           \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+       extru_safe      \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
        .endm
index bbfe23c40c016ba44920e15d40df90d0115de71c..46b1050640b80c587abe6758868798c68fa87112 100644 (file)
@@ -288,21 +288,22 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
           already in userspace. The first words of tramp are used to
           save the previous sigrestartblock trampoline that might be
           on the stack. We start the sigreturn trampoline at 
-          SIGRESTARTBLOCK_TRAMP. */
+          SIGRESTARTBLOCK_TRAMP+X. */
        err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
                        &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
-       err |= __put_user(INSN_BLE_SR2_R0, 
+       err |= __put_user(INSN_LDI_R20, 
                        &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
-       err |= __put_user(INSN_LDI_R20,
+       err |= __put_user(INSN_BLE_SR2_R0, 
                        &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
+       err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
 
-       start = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+0];
-       end = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+3];
+       start = (unsigned long) &frame->tramp[0];
+       end = (unsigned long) &frame->tramp[TRAMP_SIZE];
        flush_user_dcache_range_asm(start, end);
        flush_user_icache_range_asm(start, end);
 
        /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
-        * TRAMP Words 5-7, Length 3 = SIGRETURN_TRAMP
+        * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
         * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
         */
        rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
index a5bdbb5678b72f2d6a8a85a5326f809af5fe27bc..f166250f2d064815f235c7cae8238fc3a52258d6 100644 (file)
@@ -36,7 +36,7 @@ struct compat_regfile {
         compat_int_t rf_sar;
 };
 
-#define COMPAT_SIGRETURN_TRAMP 3
+#define COMPAT_SIGRETURN_TRAMP 4
 #define COMPAT_SIGRESTARTBLOCK_TRAMP 5
 #define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
                                COMPAT_SIGRESTARTBLOCK_TRAMP)
index 4fb3b6a993bf6e58065462b2aa2d6a654c286627..d2497b339d139295a42a45c895a01dfd184cb352 100644 (file)
@@ -566,7 +566,7 @@ lws_compare_and_swap:
        ldo     R%lws_lock_start(%r20), %r28
 
        /* Extract eight bits from r26 and hash lock (Bits 3-11) */
-       extru  %r26, 28, 8, %r20
+       extru_safe  %r26, 28, 8, %r20
 
        /* Find lock to use, the hash is either one of 0 to
           15, multiplied by 16 (keep it 16-byte aligned)
@@ -751,7 +751,7 @@ cas2_lock_start:
        ldo     R%lws_lock_start(%r20), %r28
 
        /* Extract eight bits from r26 and hash lock (Bits 3-11) */
-       extru  %r26, 28, 8, %r20
+       extru_safe  %r26, 28, 8, %r20
 
        /* Find lock to use, the hash is either one of 0 to
           15, multiplied by 16 (keep it 16-byte aligned)
index bf751e0732b700db23719788f90c463e47bdf1c9..358c000007553d3f9a2704b39fa412757e83d494 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index 9fb1e794831b0eb1db779c220041e04b964c883c..061119a56fbe81b0a64540de3ff6d2596f5ec112 100644 (file)
@@ -249,30 +249,16 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
        /*
-        * The cr16 interval timers are not syncronized across CPUs on
-        * different sockets, so mark them unstable and lower rating on
-        * multi-socket SMP systems.
+        * The cr16 interval timers are not syncronized across CPUs, even if
+        * they share the same socket.
         */
        if (num_online_cpus() > 1 && !running_on_qemu) {
-               int cpu;
-               unsigned long cpu0_loc;
-               cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
-               for_each_online_cpu(cpu) {
-                       if (cpu == 0)
-                               continue;
-                       if ((cpu0_loc != 0) &&
-                           (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
-                               continue;
-
-                       /* mark sched_clock unstable */
-                       clear_sched_clock_stable();
-
-                       clocksource_cr16.name = "cr16_unstable";
-                       clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-                       clocksource_cr16.rating = 0;
-                       break;
-               }
+               /* mark sched_clock unstable */
+               clear_sched_clock_stable();
+
+               clocksource_cr16.name = "cr16_unstable";
+               clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+               clocksource_cr16.rating = 0;
        }
 
        /* register at clocksource framework */
index 3d208afd15bc6d14bf0b14e6b433dc4d6669f7b0..2769eb991f58d120f52bf905dcc96b81f0b556b4 100644 (file)
@@ -57,8 +57,6 @@ SECTIONS
 {
        . = KERNEL_BINARY_TEXT_START;
 
-       _stext = .;     /* start of kernel text, includes init code & data */
-
        __init_begin = .;
        HEAD_TEXT_SECTION
        MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
@@ -82,6 +80,7 @@ SECTIONS
        /* freed after init ends here */
 
        _text = .;              /* Text and read-only data */
+       _stext = .;
        MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
        .text ALIGN(PAGE_SIZE) : {
                TEXT_TEXT
index 0e3640e14eb111ae4c7263e7f7be08b47ad7d136..5fa68c2ef1f81a8291b0a2ba4cff060a8b9f115e 100644 (file)
@@ -196,3 +196,6 @@ clean-files := vmlinux.lds
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
+
+# for cleaning
+subdir- += vdso32 vdso64
index 6b1ec9e3541b9046ac0a81198ac3598d21f7c294..349c4a820231bd2ecc21baed8ae9a1721acd351e 100644 (file)
@@ -202,11 +202,11 @@ vmap_stack_overflow:
        mfspr   r1, SPRN_SPRG_THREAD
        lwz     r1, TASK_CPU - THREAD(r1)
        slwi    r1, r1, 3
-       addis   r1, r1, emergency_ctx@ha
+       addis   r1, r1, emergency_ctx-PAGE_OFFSET@ha
 #else
-       lis     r1, emergency_ctx@ha
+       lis     r1, emergency_ctx-PAGE_OFFSET@ha
 #endif
-       lwz     r1, emergency_ctx@l(r1)
+       lwz     r1, emergency_ctx-PAGE_OFFSET@l(r1)
        addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
        EXCEPTION_PROLOG_2 0 vmap_stack_overflow
        prepare_transfer_to_handler
index 2d596881b70e70c24627020ee3a31f36680ec945..0d073b9fd52c5b5ef0988fe4cdce4a8270113844 100644 (file)
@@ -733,6 +733,7 @@ _GLOBAL(mmu_pin_tlb)
 #ifdef CONFIG_PIN_TLB_DATA
        LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
        LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
+       li      r8, 0
 #ifdef CONFIG_PIN_TLB_IMMR
        li      r0, 3
 #else
@@ -741,26 +742,26 @@ _GLOBAL(mmu_pin_tlb)
        mtctr   r0
        cmpwi   r4, 0
        beq     4f
-       LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
        LOAD_REG_ADDR(r9, _sinittext)
 
 2:     ori     r0, r6, MD_EVALID
+       ori     r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
        mtspr   SPRN_MD_CTR, r5
        mtspr   SPRN_MD_EPN, r0
        mtspr   SPRN_MD_TWC, r7
-       mtspr   SPRN_MD_RPN, r8
+       mtspr   SPRN_MD_RPN, r12
        addi    r5, r5, 0x100
        addis   r6, r6, SZ_8M@h
        addis   r8, r8, SZ_8M@h
        cmplw   r6, r9
        bdnzt   lt, 2b
-
-4:     LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+4:
 2:     ori     r0, r6, MD_EVALID
+       ori     r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
        mtspr   SPRN_MD_CTR, r5
        mtspr   SPRN_MD_EPN, r0
        mtspr   SPRN_MD_TWC, r7
-       mtspr   SPRN_MD_RPN, r8
+       mtspr   SPRN_MD_RPN, r12
        addi    r5, r5, 0x100
        addis   r6, r6, SZ_8M@h
        addis   r8, r8, SZ_8M@h
@@ -781,7 +782,7 @@ _GLOBAL(mmu_pin_tlb)
 #endif
 #if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
        lis     r0, (MD_RSV4I | MD_TWAM)@h
-       mtspr   SPRN_MI_CTR, r0
+       mtspr   SPRN_MD_CTR, r0
 #endif
        mtspr   SPRN_SRR1, r10
        mtspr   SPRN_SRR0, r11
index 1f07317964e499c73ced538b8477e2f45183d3b1..618aeccdf69184464ec8dde6db4208c2fc3ab8eb 100644 (file)
@@ -25,8 +25,14 @@ static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src)
 
        return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
 }
-#define unsafe_get_user_sigset(dst, src, label) \
-       unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label)
+#define unsafe_get_user_sigset(dst, src, label) do {                   \
+       sigset_t *__dst = dst;                                          \
+       const sigset_t __user *__src = src;                             \
+       int i;                                                          \
+                                                                       \
+       for (i = 0; i < _NSIG_WORDS; i++)                               \
+               unsafe_get_user(__dst->sig[i], &__src->sig[i], label);  \
+} while (0)
 
 #ifdef CONFIG_VSX
 extern unsigned long copy_vsx_to_user(void __user *to,
index 00a9c9cd6d421c4fc1536fdb9e527cd9e78a45a6..3e053e2fd6b693bec8075c06573cfb252241fdd1 100644 (file)
@@ -1063,7 +1063,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
         * We kill the task with a SIGSEGV in this situation.
         */
        if (do_setcontext(new_ctx, regs, 0)) {
-               force_fatal_sig(SIGSEGV);
+               force_exit_sig(SIGSEGV);
                return -EFAULT;
        }
 
index ef518535d436878cdc869725789bdaf048f5d3c5..d1e1fc0acbea32822ce3ab910e6f9ad1565019ea 100644 (file)
@@ -704,7 +704,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
         */
 
        if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
-               force_fatal_sig(SIGSEGV);
+               force_exit_sig(SIGSEGV);
                return -EFAULT;
        }
        set_current_blocked(&set);
@@ -713,7 +713,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                return -EFAULT;
        if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
                user_read_access_end();
-               force_fatal_sig(SIGSEGV);
+               force_exit_sig(SIGSEGV);
                return -EFAULT;
        }
        user_read_access_end();
index 7bef917cc84e6499baf173cc4feef4c8430527f6..15109af9d0754d5fcb6e455532944ef1a0a14e17 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index f9ea0e5357f9290a15c7c843a6b2ce45129b74bc..3fa6d240bade21c0e51002f48956e12db9003263 100644 (file)
@@ -187,6 +187,12 @@ static void watchdog_smp_panic(int cpu, u64 tb)
        if (sysctl_hardlockup_all_cpu_backtrace)
                trigger_allbutself_cpu_backtrace();
 
+       /*
+        * Force flush any remote buffers that might be stuck in IRQ context
+        * and therefore could not run their irq_work.
+        */
+       printk_trigger_flush();
+
        if (hardlockup_panic)
                nmi_panic(NULL, "Hard LOCKUP");
 
index fcf4760a3a0ea27ae04b12819fd7fa9b10bad749..70b7a8f97153844f5d1eb4f1f29f744b93fb0084 100644 (file)
@@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
                                       "r" (0) : "memory");
                }
                asm volatile("ptesync": : :"memory");
+               // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
                asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
        } else {
                for (set = 0; set < kvm->arch.tlb_sets; ++set) {
@@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
                        rb += PPC_BIT(51);      /* increment set number */
                }
                asm volatile("ptesync": : :"memory");
-               asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+               // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+               if (cpu_has_feature(CPU_FTR_ARCH_300))
+                       asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
        }
 }
 
index eb776d0c5d8e97115714e79ffdfcd9b8de177576..32a4b4d412b92bee96ab73346bbecf63175d024e 100644 (file)
@@ -2005,7 +2005,7 @@ hcall_real_table:
        .globl  hcall_real_table_end
 hcall_real_table_end:
 
-_GLOBAL(kvmppc_h_set_xdabr)
+_GLOBAL_TOC(kvmppc_h_set_xdabr)
 EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
        andi.   r0, r5, DABRX_USER | DABRX_KERNEL
        beq     6f
@@ -2015,7 +2015,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
 6:     li      r3, H_PARAMETER
        blr
 
-_GLOBAL(kvmppc_h_set_dabr)
+_GLOBAL_TOC(kvmppc_h_set_dabr)
 EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
        li      r5, DABRX_USER | DABRX_KERNEL
 3:
index 35e9cccdeef92fe87e04a4fe5f2197cb51674e5e..a72920f4f221fab998e4bb6c50c0739d8c9c63a9 100644 (file)
@@ -641,9 +641,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
-                       r = num_present_cpus();
+                       r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
                else
-                       r = num_online_cpus();
+                       r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
index 8fc49b1b4a913d35ccfd029af33108c6d6497510..6ec978967da0958a3cf550af5a2b545d5a89239e 100644 (file)
@@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
                pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
 
        ram = min_t(phys_addr_t, __max_low_memory, size);
-       ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
+       ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
        linear_sz = min_t(unsigned long, ram, SZ_512M);
 
        /* If the linear size is smaller than 64M, do not randmize */
index 89353d4f560466ceb36d3b30866c1c7a6e262edb..647bf454a0fa5b4c8e6057fb0f03b9fab92d9e8c 100644 (file)
@@ -645,7 +645,7 @@ static void early_init_this_mmu(void)
 
                if (map)
                        linear_map_top = map_mem_in_cams(linear_map_top,
-                                                        num_cams, true, true);
+                                                        num_cams, false, true);
        }
 #endif
 
@@ -766,7 +766,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
 
                linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
-                                           false, true);
+                                           true, true);
 
                ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
        } else
index 6f14c8fb6359d519f14f4de822fe5f772c0ba7c3..59d3cfcd78879a5f443e5b6d803f011029e0e606 100644 (file)
@@ -376,9 +376,9 @@ static void initialize_form2_numa_distance_lookup_table(void)
 {
        int i, j;
        struct device_node *root;
-       const __u8 *numa_dist_table;
+       const __u8 *form2_distances;
        const __be32 *numa_lookup_index;
-       int numa_dist_table_length;
+       int form2_distances_length;
        int max_numa_index, distance_index;
 
        if (firmware_has_feature(FW_FEATURE_OPAL))
@@ -392,45 +392,41 @@ static void initialize_form2_numa_distance_lookup_table(void)
        max_numa_index = of_read_number(&numa_lookup_index[0], 1);
 
        /* first element of the array is the size and is encode-int */
-       numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL);
-       numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1);
+       form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
+       form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
        /* Skip the size which is encoded int */
-       numa_dist_table += sizeof(__be32);
+       form2_distances += sizeof(__be32);
 
-       pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n",
-                numa_dist_table_length, max_numa_index);
+       pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
+                form2_distances_length, max_numa_index);
 
        for (i = 0; i < max_numa_index; i++)
                /* +1 skip the max_numa_index in the property */
                numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
 
 
-       if (numa_dist_table_length != max_numa_index * max_numa_index) {
+       if (form2_distances_length != max_numa_index * max_numa_index) {
                WARN(1, "Wrong NUMA distance information\n");
-               /* consider everybody else just remote. */
-               for (i = 0;  i < max_numa_index; i++) {
-                       for (j = 0; j < max_numa_index; j++) {
-                               int nodeA = numa_id_index_table[i];
-                               int nodeB = numa_id_index_table[j];
-
-                               if (nodeA == nodeB)
-                                       numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE;
-                               else
-                                       numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE;
-                       }
-               }
+               form2_distances = NULL; // don't use it
        }
-
        distance_index = 0;
        for (i = 0;  i < max_numa_index; i++) {
                for (j = 0; j < max_numa_index; j++) {
                        int nodeA = numa_id_index_table[i];
                        int nodeB = numa_id_index_table[j];
-
-                       numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++];
-                       pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]);
+                       int dist;
+
+                       if (form2_distances)
+                               dist = form2_distances[distance_index++];
+                       else if (nodeA == nodeB)
+                               dist = LOCAL_DISTANCE;
+                       else
+                               dist = REMOTE_DISTANCE;
+                       numa_distance_table[nodeA][nodeB] = dist;
+                       pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
                }
        }
+
        of_node_put(root);
 }
 
index bb789f33c70e035e6b52600384b98e4ca161f368..a38372f9ac12c65cc59bfc7ac888a376523c6c78 100644 (file)
@@ -186,7 +186,6 @@ err:
 static int mcu_remove(struct i2c_client *client)
 {
        struct mcu *mcu = i2c_get_clientdata(client);
-       int ret;
 
        kthread_stop(shutdown_thread);
 
index 49b401536d297428da3e54fcfd5deac7aa63731a..8f998e55735bfdf45312604a05e0faa5ef772f5e 100644 (file)
@@ -1094,15 +1094,6 @@ static phys_addr_t ddw_memory_hotplug_max(void)
        phys_addr_t max_addr = memory_hotplug_max();
        struct device_node *memory;
 
-       /*
-        * The "ibm,pmemory" can appear anywhere in the address space.
-        * Assuming it is still backed by page structs, set the upper limit
-        * for the huge DMA window as MAX_PHYSMEM_BITS.
-        */
-       if (of_find_node_by_type(NULL, "ibm,pmemory"))
-               return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
-                       (phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);
-
        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int n_mem_addr_cells, n_mem_size_cells, len;
@@ -1238,7 +1229,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
        u32 ddw_avail[DDW_APPLICABLE_SIZE];
        struct dma_win *window;
        struct property *win64;
-       bool ddw_enabled = false;
        struct failed_ddw_pdn *fpdn;
        bool default_win_removed = false, direct_mapping = false;
        bool pmem_present;
@@ -1253,7 +1243,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 
        if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
                direct_mapping = (len >= max_ram_len);
-               ddw_enabled = true;
                goto out_unlock;
        }
 
@@ -1367,8 +1356,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
                len = order_base_2(query.largest_available_block << page_shift);
                win_name = DMA64_PROPNAME;
        } else {
-               direct_mapping = true;
-               win_name = DIRECT64_PROPNAME;
+               direct_mapping = !default_win_removed ||
+                       (len == MAX_PHYSMEM_BITS) ||
+                       (!pmem_present && (len == max_ram_len));
+               win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
        }
 
        ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
@@ -1406,8 +1397,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
                        dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
                                 dn, ret);
 
-               /* Make sure to clean DDW if any TCE was set*/
-               clean_dma_window(pdn, win64->value);
+                       /* Make sure to clean DDW if any TCE was set*/
+                       clean_dma_window(pdn, win64->value);
                        goto out_del_list;
                }
        } else {
@@ -1454,7 +1445,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
        spin_unlock(&dma_win_list_lock);
 
        dev->dev.archdata.dma_offset = win_addr;
-       ddw_enabled = true;
        goto out_unlock;
 
 out_del_list:
@@ -1490,10 +1480,10 @@ out_unlock:
         * as RAM, then we failed to create a window to cover persistent
         * memory and need to set the DMA limit.
         */
-       if (pmem_present && ddw_enabled && direct_mapping && len == max_ram_len)
+       if (pmem_present && direct_mapping && len == max_ram_len)
                dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);
 
-    return ddw_enabled && direct_mapping;
+       return direct_mapping;
 }
 
 static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
index 97796c6b63f04e706faa80a99b52bcb432465891..785c292d104b7508b50da247910863383696556e 100644 (file)
@@ -3,7 +3,6 @@ config PPC_XIVE
        bool
        select PPC_SMP_MUXED_IPI
        select HARDIRQS_SW_RESEND
-       select IRQ_DOMAIN_NOMAP
 
 config PPC_XIVE_NATIVE
        bool
index c5d75c02ad8b512d51b13eb0c4a7baf7bec3524e..7b69299c29123564050604d64d81431962dc7c32 100644 (file)
@@ -1443,8 +1443,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = {
 
 static void __init xive_init_host(struct device_node *np)
 {
-       xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
-                                              &xive_irq_domain_ops, NULL);
+       xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
        if (WARN_ON(xive_irq_domain == NULL))
                return;
        irq_set_default_host(xive_irq_domain);
index 5927c94302b87bccd2a11b872b391e5fc164cf0d..8a107ed18b0dc71b7804203540078044cddd1976 100644 (file)
@@ -107,11 +107,13 @@ PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
 
+ifeq ($(KBUILD_EXTMOD),)
 ifeq ($(CONFIG_MMU),y)
 prepare: vdso_prepare
 vdso_prepare: prepare0
        $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
 endif
+endif
 
 ifneq ($(CONFIG_XIP_KERNEL),y)
 ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
index c252fd5706d20db933a6873ce789d8b1ad66c5ef..ef473e2f503b2e1ff6a43f1ac61d1e81a9a80337 100644 (file)
@@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
 CONFIG_SOC_MICROCHIP_POLARFIRE=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
index 434ef5b645998e926c81a7a1f01728bc131f3cde..6e9f12ff968acdfb0d65c119b48b9bfc660250ff 100644 (file)
@@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
 CONFIG_ARCH_RV32I=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
index 25ba21f98504120d8a723c13655d5bd583d35b4f..2639b9ee48f97d0b69c72177d55eb2dd1e9be1d2 100644 (file)
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>
 
-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS                  (1U << 16)
-#else
-#define KVM_MAX_VCPUS                  (1U << 9)
-#endif
+#define KVM_MAX_VCPUS                  \
+       ((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
 
 #define KVM_HALT_POLL_NS_DEFAULT       500000
 
index d81bae8eb55ea0a0f81bed64b5c3ab589a99a00a..fc058ff5f4b6f3ac393d58ea25d0446a19fc7664 100644 (file)
@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
 {
+       gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+       spin_lock(&kvm->mmu_lock);
+       stage2_unmap_range(kvm, gpa, size, false);
+       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
index e3d3aed461840f517ddcc16bdaa6c98cfd19712a..fb84619df012781bdee0f907bfce58d7dcb1adcc 100644 (file)
@@ -740,7 +740,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
                 * See the comment in kvm_vcpu_exiting_guest_mode() and
-                * Documentation/virtual/kvm/vcpu-requests.rst
+                * Documentation/virt/kvm/vcpu-requests.rst
                 */
                vcpu->mode = IN_GUEST_MODE;
 
index eb3c045edf11b01da7e4a04bcb81bafc0e5d667d..3b0e703d22cfb2da3b41f5cf6bb1ae7789888f02 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * Copyright (c) 2019 Western Digital Corporation or its affiliates.
  *
  * Authors:
index 26399df15b63418aaeb2529fd65145c4dec92a47..fb18af34a4b52f1275763a829324bca84a358bbc 100644 (file)
@@ -74,7 +74,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
-               r = num_online_cpus();
+               r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
index 8857ec3b97eb887d4bc78ecc8462b942b2b62071..2a5bb4f29cfede5a627b61795e6d6b133c979ccd 100644 (file)
@@ -47,7 +47,7 @@ config ARCH_SUPPORTS_UPROBES
 config KASAN_SHADOW_OFFSET
        hex
        depends on KASAN
-       default 0x18000000000000
+       default 0x1C000000000000
 
 config S390
        def_bool y
@@ -194,6 +194,7 @@ config S390
        select HAVE_RELIABLE_STACKTRACE
        select HAVE_RSEQ
        select HAVE_SAMPLE_FTRACE_DIRECT
+       select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        select HAVE_SOFTIRQ_ON_OWN_STACK
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING
index 69c45f600273bea6132060f84ffca10587a37889..609e3697324b12fad3c50b810ab403e137dff71c 100644 (file)
@@ -77,10 +77,12 @@ KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
 KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
 
 ifneq ($(call cc-option,-mstack-size=8192 -mstack-guard=128),)
-cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
-ifeq ($(call cc-option,-mstack-size=8192),)
-cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
-endif
+  CC_FLAGS_CHECK_STACK := -mstack-size=$(STACK_SIZE)
+  ifeq ($(call cc-option,-mstack-size=8192),)
+    CC_FLAGS_CHECK_STACK += -mstack-guard=$(CONFIG_STACK_GUARD)
+  endif
+  export CC_FLAGS_CHECK_STACK
+  cflags-$(CONFIG_CHECK_STACK) += $(CC_FLAGS_CHECK_STACK)
 endif
 
 ifdef CONFIG_EXPOLINE
index 7571dee72a0cdd7c4fec06cae66b8479a92b6667..1aa11a8f57dd827432c5e439feb0a299e3ece7d7 100644 (file)
@@ -149,82 +149,56 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
 
 static void setup_kernel_memory_layout(void)
 {
-       bool vmalloc_size_verified = false;
-       unsigned long vmemmap_off;
-       unsigned long vspace_left;
+       unsigned long vmemmap_start;
        unsigned long rte_size;
        unsigned long pages;
-       unsigned long vmax;
 
        pages = ident_map_size / PAGE_SIZE;
        /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
        vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
 
        /* choose kernel address space layout: 4 or 3 levels. */
-       vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
+       vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
        if (IS_ENABLED(CONFIG_KASAN) ||
            vmalloc_size > _REGION2_SIZE ||
-           vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
-               vmax = _REGION1_SIZE;
-       else
-               vmax = _REGION2_SIZE;
-
-       /* keep vmemmap_off aligned to a top level region table entry */
-       rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
-       MODULES_END = vmax;
-       if (is_prot_virt_host()) {
-               /*
-                * forcing modules and vmalloc area under the ultravisor
-                * secure storage limit, so that any vmalloc allocation
-                * we do could be used to back secure guest storage.
-                */
-               adjust_to_uv_max(&MODULES_END);
-       }
-
-#ifdef CONFIG_KASAN
-       if (MODULES_END < vmax) {
-               /* force vmalloc and modules below kasan shadow */
-               MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+           vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
+                   _REGION2_SIZE) {
+               MODULES_END = _REGION1_SIZE;
+               rte_size = _REGION2_SIZE;
        } else {
-               /*
-                * leave vmalloc and modules above kasan shadow but make
-                * sure they don't overlap with it
-                */
-               vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
-               vmalloc_size_verified = true;
-               vspace_left = KASAN_SHADOW_START;
+               MODULES_END = _REGION2_SIZE;
+               rte_size = _REGION3_SIZE;
        }
+       /*
+        * forcing modules and vmalloc area under the ultravisor
+        * secure storage limit, so that any vmalloc allocation
+        * we do could be used to back secure guest storage.
+        */
+       adjust_to_uv_max(&MODULES_END);
+#ifdef CONFIG_KASAN
+       /* force vmalloc and modules below kasan shadow */
+       MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
 #endif
        MODULES_VADDR = MODULES_END - MODULES_LEN;
        VMALLOC_END = MODULES_VADDR;
 
-       if (vmalloc_size_verified) {
-               VMALLOC_START = VMALLOC_END - vmalloc_size;
-       } else {
-               vmemmap_off = round_up(ident_map_size, rte_size);
-
-               if (vmemmap_off + vmemmap_size > VMALLOC_END ||
-                   vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
-                       /*
-                        * allow vmalloc area to occupy up to 1/2 of
-                        * the rest virtual space left.
-                        */
-                       vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
-               }
-               VMALLOC_START = VMALLOC_END - vmalloc_size;
-               vspace_left = VMALLOC_START;
-       }
+       /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
+       vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
+       VMALLOC_START = VMALLOC_END - vmalloc_size;
 
-       pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
+       /* split remaining virtual space between 1:1 mapping & vmemmap array */
+       pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
        pages = SECTION_ALIGN_UP(pages);
-       vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
-       /* keep vmemmap left most starting from a fresh region table entry */
-       vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
-       /* take care that identity map is lower then vmemmap */
-       ident_map_size = min(ident_map_size, vmemmap_off);
+       /* keep vmemmap_start aligned to a top level region table entry */
+       vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
+       /* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
+       vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
+       /* make sure identity map doesn't overlay with vmemmap */
+       ident_map_size = min(ident_map_size, vmemmap_start);
        vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
-       VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
-       vmemmap = (struct page *)vmemmap_off;
+       /* make sure vmemmap doesn't overlay with vmalloc area */
+       VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
+       vmemmap = (struct page *)vmemmap_start;
 }
 
 /*
index fd825097cf048b59d8cc7486ae345e9cbec08b07..b626bc6e0eaf9809a3e81fa06c398a9fb7f01cbc 100644 (file)
@@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -476,6 +475,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -489,6 +489,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -775,12 +777,14 @@ CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_RANDOM32_SELFTEST=y
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_HEADERS_INSTALL=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
@@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_KFENCE=y
+CONFIG_KFENCE_STATIC_KEYS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
@@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_DEBUG_ENTRY=y
 CONFIG_CIO_INJECT=y
 CONFIG_KUNIT=m
@@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
index c9c3cedff2d85634327af0d5c36d795da74ddb01..0056cab273723d9c0423f4aee4f397d6852c4797 100644 (file)
@@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -467,6 +466,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -480,6 +480,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
@@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_DEBUGFS=y
 CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
index aceccf3b9a882a57ecd15dc24324b0a63434f17c..eed3b9acfa71aaf59f8fdd2a2dd58517c0f2e144 100644 (file)
@@ -65,9 +65,11 @@ CONFIG_ZFCP=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
 # CONFIG_ZLIB_DFLTCC is not set
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_SYMBOLIC_ERRNAME is not set
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
index ea398a05f6432334154161606771c5b3d52e11a5..7f3c9ac34bd8d11d8bc9b3cf22a58a634f9c90ae 100644 (file)
@@ -74,6 +74,12 @@ void *kexec_file_add_components(struct kimage *image,
 int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
                         unsigned long addr);
 
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+       void *ipl_buf;
+};
+
 extern const struct kexec_file_ops s390_kexec_image_ops;
 extern const struct kexec_file_ops s390_kexec_elf_ops;
 
index e4dc64cc9c555c11abb747e642faf6fecc910d71..287bb88f76986e127388efd03c18d117bf4c417e 100644 (file)
 
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT               48
-#define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT          62
+#define ZPCI_IOMAP_ADDR_BASE           (1UL << ZPCI_IOMAP_ADDR_SHIFT)
 #define ZPCI_IOMAP_ADDR_OFF_MASK       ((1UL << ZPCI_IOMAP_SHIFT) - 1)
 #define ZPCI_IOMAP_MAX_ENTRIES                                                 \
-       ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+       (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
 #define ZPCI_IOMAP_ADDR_IDX_MASK                                               \
-       (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+       ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
 
 struct zpci_iomap_entry {
        u32 fh;
index d72a6df058d79f99f9c1702b28682a1b868f84c2..785d54c9350c4a93f710ac29d82e7525779fe595 100644 (file)
@@ -191,8 +191,8 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
                                return rc;
                } else {
                        /* Check for swapped kdump oldmem areas */
-                       if (oldmem_data.start && from - oldmem_data.size < oldmem_data.size) {
-                               from -= oldmem_data.size;
+                       if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+                               from -= oldmem_data.start;
                                len = min(count, oldmem_data.size - from);
                        } else if (oldmem_data.start && from < oldmem_data.size) {
                                len = min(count, oldmem_data.size - from);
index e2cc35775b99670d02e30a99b6e8c020ccbecc84..5ad1dde23dc59cf78aba25df665143434ba0fb59 100644 (file)
@@ -2156,7 +2156,7 @@ void *ipl_report_finish(struct ipl_report *report)
 
        buf = vzalloc(report->size);
        if (!buf)
-               return ERR_PTR(-ENOMEM);
+               goto out;
        ptr = buf;
 
        memcpy(ptr, report->ipib, report->ipib->hdr.len);
@@ -2195,6 +2195,7 @@ void *ipl_report_finish(struct ipl_report *report)
        }
 
        BUG_ON(ptr > buf + report->size);
+out:
        return buf;
 }
 
index 528edff085d9ab8d3efaa56f2dbf9a7a8ac4d059..9975ad200d74790d5aebc2c9c4f72d78a0bdf39b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/module_signature.h>
 #include <linux/verification.h>
+#include <linux/vmalloc.h>
 #include <asm/boot_data.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
@@ -170,6 +171,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
        struct kexec_buf buf;
        unsigned long addr;
        void *ptr, *end;
+       int ret;
 
        buf.image = image;
 
@@ -199,9 +201,13 @@ static int kexec_file_add_ipl_report(struct kimage *image,
                ptr += len;
        }
 
+       ret = -ENOMEM;
        buf.buffer = ipl_report_finish(data->report);
+       if (!buf.buffer)
+               goto out;
        buf.bufsz = data->report->size;
        buf.memsz = buf.bufsz;
+       image->arch.ipl_buf = buf.buffer;
 
        data->memsz += buf.memsz;
 
@@ -209,7 +215,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
                data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
        *lc_ipl_parmblock_ptr = (__u32)buf.mem;
 
-       return kexec_add_buffer(&buf);
+       ret = kexec_add_buffer(&buf);
+out:
+       return ret;
 }
 
 void *kexec_file_add_components(struct kimage *image,
@@ -322,3 +330,11 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
        }
        return 0;
 }
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+       vfree(image->arch.ipl_buf);
+       image->arch.ipl_buf = NULL;
+
+       return kexec_image_post_load_cleanup_default(image);
+}
index 40405f2304f1be2376f8c275aec9a3ecd83d0d4a..225ab2d0a4c60baab67bc7c8f834f157954eed1a 100644 (file)
@@ -606,7 +606,7 @@ static void __init setup_resources(void)
 
 static void __init setup_memory_end(void)
 {
-       memblock_remove(ident_map_size, ULONG_MAX);
+       memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
        max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
        pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
 }
@@ -637,14 +637,6 @@ static struct notifier_block kdump_mem_nb = {
 
 #endif
 
-/*
- * Make sure that the area above identity mapping is protected
- */
-static void __init reserve_above_ident_map(void)
-{
-       memblock_reserve(ident_map_size, ULONG_MAX);
-}
-
 /*
  * Reserve memory for kdump kernel to be loaded with kexec
  */
@@ -785,7 +777,6 @@ static void __init memblock_add_mem_detect_info(void)
        }
        memblock_set_bottom_up(false);
        memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
-       memblock_dump_all();
 }
 
 /*
@@ -826,9 +817,6 @@ static void __init setup_memory(void)
                storage_key_init_range(start, end);
 
        psw_set_key(PAGE_DEFAULT_KEY);
-
-       /* Only cosmetics */
-       memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
 static void __init relocate_amode31_section(void)
@@ -999,24 +987,24 @@ void __init setup_arch(char **cmdline_p)
        setup_control_program_code();
 
        /* Do some memory reservations *before* memory is added to memblock */
-       reserve_above_ident_map();
        reserve_kernel();
        reserve_initrd();
        reserve_certificate_list();
        reserve_mem_detect_info();
+       memblock_set_current_limit(ident_map_size);
        memblock_allow_resize();
 
        /* Get information about *all* installed memory */
        memblock_add_mem_detect_info();
 
        free_mem_detect_info();
+       setup_memory_end();
+       memblock_dump_all();
+       setup_memory();
 
        relocate_amode31_section();
        setup_cr();
-
        setup_uv();
-       setup_memory_end();
-       setup_memory();
        dma_contiguous_reserve(ident_map_size);
        vmcp_cma_reserve();
        if (MACHINE_HAS_EDAT2)
index df5261e5cfe1f28d6afc412ba5475e588f6c1dde..ed9c5c2eafad700ce45ad0178837ed3d1c9204d1 100644 (file)
 446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common    process_mrelease        sys_process_mrelease            sys_process_mrelease
+449  common    futex_waitv             sys_futex_waitv                 sys_futex_waitv
index 035705c9f23ea0b03a545e94a857075302164105..2b780786fc689f0d2c664efe9a216199b66ec801 100644 (file)
@@ -84,7 +84,7 @@ static void default_trap_handler(struct pt_regs *regs)
 {
        if (user_mode(regs)) {
                report_user_fault(regs, SIGSEGV, 0);
-               force_fatal_sig(SIGSEGV);
+               force_exit_sig(SIGSEGV);
        } else
                die(regs, "Unknown program exception");
 }
index e3e6ac5686df54dc09e2f77b38061dc0a93d42ad..245bddfe9bc0e6bfd8c1997bdef8f55924128f33 100644 (file)
@@ -22,7 +22,7 @@ KBUILD_AFLAGS_32 += -m31 -s
 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
 
-LDFLAGS_vdso32.so.dbg += -fPIC -shared -nostdlib -soname=linux-vdso32.so.1 \
+LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
        --hash-style=both --build-id=sha1 -melf_s390 -T
 
 $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
index 6568de2367010af85f0700fb3bf45226ebbc2e02..9e2b95a222a9838fb2f7c06eee6b104b29f53d93 100644 (file)
@@ -8,8 +8,9 @@ ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT
 include $(srctree)/lib/vdso/Makefile
 obj-vdso64 = vdso_user_wrapper.o note.o
 obj-cvdso64 = vdso64_generic.o getcpu.o
-CFLAGS_REMOVE_getcpu.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
-CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) $(CC_FLAGS_CHECK_STACK)
+CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE)
+CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)
 
 # Build rules
 
@@ -25,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64 -s
 
 KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
-ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
+ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
             --hash-style=both --build-id=sha1 -T
 
 $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
index c6257f625929cc14f472c797060e0fc4e1c254e8..14a18ba5ff2c8b8e810b06c5979ef654d48e4121 100644 (file)
@@ -585,6 +585,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                        r = KVM_MAX_VCPUS;
                else if (sclp.has_esca && sclp.has_64bscao)
                        r = KVM_S390_ESCA_CPU_SLOTS;
+               if (ext == KVM_CAP_NR_VCPUS)
+                       r = min_t(unsigned int, num_online_cpus(), r);
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
index cfc5f5557c06756236b935eacee5313da1f663d3..bc7973359ae2786b71f8386bf9903b0daaf66e1f 100644 (file)
@@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
                }
 
                /*
-                * trigger specification exception
+                * Trigger operation exception; use insn notation to bypass
+                * llvm's integrated assembler sanity checks.
                 */
                asm volatile(
-                       "       mvcl    %%r1,%%r1\n"
+                       "       .insn   e,0x0000\n"     /* illegal opcode */
                        "0:     nopr    %%r7\n"
                        EX_TABLE(0b, 0b)
                        :);
index c7a97f32432fb279caab4cebe7868c3664602681..481a664287e2e46cf2c087f8c1873cc5a9d57e05 100644 (file)
@@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range flush_icache_range
 extern void flush_icache_page(struct vm_area_struct *vma,
index 208f131659c5a3334c08733baaad8e65c5822ccc..d9539d28bdaa1cb1670059901f6d83e1c183ce28 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index cd677bc564a7e279f4f40b2a541749f1d7f5c5f9..ffab16369beac82f9fd6cfe243571888f9609a81 100644 (file)
@@ -244,7 +244,7 @@ static int setup_frame(struct ksignal *ksig, struct pt_regs *regs,
                get_sigframe(ksig, regs, sigframe_size);
 
        if (invalid_frame_pointer(sf, sigframe_size)) {
-               force_fatal_sig(SIGILL);
+               force_exit_sig(SIGILL);
                return -EINVAL;
        }
 
@@ -336,7 +336,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
        sf = (struct rt_signal_frame __user *)
                get_sigframe(ksig, regs, sigframe_size);
        if (invalid_frame_pointer(sf, sigframe_size)) {
-               force_fatal_sig(SIGILL);
+               force_exit_sig(SIGILL);
                return -EINVAL;
        }
 
index c37764dc764d13dd9c5ce694a8687083bf9bbc9c..46adabcb1720f69780a52cdc64e75bf77388db65 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index bbbd40cc6b282ad7e42ddffe174170ebccd68c7d..8f20862ccc83e77fd01d12cf4856d01455a37564 100644 (file)
@@ -122,7 +122,7 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
                if ((sp & 7) ||
                    copy_to_user((char __user *) sp, &tp->reg_window[window],
                                 sizeof(struct reg_window32))) {
-                       force_fatal_sig(SIGILL);
+                       force_exit_sig(SIGILL);
                        return;
                }
        }
index 95dd1ee01546ac04955f9297dfec9a60371aed0d..5c2ccb85f2efb863fac8fca28a5bcff0f07ab900 100644 (file)
@@ -193,7 +193,7 @@ config X86
        select HAVE_DYNAMIC_FTRACE_WITH_ARGS    if X86_64
        select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        select HAVE_SAMPLE_FTRACE_DIRECT        if X86_64
-       select HAVE_SAMPLE_FTRACE_MULTI_DIRECT  if X86_64
+       select HAVE_SAMPLE_FTRACE_DIRECT_MULTI  if X86_64
        select HAVE_EBPF_JIT
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
        select HAVE_EISA
@@ -1932,6 +1932,7 @@ config EFI
        depends on ACPI
        select UCS2_STRING
        select EFI_RUNTIME_WRAPPERS
+       select ARCH_USE_MEMREMAP_PROT
        help
          This enables the kernel to use EFI runtime services that are
          available (such as the EFI variable services).
index e38a4cf795d962bbb8312dc6241333b8939a6b8b..97b1f84bb53f808b9bcddba8af67732030171026 100644 (file)
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
        ud2
 1:
 #endif
+#ifdef CONFIG_XEN_PV
+       ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
        POP_REGS pop_rdi=0
 
        /*
@@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 .Lparanoid_entry_checkgs:
        /* EBX = 1 -> kernel GSBASE active, no restore required */
        movl    $1, %ebx
+
        /*
         * The kernel-enforced convention is a negative GSBASE indicates
         * a kernel value. No SWAPGS needed on entry and exit.
@@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
        movl    $MSR_GS_BASE, %ecx
        rdmsr
        testl   %edx, %edx
-       jns     .Lparanoid_entry_swapgs
-       ret
+       js      .Lparanoid_kernel_gsbase
 
-.Lparanoid_entry_swapgs:
+       /* EBX = 0 -> SWAPGS required on exit */
+       xorl    %ebx, %ebx
        swapgs
+.Lparanoid_kernel_gsbase:
 
-       /*
-        * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
-        * unconditional CR3 write, even in the PTI case.  So do an lfence
-        * to prevent GS speculation, regardless of whether PTI is enabled.
-        */
        FENCE_SWAPGS_KERNEL_ENTRY
-
-       /* EBX = 0 -> SWAPGS required on exit */
-       xorl    %ebx, %ebx
        ret
 SYM_CODE_END(paranoid_entry)
 
@@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
        pushq   %r12
        ret
 
-.Lerror_entry_done_lfence:
-       FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
-       ret
-
        /*
         * There are two places in the kernel that can potentially fault with
         * usergs. Handle them here.  B stepping K8s sometimes report a
@@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
         * .Lgs_change's error handler with kernel gsbase.
         */
        SWAPGS
-       FENCE_SWAPGS_USER_ENTRY
-       jmp .Lerror_entry_done
+
+       /*
+        * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+        * kernel or user gsbase.
+        */
+.Lerror_entry_done_lfence:
+       FENCE_SWAPGS_KERNEL_ENTRY
+       ret
 
 .Lbstep_iret:
        /* Fix truncated RIP */
index 0b6b277ee050b5084a4e74335c70ee637be5b21d..fd2ee9408e914a20a4b50a6cdb1249e297e913c2 100644 (file)
@@ -226,7 +226,7 @@ bool emulate_vsyscall(unsigned long error_code,
        if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
                warn_bad_vsyscall(KERN_DEBUG, regs,
                                  "seccomp tried to change syscall nr or ip");
-               force_fatal_sig(SIGSYS);
+               force_exit_sig(SIGSYS);
                return true;
        }
        regs->orig_ax = -1;
index 42cf01ecdd131753af53560f9ee745078d6c93e1..ec6444f2c9dcb536bdab611d7ad0b4bd2315d591 100644 (file)
@@ -2211,7 +2211,6 @@ intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int
        /* must not have branches... */
        local_irq_save(flags);
        __intel_pmu_disable_all(false); /* we don't care about BTS */
-       __intel_pmu_pebs_disable_all();
        __intel_pmu_lbr_disable();
        /*            ... until here */
        return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
@@ -2225,7 +2224,6 @@ intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned
        /* must not have branches... */
        local_irq_save(flags);
        __intel_pmu_disable_all(false); /* we don't care about BTS */
-       __intel_pmu_pebs_disable_all();
        __intel_pmu_arch_lbr_disable();
        /*            ... until here */
        return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
index eb2c6cea9d0d5001b214ad3b46fe384dec148697..3660f698fb2aa1dc563565c244c2082e2b32c8a1 100644 (file)
@@ -3608,6 +3608,9 @@ static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;
+       /* Any of the CHA events may be filtered by Thread/Core-ID.*/
+       if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
+               idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
 
        for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
@@ -3675,6 +3678,7 @@ static struct event_constraint skx_uncore_iio_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
        UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
        UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
        EVENT_CONSTRAINT_END
 };
 
@@ -4525,6 +4529,13 @@ static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
        pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
 }
 
+static struct event_constraint snr_uncore_iio_constraints[] = {
+       UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
+       UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
+       UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
+       EVENT_CONSTRAINT_END
+};
+
 static struct intel_uncore_type snr_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
@@ -4536,6 +4547,7 @@ static struct intel_uncore_type snr_uncore_iio = {
        .event_mask_ext         = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
        .box_ctl                = SNR_IIO_MSR_PMON_BOX_CTL,
        .msr_offset             = SNR_IIO_MSR_OFFSET,
+       .constraints            = snr_uncore_iio_constraints,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &snr_uncore_iio_format_group,
        .attr_update            = snr_iio_attr_update,
index 24f4a06ac46acd474574d79d6c849927ba40938c..96eb7db31c8ed0e03bc93a8c4594376a20f1f6da 100644 (file)
@@ -177,6 +177,9 @@ void set_hv_tscchange_cb(void (*cb)(void))
                return;
        }
 
+       if (!hv_vp_index)
+               return;
+
        hv_reenlightenment_cb = cb;
 
        /* Make sure callback is registered before we write to MSRs */
@@ -383,20 +386,13 @@ static void __init hv_get_partition_id(void)
  */
 void __init hyperv_init(void)
 {
-       u64 guest_id, required_msrs;
+       u64 guest_id;
        union hv_x64_msr_hypercall_contents hypercall_msr;
        int cpuhp;
 
        if (x86_hyper_type != X86_HYPER_MS_HYPERV)
                return;
 
-       /* Absolutely required MSRs */
-       required_msrs = HV_MSR_HYPERCALL_AVAILABLE |
-               HV_MSR_VP_INDEX_AVAILABLE;
-
-       if ((ms_hyperv.features & required_msrs) != required_msrs)
-               return;
-
        if (hv_common_init())
                return;
 
index 6053674f91320a0fceaedb5af18bb861ad42b968..c2767a6a387e7375a86994914d27f418b23effcb 100644 (file)
@@ -102,12 +102,6 @@ extern void switch_fpu_return(void);
  */
 extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
 
-/*
- * Tasks that are not using SVA have mm->pasid set to zero to note that they
- * will not have the valid bit set in MSR_IA32_PASID while they are running.
- */
-#define PASID_DISABLED 0
-
 /* Trap handling */
 extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
 extern void fpu_sync_fpstate(struct fpu *fpu);
index 5a0bcf8b78d7c2026e93715153decec133a33869..048b6d5aff504f394baeca8d3bf1f39e816bfdd5 100644 (file)
 #define INTEL_FAM6_ALDERLAKE           0x97    /* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L         0x9A    /* Golden Cove / Gracemont */
 
-#define INTEL_FAM6_RAPTOR_LAKE         0xB7
+#define INTEL_FAM6_RAPTORLAKE          0xB7
 
 /* "Small Core" Processors (Atom) */
 
index e5d8700319cc08e99b4058569f8015fb345ad7de..2164b9f4c7b0f20e4fe747e124817c462b9e5387 100644 (file)
@@ -97,7 +97,7 @@
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT      KVM_ARCH_REQ(26)
 #define KVM_REQ_TLB_FLUSH_GUEST \
-       KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+       KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY              KVM_ARCH_REQ(28)
 #define KVM_REQ_MSR_FILTER_CHANGED     KVM_ARCH_REQ(29)
 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
@@ -363,6 +363,7 @@ union kvm_mmu_extended_role {
                unsigned int cr4_smap:1;
                unsigned int cr4_smep:1;
                unsigned int cr4_la57:1;
+               unsigned int efer_lma:1;
        };
 };
 
@@ -1035,6 +1036,7 @@ struct kvm_x86_msr_filter {
 #define APICV_INHIBIT_REASON_PIT_REINJ  4
 #define APICV_INHIBIT_REASON_X2APIC    5
 #define APICV_INHIBIT_REASON_BLOCKIRQ  6
+#define APICV_INHIBIT_REASON_ABSENT    7
 
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
index 2cef6c5a52c2a71a550393c78111dfd3242f47d1..6acaf5af0a3d0657ed48b876c95ef0ad73358c71 100644 (file)
 
 #define GHCB_RESP_CODE(v)              ((v) & GHCB_MSR_INFO_MASK)
 
+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */
+#define GHCB_ERR_NOT_REGISTERED                1
+#define GHCB_ERR_INVALID_USAGE         2
+#define GHCB_ERR_INVALID_SCRATCH_AREA  3
+#define GHCB_ERR_MISSING_INPUT         4
+#define GHCB_ERR_INVALID_INPUT         5
+#define GHCB_ERR_INVALID_EVENT         6
+
 #endif
index 0575f5863b7fef48f9ae9362fbb475d1a077e93e..e5e0fe10c692406165e52b7e24d5361885b06384 100644 (file)
@@ -281,13 +281,13 @@ HYPERVISOR_callback_op(int cmd, void *arg)
        return _hypercall2(int, callback_op, cmd, arg);
 }
 
-static inline int
+static __always_inline int
 HYPERVISOR_set_debugreg(int reg, unsigned long value)
 {
        return _hypercall2(int, set_debugreg, reg, value);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 HYPERVISOR_get_debugreg(int reg)
 {
        return _hypercall1(unsigned long, get_debugreg, reg);
index 4957f59deb40bdc39a9782bcbd12246ee61ef432..5adab895127e190a8fd9259c839604f943c4de18 100644 (file)
@@ -64,6 +64,7 @@ void xen_arch_unregister_cpu(int num);
 
 #ifdef CONFIG_PVH
 void __init xen_pvh_init(struct boot_params *boot_params);
+void __init mem_map_via_hcall(struct boot_params *boot_params_p);
 #endif
 
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
index 4794b716ec79e0e96d2f9e88e6f5f4b4ba1153f5..ff55df60228f728c0ea1f5b49eb56316de81661b 100644 (file)
@@ -163,12 +163,22 @@ static uint32_t  __init ms_hyperv_platform(void)
        cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
              &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
 
-       if (eax >= HYPERV_CPUID_MIN &&
-           eax <= HYPERV_CPUID_MAX &&
-           !memcmp("Microsoft Hv", hyp_signature, 12))
-               return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+       if (eax < HYPERV_CPUID_MIN || eax > HYPERV_CPUID_MAX ||
+           memcmp("Microsoft Hv", hyp_signature, 12))
+               return 0;
 
-       return 0;
+       /* HYPERCALL and VP_INDEX MSRs are mandatory for all features. */
+       eax = cpuid_eax(HYPERV_CPUID_FEATURES);
+       if (!(eax & HV_MSR_HYPERCALL_AVAILABLE)) {
+               pr_warn("x86/hyperv: HYPERCALL MSR not available.\n");
+               return 0;
+       }
+       if (!(eax & HV_MSR_VP_INDEX_AVAILABLE)) {
+               pr_warn("x86/hyperv: VP_INDEX MSR not available.\n");
+               return 0;
+       }
+
+       return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
 }
 
 static unsigned char hv_get_nmi_reason(void)
index 63d3de02bbccb50ba7ab015227ac4cc680954877..8471a8b9b48e809615784ccbd34ec8dd62ac198b 100644 (file)
@@ -28,8 +28,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
 static LIST_HEAD(sgx_active_page_list);
 static DEFINE_SPINLOCK(sgx_reclaimer_lock);
 
-/* The free page list lock protected variables prepend the lock. */
-static unsigned long sgx_nr_free_pages;
+static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
 
 /* Nodes with one or more EPC sections. */
 static nodemask_t sgx_numa_mask;
@@ -403,14 +402,15 @@ skip:
 
                spin_lock(&node->lock);
                list_add_tail(&epc_page->list, &node->free_page_list);
-               sgx_nr_free_pages++;
                spin_unlock(&node->lock);
+               atomic_long_inc(&sgx_nr_free_pages);
        }
 }
 
 static bool sgx_should_reclaim(unsigned long watermark)
 {
-       return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list);
+       return atomic_long_read(&sgx_nr_free_pages) < watermark &&
+              !list_empty(&sgx_active_page_list);
 }
 
 static int ksgxd(void *p)
@@ -471,9 +471,9 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
 
        page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
        list_del_init(&page->list);
-       sgx_nr_free_pages--;
 
        spin_unlock(&node->lock);
+       atomic_long_dec(&sgx_nr_free_pages);
 
        return page;
 }
@@ -625,9 +625,9 @@ void sgx_free_epc_page(struct sgx_epc_page *page)
        spin_lock(&node->lock);
 
        list_add_tail(&page->list, &node->free_page_list);
-       sgx_nr_free_pages++;
 
        spin_unlock(&node->lock);
+       atomic_long_inc(&sgx_nr_free_pages);
 }
 
 static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
index d5958278eba6d311d3af758e2ff9e99f60fa3b8a..91d4b6de58abef38792eef58546b525eae278cb3 100644 (file)
@@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
                                      struct fpstate *fpstate)
 {
        struct xregs_state __user *x = buf;
-       struct _fpx_sw_bytes sw_bytes;
+       struct _fpx_sw_bytes sw_bytes = {};
        u32 xfeatures;
        int err;
 
index e9ee8b5263198e825ce7446b0a5125978667b920..04143a653a8ad556c965009b937c5f457d9a94d7 100644 (file)
@@ -964,6 +964,9 @@ unsigned long __get_wchan(struct task_struct *p)
        struct unwind_state state;
        unsigned long addr = 0;
 
+       if (!try_get_task_stack(p))
+               return 0;
+
        for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
@@ -974,6 +977,8 @@ unsigned long __get_wchan(struct task_struct *p)
                break;
        }
 
+       put_task_stack(p);
+
        return addr;
 }
 
index 49b596db5631e83ad216f737d97ec2c308a08967..6a190c7f4d71b05fe1dc9a476424a647999eeb82 100644 (file)
@@ -742,6 +742,28 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
        return 0;
 }
 
+static char * __init prepare_command_line(void)
+{
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+       strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+       if (builtin_cmdline[0]) {
+               /* append boot loader cmdline to builtin */
+               strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+               strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+       }
+#endif
+#endif
+
+       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+
+       parse_early_param();
+
+       return command_line;
+}
+
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -830,6 +852,23 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
+       /*
+        * x86_configure_nx() is called before parse_early_param() (called by
+        * prepare_command_line()) to detect whether hardware doesn't support
+        * NX (so that the early EHCI debug console setup can safely call
+        * set_fixmap()). It may then be called again from within noexec_setup()
+        * during parsing early parameters to honor the respective command line
+        * option.
+        */
+       x86_configure_nx();
+
+       /*
+        * This parses early params and it needs to run before
+        * early_reserve_memory() because latter relies on such settings
+        * supplied as early params.
+        */
+       *cmdline_p = prepare_command_line();
+
        /*
         * Do some memory reservations *before* memory is added to memblock, so
         * memblock allocations won't overwrite it.
@@ -863,33 +902,6 @@ void __init setup_arch(char **cmdline_p)
        bss_resource.start = __pa_symbol(__bss_start);
        bss_resource.end = __pa_symbol(__bss_stop)-1;
 
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
-       strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
-       if (builtin_cmdline[0]) {
-               /* append boot loader cmdline to builtin */
-               strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
-               strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-       }
-#endif
-#endif
-
-       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-       *cmdline_p = command_line;
-
-       /*
-        * x86_configure_nx() is called before parse_early_param() to detect
-        * whether hardware doesn't support NX (so that the early EHCI debug
-        * console setup can safely call set_fixmap()). It may then be called
-        * again from within noexec_setup() during parsing early parameters
-        * to honor the respective command line option.
-        */
-       x86_configure_nx();
-
-       parse_early_param();
-
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
index 74f0ec95538486a8dba1d4fab55866ed1b646afd..a9fc2ac7a8bd59cd06ca1c6764cabcd014a61aa3 100644 (file)
@@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
                                   char *dst, char *buf, size_t size)
 {
        unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
-       char __user *target = (char __user *)dst;
-       u64 d8;
-       u32 d4;
-       u16 d2;
-       u8  d1;
 
        /*
         * This function uses __put_user() independent of whether kernel or user
@@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
         * instructions here would cause infinite nesting.
         */
        switch (size) {
-       case 1:
+       case 1: {
+               u8 d1;
+               u8 __user *target = (u8 __user *)dst;
+
                memcpy(&d1, buf, 1);
                if (__put_user(d1, target))
                        goto fault;
                break;
-       case 2:
+       }
+       case 2: {
+               u16 d2;
+               u16 __user *target = (u16 __user *)dst;
+
                memcpy(&d2, buf, 2);
                if (__put_user(d2, target))
                        goto fault;
                break;
-       case 4:
+       }
+       case 4: {
+               u32 d4;
+               u32 __user *target = (u32 __user *)dst;
+
                memcpy(&d4, buf, 4);
                if (__put_user(d4, target))
                        goto fault;
                break;
-       case 8:
+       }
+       case 8: {
+               u64 d8;
+               u64 __user *target = (u64 __user *)dst;
+
                memcpy(&d8, buf, 8);
                if (__put_user(d8, target))
                        goto fault;
                break;
+       }
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
@@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
                                  char *src, char *buf, size_t size)
 {
        unsigned long error_code = X86_PF_PROT;
-       char __user *s = (char __user *)src;
-       u64 d8;
-       u32 d4;
-       u16 d2;
-       u8  d1;
 
        /*
         * This function uses __get_user() independent of whether kernel or user
@@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
         * instructions here would cause infinite nesting.
         */
        switch (size) {
-       case 1:
+       case 1: {
+               u8 d1;
+               u8 __user *s = (u8 __user *)src;
+
                if (__get_user(d1, s))
                        goto fault;
                memcpy(buf, &d1, 1);
                break;
-       case 2:
+       }
+       case 2: {
+               u16 d2;
+               u16 __user *s = (u16 __user *)src;
+
                if (__get_user(d2, s))
                        goto fault;
                memcpy(buf, &d2, 2);
                break;
-       case 4:
+       }
+       case 4: {
+               u32 d4;
+               u32 __user *s = (u32 __user *)src;
+
                if (__get_user(d4, s))
                        goto fault;
                memcpy(buf, &d4, 4);
                break;
-       case 8:
+       }
+       case 8: {
+               u64 d8;
+               u64 __user *s = (u64 __user *)src;
                if (__get_user(d8, s))
                        goto fault;
                memcpy(buf, &d8, 8);
                break;
+       }
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
index ac2909f0cab3478c9428fc2a609759c6dd5af3ef..617012f4619f0ba76419207720fb3f5b326703b9 100644 (file)
@@ -579,6 +579,17 @@ static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
        { NULL, },
 };
 
+static struct sched_domain_topology_level x86_hybrid_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+       { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+       { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
+#endif
+       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+       { NULL, },
+};
+
 static struct sched_domain_topology_level x86_topology[] = {
 #ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
@@ -1469,8 +1480,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 
        calculate_max_logical_packages();
 
+       /* XXX for now assume numa-in-package and hybrid don't overlap */
        if (x86_has_numa_in_package)
                set_sched_topology(x86_numa_in_package_topology);
+       if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+               set_sched_topology(x86_hybrid_topology);
 
        nmi_selftest();
        impress_friends();
index 2e076a459a0c084aa279f32f3fc644604713e595..a698196377be9bf650eb8bc1ea28692068568626 100644 (file)
@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
+static void __init tsc_disable_clocksource_watchdog(void)
+{
+       clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+       clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
 static void __init check_system_tsc_reliable(void)
 {
 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
 #endif
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                tsc_clocksource_reliable = 1;
+
+       /*
+        * Disable the clocksource watchdog when the system has:
+        *  - TSC running at constant frequency
+        *  - TSC which does not stop in C-States
+        *  - the TSC_ADJUST register which allows to detect even minimal
+        *    modifications
+        *  - not more than two sockets. As the number of sockets cannot be
+        *    evaluated at the early boot stage where this has to be
+        *    invoked, check the number of online memory nodes as a
+        *    fallback solution which is an reasonable estimate.
+        */
+       if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+           boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+           boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+           nr_online_nodes <= 2)
+               tsc_disable_clocksource_watchdog();
 }
 
 /*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
        if (tsc_unstable)
                goto unreg;
 
-       if (tsc_clocksource_reliable || no_tsc_watchdog)
-               clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
        if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
                clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
 
@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
        }
 
        if (tsc_clocksource_reliable || no_tsc_watchdog)
-               clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+               tsc_disable_clocksource_watchdog();
 
        clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
        detect_art();
index 50a4515fe0ad15ec241c257735022287094a4514..9452dc9664b51fddcfaeb6274935c91885814fda 100644 (file)
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
        }
 }
 
+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL            (HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+       int next_cpu;
+
+       tsc_verify_tsc_adjust(false);
+
+       /* Run the check for all onlined CPUs in turn */
+       next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+       if (next_cpu >= nr_cpu_ids)
+               next_cpu = cpumask_first(cpu_online_mask);
+
+       tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+       add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+       if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+               return 0;
+
+       timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+       tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+       add_timer(&tsc_sync_check_timer);
+
+       return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
                                   unsigned int cpu, bool bootcpu)
 {
index cce1c89cb7dfd4897c9256ba1d24516303031e96..c21bcd668284259d8f8833205a936106a8010af6 100644 (file)
@@ -160,7 +160,7 @@ Efault_end:
        user_access_end();
 Efault:
        pr_alert("could not access userspace vm86 info\n");
-       force_fatal_sig(SIGSEGV);
+       force_exit_sig(SIGSEGV);
        goto exit_vm86;
 }
 
index e19dabf1848b449e1d02a3dfcab17d027634bfc0..07e9215e911d74b911efb98a8eb863a7cc4b8a6b 100644 (file)
@@ -125,7 +125,7 @@ static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
        }
 }
 
-struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
+static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
 {
        u32 base = vcpu->arch.kvm_cpuid_base;
 
index 4a555f32885a8c489c7f70750956d08fa545d0f4..8d8c1cc7cb539a048e5409512aefb0b941a5f526 100644 (file)
@@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 
                all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
 
+               if (all_cpus)
+                       goto check_and_send_ipi;
+
                if (!sparse_banks_len)
                        goto ret_success;
 
-               if (!all_cpus &&
-                   kvm_read_guest(kvm,
+               if (kvm_read_guest(kvm,
                                   hc->ingpa + offsetof(struct hv_send_ipi_ex,
                                                        vp_set.bank_contents),
                                   sparse_banks,
@@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }
 
+check_and_send_ipi:
        if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
                return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
@@ -2022,7 +2025,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
 {
        bool longmode;
 
-       longmode = is_64_bit_mode(vcpu);
+       longmode = is_64_bit_hypercall(vcpu);
        if (longmode)
                kvm_rax_write(vcpu, result);
        else {
@@ -2171,7 +2174,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
        }
 
 #ifdef CONFIG_X86_64
-       if (is_64_bit_mode(vcpu)) {
+       if (is_64_bit_hypercall(vcpu)) {
                hc.param = kvm_rcx_read(vcpu);
                hc.ingpa = kvm_rdx_read(vcpu);
                hc.outgpa = kvm_r8_read(vcpu);
index e66e620c3bed9eafde705b0315ff977fb158d031..539333ac4b38082f01e0407908441aa39b9ca136 100644 (file)
@@ -81,7 +81,6 @@ struct kvm_ioapic {
        unsigned long irq_states[IOAPIC_NUM_PINS];
        struct kvm_io_device dev;
        struct kvm *kvm;
-       void (*ack_notifier)(void *opaque, int irq);
        spinlock_t lock;
        struct rtc_status rtc_status;
        struct delayed_work eoi_inject;
index 650642b18d151083e7120b81ef51dae690090333..c2d7cfe82d004b1ae4d9518b0a73a0755114bedd 100644 (file)
@@ -56,7 +56,6 @@ struct kvm_pic {
        struct kvm_io_device dev_master;
        struct kvm_io_device dev_slave;
        struct kvm_io_device dev_elcr;
-       void (*ack_notifier)(void *opaque, int irq);
        unsigned long irq_states[PIC_NUM_PINS];
 };
 
index 759952dd122284b183c3735bad40717495920304..f206fc35deff6ef4d0a236eddd4476635d4e5b1f 100644 (file)
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
        int highest_irr;
-       if (apic->vcpu->arch.apicv_active)
+       if (kvm_x86_ops.sync_pir_to_irr)
                highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
index 33794379949e01b8e0ed35947acefde61d888816..e2e1d012df2269d26524f396c286b44d93e947bf 100644 (file)
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
                flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
 
        if (is_tdp_mmu_enabled(kvm))
-               flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+               flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 
        return flush;
 }
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
 
 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-       return sp->role.invalid ||
+       if (sp->role.invalid)
+               return true;
+
+       /* TDP MMU pages do not use the MMU generation. */
+       return !sp->tdp_mmu_page &&
               unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
        iterator->shadow_addr = root;
        iterator->level = vcpu->arch.mmu->shadow_root_level;
 
-       if (iterator->level == PT64_ROOT_4LEVEL &&
+       if (iterator->level >= PT64_ROOT_4LEVEL &&
            vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
            !vcpu->arch.mmu->direct_map)
-               --iterator->level;
+               iterator->level = PT32E_ROOT_LEVEL;
 
        if (iterator->level == PT32E_ROOT_LEVEL) {
                /*
@@ -3976,6 +3980,20 @@ out_retry:
        return true;
 }
 
+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+                               struct kvm_page_fault *fault, int mmu_seq)
+{
+       if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+               return true;
+
+       return fault->slot &&
+              mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        else
                write_lock(&vcpu->kvm->mmu_lock);
 
-       if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+       if (is_page_fault_stale(vcpu, fault, mmu_seq))
                goto out_unlock;
+
        r = make_mmu_pages_available(vcpu);
        if (r)
                goto out_unlock;
@@ -4682,6 +4701,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
                /* PKEY and LA57 are active iff long mode is active. */
                ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
                ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+               ext.efer_lma = ____is_efer_lma(regs);
        }
 
        ext.valid = 1;
@@ -4854,7 +4874,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
        struct kvm_mmu *context = &vcpu->arch.guest_mmu;
        struct kvm_mmu_role_regs regs = {
                .cr0 = cr0,
-               .cr4 = cr4,
+               .cr4 = cr4 & ~X86_CR4_PKE,
                .efer = efer,
        };
        union kvm_mmu_role new_role;
@@ -4918,7 +4938,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
        context->direct_map = false;
 
        update_permission_bitmask(context, true);
-       update_pkru_bitmask(context);
+       context->pkru_mask = 0;
        reset_rsvds_bits_mask_ept(vcpu, context, execonly);
        reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
@@ -5024,6 +5044,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
        /*
         * Invalidate all MMU roles to force them to reinitialize as CPUID
         * information is factored into reserved bit calculations.
+        *
+        * Correctly handling multiple vCPU models (with respect to paging and
+        * physical address properties) in a single VM would require tracking
+        * all relevant CPUID information in kvm_mmu_page_role. That is very
+        * undesirable as it would increase the memory requirements for
+        * gfn_track (see struct kvm_mmu_page_role comments).  For now that
+        * problem is swept under the rug; KVM's CPUID API is horrific and
+        * it's all but impossible to solve it without introducing a new API.
         */
        vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
        vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5031,24 +5059,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 
        /*
-        * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
-        * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
-        * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
-        * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
-        * sweep the problem under the rug.
-        *
-        * KVM's horrific CPUID ABI makes the problem all but impossible to
-        * solve, as correctly handling multiple vCPU models (with respect to
-        * paging and physical address properties) in a single VM would require
-        * tracking all relevant CPUID information in kvm_mmu_page_role.  That
-        * is very undesirable as it would double the memory requirements for
-        * gfn_track (see struct kvm_mmu_page_role comments), and in practice
-        * no sane VMM mucks with the core vCPU model on the fly.
+        * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
+        * kvm_arch_vcpu_ioctl().
         */
-       if (vcpu->arch.last_vmentry_cpu != -1) {
-               pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
-               pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
-       }
+       KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
 }
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -5368,7 +5382,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
        ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5853,8 +5867,6 @@ restart:
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *slot)
 {
-       bool flush = false;
-
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
                /*
@@ -5862,17 +5874,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                 * logging at a 4k granularity and never creates collapsible
                 * 2m SPTEs during dirty logging.
                 */
-               flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-               if (flush)
+               if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
                        kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
                write_unlock(&kvm->mmu_lock);
        }
 
        if (is_tdp_mmu_enabled(kvm)) {
                read_lock(&kvm->mmu_lock);
-               flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
-               if (flush)
-                       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+               kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
                read_unlock(&kvm->mmu_lock);
        }
 }
@@ -6181,23 +6190,46 @@ void kvm_mmu_module_exit(void)
        mmu_audit_disable();
 }
 
+/*
+ * Calculate the effective recovery period, accounting for '0' meaning "let KVM
+ * select a halving time of 1 hour".  Returns true if recovery is enabled.
+ */
+static bool calc_nx_huge_pages_recovery_period(uint *period)
+{
+       /*
+        * Use READ_ONCE to get the params, this may be called outside of the
+        * param setters, e.g. by the kthread to compute its next timeout.
+        */
+       bool enabled = READ_ONCE(nx_huge_pages);
+       uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+       if (!enabled || !ratio)
+               return false;
+
+       *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+       if (!*period) {
+               /* Make sure the period is not less than one second.  */
+               ratio = min(ratio, 3600u);
+               *period = 60 * 60 * 1000 / ratio;
+       }
+       return true;
+}
+
 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 {
        bool was_recovery_enabled, is_recovery_enabled;
        uint old_period, new_period;
        int err;
 
-       was_recovery_enabled = nx_huge_pages_recovery_ratio;
-       old_period = nx_huge_pages_recovery_period_ms;
+       was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
 
        err = param_set_uint(val, kp);
        if (err)
                return err;
 
-       is_recovery_enabled = nx_huge_pages_recovery_ratio;
-       new_period = nx_huge_pages_recovery_period_ms;
+       is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
 
-       if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
+       if (is_recovery_enabled &&
            (!was_recovery_enabled || old_period > new_period)) {
                struct kvm *kvm;
 
@@ -6261,18 +6293,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 
 static long get_nx_lpage_recovery_timeout(u64 start_time)
 {
-       uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
-       uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+       bool enabled;
+       uint period;
 
-       if (!period && ratio) {
-               /* Make sure the period is not less than one second.  */
-               ratio = min(ratio, 3600u);
-               period = 60 * 60 * 1000 / ratio;
-       }
+       enabled = calc_nx_huge_pages_recovery_period(&period);
 
-       return READ_ONCE(nx_huge_pages) && ratio
-               ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
-               : MAX_SCHEDULE_TIMEOUT;
+       return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+                      : MAX_SCHEDULE_TIMEOUT;
 }
 
 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
index f87d36898c44e33db8dafabf0f603ae9e7ac3458..708a5d297fe1e370c9912da506cc316b84a9bb83 100644 (file)
@@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 
        r = RET_PF_RETRY;
        write_lock(&vcpu->kvm->mmu_lock);
-       if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+
+       if (is_page_fault_stale(vcpu, fault, mmu_seq))
                goto out_unlock;
 
        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
index a54c3491af42c9fba8a894619ee7bd5c7f3f4628..1db8496259add5411626a99311d4ed6b372aed5d 100644 (file)
@@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
        struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
        int level = sp->role.level;
        gfn_t base_gfn = sp->gfn;
-       u64 old_child_spte;
-       u64 *sptep;
-       gfn_t gfn;
        int i;
 
        trace_kvm_mmu_prepare_zap_page(sp);
@@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
        tdp_mmu_unlink_page(kvm, sp, shared);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-               sptep = rcu_dereference(pt) + i;
-               gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+               u64 *sptep = rcu_dereference(pt) + i;
+               gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+               u64 old_child_spte;
 
                if (shared) {
                        /*
@@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
                                    shared);
        }
 
-       kvm_flush_remote_tlbs_with_address(kvm, gfn,
+       kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
                                           KVM_PAGES_PER_HPAGE(level + 1));
 
        call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -1033,9 +1031,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
        struct kvm_mmu_page *root;
 
-       for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
-               flush |= zap_gfn_range(kvm, root, range->start, range->end,
-                                      range->may_block, flush, false);
+       for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+               flush = zap_gfn_range(kvm, root, range->start, range->end,
+                                     range->may_block, flush, false);
 
        return flush;
 }
@@ -1364,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+                                      const struct kvm_memory_slot *slot)
 {
        gfn_t start = slot->base_gfn;
        gfn_t end = start + slot->npages;
@@ -1378,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 
        tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-               if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-                       flush = false;
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
-               }
 
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
@@ -1393,6 +1388,7 @@ retry:
                                                            pfn, PG_LEVEL_NUM))
                        continue;
 
+               /* Note, a successful atomic zap also does a remote TLB flush. */
                if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
                        /*
                         * The iter must explicitly re-read the SPTE because
@@ -1401,30 +1397,24 @@ retry:
                        iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
                        goto retry;
                }
-               flush = true;
        }
 
        rcu_read_unlock();
-
-       return flush;
 }
 
 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot)
 {
        struct kvm_mmu_page *root;
 
        lockdep_assert_held_read(&kvm->mmu_lock);
 
        for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-               flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-       return flush;
+               zap_collapsible_spte_range(kvm, root, slot);
 }
 
 /*
index 476b133544dd94e8465258c91cfdba0f2ddb8029..3899004a5d91e70b8821656cc0715519dc770d36 100644 (file)
@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn,
index affc0ea98d302286303188c91bfdb73bb2cef7e2..8f9af7b7dbbe479fbf914d7c27afdb2d77eb7513 100644 (file)
@@ -900,6 +900,7 @@ out:
 bool svm_check_apicv_inhibit_reasons(ulong bit)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+                         BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_NESTED) |
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
@@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       int cpu = get_cpu();
 
+       WARN_ON(cpu != vcpu->cpu);
        svm->avic_is_running = is_run;
 
-       if (!kvm_vcpu_apicv_active(vcpu))
-               return;
-
-       if (is_run)
-               avic_vcpu_load(vcpu, vcpu->cpu);
-       else
-               avic_vcpu_put(vcpu);
+       if (kvm_vcpu_apicv_active(vcpu)) {
+               if (is_run)
+                       avic_vcpu_load(vcpu, cpu);
+               else
+                       avic_vcpu_put(vcpu);
+       }
+       put_cpu();
 }
 
 void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
index 871c426ec389a98632307b16d661c2abf5b856b0..b4095dfeeee62fa1702c3aa48d2af059d03e4d28 100644 (file)
@@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
 
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
-       pmu->reserved_bits = 0xffffffff00200000ull;
+       pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->version = 1;
        /* not applicable to AMD; but clean them to prevent any fall out */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
index 902c52a8dd0c9155a693568d9fe2a62338f9c9fd..7656a2c5662a68425716469b4f94cb164368cb01 100644 (file)
@@ -237,7 +237,6 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-       bool es_active = argp->id == KVM_SEV_ES_INIT;
        int asid, ret;
 
        if (kvm->created_vcpus)
@@ -247,7 +246,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (unlikely(sev->active))
                return ret;
 
-       sev->es_active = es_active;
+       sev->active = true;
+       sev->es_active = argp->id == KVM_SEV_ES_INIT;
        asid = sev_asid_new(sev);
        if (asid < 0)
                goto e_no_asid;
@@ -257,8 +257,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (ret)
                goto e_free;
 
-       sev->active = true;
-       sev->asid = asid;
        INIT_LIST_HEAD(&sev->regions_list);
 
        return 0;
@@ -268,6 +266,7 @@ e_free:
        sev->asid = 0;
 e_no_asid:
        sev->es_active = false;
+       sev->active = false;
        return ret;
 }
 
@@ -1530,7 +1529,7 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
        return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
 }
 
-static bool cmd_allowed_from_miror(u32 cmd_id)
+static bool is_cmd_allowed_from_mirror(u32 cmd_id)
 {
        /*
         * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
@@ -1544,28 +1543,50 @@ static bool cmd_allowed_from_miror(u32 cmd_id)
        return false;
 }
 
-static int sev_lock_for_migration(struct kvm *kvm)
+static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+       struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+       int r = -EBUSY;
+
+       if (dst_kvm == src_kvm)
+               return -EINVAL;
 
        /*
-        * Bail if this VM is already involved in a migration to avoid deadlock
-        * between two VMs trying to migrate to/from each other.
+        * Bail if these VMs are already involved in a migration to avoid
+        * deadlock between two VMs trying to migrate to/from each other.
         */
-       if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+       if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
                return -EBUSY;
 
-       mutex_lock(&kvm->lock);
+       if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
+               goto release_dst;
 
+       r = -EINTR;
+       if (mutex_lock_killable(&dst_kvm->lock))
+               goto release_src;
+       if (mutex_lock_killable(&src_kvm->lock))
+               goto unlock_dst;
        return 0;
+
+unlock_dst:
+       mutex_unlock(&dst_kvm->lock);
+release_src:
+       atomic_set_release(&src_sev->migration_in_progress, 0);
+release_dst:
+       atomic_set_release(&dst_sev->migration_in_progress, 0);
+       return r;
 }
 
-static void sev_unlock_after_migration(struct kvm *kvm)
+static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+       struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
 
-       mutex_unlock(&kvm->lock);
-       atomic_set_release(&sev->migration_in_progress, 0);
+       mutex_unlock(&dst_kvm->lock);
+       mutex_unlock(&src_kvm->lock);
+       atomic_set_release(&dst_sev->migration_in_progress, 0);
+       atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
 
@@ -1608,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
        dst->asid = src->asid;
        dst->handle = src->handle;
        dst->pages_locked = src->pages_locked;
+       dst->enc_context_owner = src->enc_context_owner;
 
        src->asid = 0;
        src->active = false;
        src->handle = 0;
        src->pages_locked = 0;
+       src->enc_context_owner = NULL;
 
-       INIT_LIST_HEAD(&dst->regions_list);
-       list_replace_init(&src->regions_list, &dst->regions_list);
+       list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
 }
 
 static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
@@ -1667,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
        bool charged = false;
        int ret;
 
-       ret = sev_lock_for_migration(kvm);
-       if (ret)
-               return ret;
-
-       if (sev_guest(kvm)) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
        source_kvm_file = fget(source_fd);
        if (!file_is_kvm(source_kvm_file)) {
                ret = -EBADF;
@@ -1683,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
        }
 
        source_kvm = source_kvm_file->private_data;
-       ret = sev_lock_for_migration(source_kvm);
+       ret = sev_lock_two_vms(kvm, source_kvm);
        if (ret)
                goto out_fput;
 
-       if (!sev_guest(source_kvm)) {
+       if (sev_guest(kvm) || !sev_guest(source_kvm)) {
                ret = -EINVAL;
-               goto out_source;
+               goto out_unlock;
        }
 
        src_sev = &to_kvm_svm(source_kvm)->sev_info;
+
+       /*
+        * VMs mirroring src's encryption context rely on it to keep the
+        * ASID allocated, but below we are clearing src_sev->asid.
+        */
+       if (src_sev->num_mirrored_vms) {
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
        dst_sev->misc_cg = get_current_misc_cg();
        cg_cleanup_sev = dst_sev;
        if (dst_sev->misc_cg != src_sev->misc_cg) {
@@ -1729,13 +1752,11 @@ out_dst_cgroup:
                sev_misc_cg_uncharge(cg_cleanup_sev);
        put_misc_cg(cg_cleanup_sev->misc_cg);
        cg_cleanup_sev->misc_cg = NULL;
-out_source:
-       sev_unlock_after_migration(source_kvm);
+out_unlock:
+       sev_unlock_two_vms(kvm, source_kvm);
 out_fput:
        if (source_kvm_file)
                fput(source_kvm_file);
-out_unlock:
-       sev_unlock_after_migration(kvm);
        return ret;
 }
 
@@ -1757,7 +1778,7 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 
        /* Only the enc_context_owner handles some memory enc operations. */
        if (is_mirroring_enc_context(kvm) &&
-           !cmd_allowed_from_miror(sev_cmd.id)) {
+           !is_cmd_allowed_from_mirror(sev_cmd.id)) {
                r = -EINVAL;
                goto out;
        }
@@ -1954,71 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 {
        struct file *source_kvm_file;
        struct kvm *source_kvm;
-       struct kvm_sev_info source_sev, *mirror_sev;
+       struct kvm_sev_info *source_sev, *mirror_sev;
        int ret;
 
        source_kvm_file = fget(source_fd);
        if (!file_is_kvm(source_kvm_file)) {
                ret = -EBADF;
-               goto e_source_put;
+               goto e_source_fput;
        }
 
        source_kvm = source_kvm_file->private_data;
-       mutex_lock(&source_kvm->lock);
-
-       if (!sev_guest(source_kvm)) {
-               ret = -EINVAL;
-               goto e_source_unlock;
-       }
+       ret = sev_lock_two_vms(kvm, source_kvm);
+       if (ret)
+               goto e_source_fput;
 
-       /* Mirrors of mirrors should work, but let's not get silly */
-       if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
+       /*
+        * Mirrors of mirrors should work, but let's not get silly.  Also
+        * disallow out-of-band SEV/SEV-ES init if the target is already an
+        * SEV guest, or if vCPUs have been created.  KVM relies on vCPUs being
+        * created after SEV/SEV-ES initialization, e.g. to init intercepts.
+        */
+       if (sev_guest(kvm) || !sev_guest(source_kvm) ||
+           is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
                ret = -EINVAL;
-               goto e_source_unlock;
+               goto e_unlock;
        }
 
-       memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
-              sizeof(source_sev));
-
        /*
         * The mirror kvm holds an enc_context_owner ref so its asid can't
         * disappear until we're done with it
         */
+       source_sev = &to_kvm_svm(source_kvm)->sev_info;
        kvm_get_kvm(source_kvm);
-
-       fput(source_kvm_file);
-       mutex_unlock(&source_kvm->lock);
-       mutex_lock(&kvm->lock);
-
-       if (sev_guest(kvm)) {
-               ret = -EINVAL;
-               goto e_mirror_unlock;
-       }
+       source_sev->num_mirrored_vms++;
 
        /* Set enc_context_owner and copy its encryption context over */
        mirror_sev = &to_kvm_svm(kvm)->sev_info;
        mirror_sev->enc_context_owner = source_kvm;
        mirror_sev->active = true;
-       mirror_sev->asid = source_sev.asid;
-       mirror_sev->fd = source_sev.fd;
-       mirror_sev->es_active = source_sev.es_active;
-       mirror_sev->handle = source_sev.handle;
+       mirror_sev->asid = source_sev->asid;
+       mirror_sev->fd = source_sev->fd;
+       mirror_sev->es_active = source_sev->es_active;
+       mirror_sev->handle = source_sev->handle;
+       INIT_LIST_HEAD(&mirror_sev->regions_list);
+       ret = 0;
+
        /*
         * Do not copy ap_jump_table. Since the mirror does not share the same
         * KVM contexts as the original, and they may have different
         * memory-views.
         */
 
-       mutex_unlock(&kvm->lock);
-       return 0;
-
-e_mirror_unlock:
-       mutex_unlock(&kvm->lock);
-       kvm_put_kvm(source_kvm);
-       return ret;
-e_source_unlock:
-       mutex_unlock(&source_kvm->lock);
-e_source_put:
+e_unlock:
+       sev_unlock_two_vms(kvm, source_kvm);
+e_source_fput:
        if (source_kvm_file)
                fput(source_kvm_file);
        return ret;
@@ -2030,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm)
        struct list_head *head = &sev->regions_list;
        struct list_head *pos, *q;
 
+       WARN_ON(sev->num_mirrored_vms);
+
        if (!sev_guest(kvm))
                return;
 
        /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
        if (is_mirroring_enc_context(kvm)) {
-               kvm_put_kvm(sev->enc_context_owner);
+               struct kvm *owner_kvm = sev->enc_context_owner;
+               struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
+
+               mutex_lock(&owner_kvm->lock);
+               if (!WARN_ON(!owner_sev->num_mirrored_vms))
+                       owner_sev->num_mirrored_vms--;
+               mutex_unlock(&owner_kvm->lock);
+               kvm_put_kvm(owner_kvm);
                return;
        }
 
-       mutex_lock(&kvm->lock);
-
        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use. CLFLUSH will
@@ -2060,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm)
                }
        }
 
-       mutex_unlock(&kvm->lock);
-
        sev_unbind_asid(kvm, sev->handle);
        sev_asid_free(sev);
 }
@@ -2245,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
        __free_page(virt_to_page(svm->sev_es.vmsa));
 
        if (svm->sev_es.ghcb_sa_free)
-               kfree(svm->sev_es.ghcb_sa);
+               kvfree(svm->sev_es.ghcb_sa);
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
@@ -2337,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
-static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
        struct kvm_vcpu *vcpu;
        struct ghcb *ghcb;
-       u64 exit_code = 0;
+       u64 exit_code;
+       u64 reason;
 
        ghcb = svm->sev_es.ghcb;
 
-       /* Only GHCB Usage code 0 is supported */
-       if (ghcb->ghcb_usage)
-               goto vmgexit_err;
-
        /*
-        * Retrieve the exit code now even though is may not be marked valid
+        * Retrieve the exit code now even though it may not be marked valid
         * as it could help with debugging.
         */
        exit_code = ghcb_get_sw_exit_code(ghcb);
 
+       /* Only GHCB Usage code 0 is supported */
+       if (ghcb->ghcb_usage) {
+               reason = GHCB_ERR_INVALID_USAGE;
+               goto vmgexit_err;
+       }
+
+       reason = GHCB_ERR_MISSING_INPUT;
+
        if (!ghcb_sw_exit_code_is_valid(ghcb) ||
            !ghcb_sw_exit_info_1_is_valid(ghcb) ||
            !ghcb_sw_exit_info_2_is_valid(ghcb))
@@ -2433,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                break;
        default:
+               reason = GHCB_ERR_INVALID_EVENT;
                goto vmgexit_err;
        }
 
-       return 0;
+       return true;
 
 vmgexit_err:
        vcpu = &svm->vcpu;
 
-       if (ghcb->ghcb_usage) {
+       if (reason == GHCB_ERR_INVALID_USAGE) {
                vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
                            ghcb->ghcb_usage);
+       } else if (reason == GHCB_ERR_INVALID_EVENT) {
+               vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
+                           exit_code);
        } else {
-               vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+               vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
                            exit_code);
                dump_ghcb(svm);
        }
 
-       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
-       vcpu->run->internal.ndata = 2;
-       vcpu->run->internal.data[0] = exit_code;
-       vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+       /* Clear the valid entries fields */
+       memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+
+       ghcb_set_sw_exit_info_1(ghcb, 2);
+       ghcb_set_sw_exit_info_2(ghcb, reason);
 
-       return -EINVAL;
+       return false;
 }
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
@@ -2478,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
                        svm->sev_es.ghcb_sa_sync = false;
                }
 
-               kfree(svm->sev_es.ghcb_sa);
+               kvfree(svm->sev_es.ghcb_sa);
                svm->sev_es.ghcb_sa = NULL;
                svm->sev_es.ghcb_sa_free = false;
        }
@@ -2526,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
        scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
        if (!scratch_gpa_beg) {
                pr_err("vmgexit: scratch gpa not provided\n");
-               return false;
+               goto e_scratch;
        }
 
        scratch_gpa_end = scratch_gpa_beg + len;
        if (scratch_gpa_end < scratch_gpa_beg) {
                pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
                       len, scratch_gpa_beg);
-               return false;
+               goto e_scratch;
        }
 
        if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@@ -2551,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
                    scratch_gpa_end > ghcb_scratch_end) {
                        pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
                               scratch_gpa_beg, scratch_gpa_end);
-                       return false;
+                       goto e_scratch;
                }
 
                scratch_va = (void *)svm->sev_es.ghcb;
@@ -2564,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
                if (len > GHCB_SCRATCH_AREA_LIMIT) {
                        pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
                               len, GHCB_SCRATCH_AREA_LIMIT);
-                       return false;
+                       goto e_scratch;
                }
-               scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
+               scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
                if (!scratch_va)
-                       return false;
+                       goto e_scratch;
 
                if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
                        /* Unable to copy scratch area from guest */
                        pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
 
-                       kfree(scratch_va);
-                       return false;
+                       kvfree(scratch_va);
+                       goto e_scratch;
                }
 
                /*
@@ -2592,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
        svm->sev_es.ghcb_sa_len = len;
 
        return true;
+
+e_scratch:
+       ghcb_set_sw_exit_info_1(ghcb, 2);
+       ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+
+       return false;
 }
 
 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
@@ -2642,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 
                ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
                if (!ret) {
-                       ret = -EINVAL;
+                       /* Error, keep GHCB MSR value as-is */
                        break;
                }
 
@@ -2678,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
                                                GHCB_MSR_TERM_REASON_POS);
                pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
                        reason_set, reason_code);
-               fallthrough;
+
+               ret = -EINVAL;
+               break;
        }
        default:
-               ret = -EINVAL;
+               /* Error, keep GHCB MSR value as-is */
+               break;
        }
 
        trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
@@ -2705,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
        if (!ghcb_gpa) {
                vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
-               return -EINVAL;
+
+               /* Without a GHCB, just return right back to the guest */
+               return 1;
        }
 
        if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
                /* Unable to map GHCB from guest */
                vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
                            ghcb_gpa);
-               return -EINVAL;
+
+               /* Without a GHCB, just return right back to the guest */
+               return 1;
        }
 
        svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
@@ -2722,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
        exit_code = ghcb_get_sw_exit_code(ghcb);
 
-       ret = sev_es_validate_vmgexit(svm);
-       if (ret)
-               return ret;
+       if (!sev_es_validate_vmgexit(svm))
+               return 1;
 
        sev_es_sync_from_ghcb(svm);
        ghcb_set_sw_exit_info_1(ghcb, 0);
        ghcb_set_sw_exit_info_2(ghcb, 0);
 
-       ret = -EINVAL;
+       ret = 1;
        switch (exit_code) {
        case SVM_VMGEXIT_MMIO_READ:
                if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
@@ -2771,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
                default:
                        pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
                               control->exit_info_1);
-                       ghcb_set_sw_exit_info_1(ghcb, 1);
-                       ghcb_set_sw_exit_info_2(ghcb,
-                                               X86_TRAP_UD |
-                                               SVM_EVTINJ_TYPE_EXEPT |
-                                               SVM_EVTINJ_VALID);
+                       ghcb_set_sw_exit_info_1(ghcb, 2);
+                       ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
                }
 
-               ret = 1;
                break;
        }
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                vcpu_unimpl(vcpu,
                            "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
                            control->exit_info_1, control->exit_info_2);
+               ret = -EINVAL;
                break;
        default:
                ret = svm_invoke_exit_handler(vcpu, exit_code);
@@ -2806,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
                return -EINVAL;
 
        if (!setup_vmgexit_scratch(svm, in, bytes))
-               return -EINVAL;
+               return 1;
 
        return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
                                    count, in);
index 5630c241d5f6e0bdf1899163cfdfef57c18b5d47..d0f68d11ec70bec31890e1dbd23b5a11a14a02e6 100644 (file)
@@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .load_eoi_exitmap = svm_load_eoi_exitmap,
        .hwapic_irr_update = svm_hwapic_irr_update,
        .hwapic_isr_update = svm_hwapic_isr_update,
-       .sync_pir_to_irr = kvm_lapic_find_highest_irr,
        .apicv_post_state_restore = avic_post_state_restore,
 
        .set_tss_addr = svm_set_tss_addr,
index 437e68504e669139a6dac6832fb3fc67667c3742..1c7306c370fa3c4924a83371c1a1b21da8f6c5b2 100644 (file)
@@ -79,6 +79,7 @@ struct kvm_sev_info {
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner; /* Owner of copied encryption context */
+       unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
        struct misc_cg *misc_cg; /* For misc cgroup accounting */
        atomic_t migration_in_progress;
 };
@@ -247,7 +248,7 @@ static __always_inline bool sev_es_guest(struct kvm *kvm)
 #ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 
-       return sev_guest(kvm) && sev->es_active;
+       return sev->es_active && !WARN_ON_ONCE(!sev->active);
 #else
        return false;
 #endif
index b213ca966d41da381316d1d14f54addffb8f223c..9c941535f78c050a45a5d134dbb3b020a06a755f 100644 (file)
@@ -670,33 +670,39 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
                                       struct vmcs12 *vmcs12)
 {
-       struct kvm_host_map map;
-       struct vmcs12 *shadow;
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
 
        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
            vmcs12->vmcs_link_pointer == INVALID_GPA)
                return;
 
-       shadow = get_shadow_vmcs12(vcpu);
-
-       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
+       if (ghc->gpa != vmcs12->vmcs_link_pointer &&
+           kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
+                                     vmcs12->vmcs_link_pointer, VMCS12_SIZE))
                return;
 
-       memcpy(shadow, map.hva, VMCS12_SIZE);
-       kvm_vcpu_unmap(vcpu, &map, false);
+       kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
+                             VMCS12_SIZE);
 }
 
 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
                                              struct vmcs12 *vmcs12)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
 
        if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
            vmcs12->vmcs_link_pointer == INVALID_GPA)
                return;
 
-       kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
-                       get_shadow_vmcs12(vcpu), VMCS12_SIZE);
+       if (ghc->gpa != vmcs12->vmcs_link_pointer &&
+           kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
+                                     vmcs12->vmcs_link_pointer, VMCS12_SIZE))
+               return;
+
+       kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
+                              VMCS12_SIZE);
 }
 
 /*
@@ -1156,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
        WARN_ON(!enable_vpid);
 
        /*
-        * If VPID is enabled and used by vmc12, but L2 does not have a unique
-        * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
-        * a VPID for L2, flush the current context as the effective ASID is
-        * common to both L1 and L2.
-        *
-        * Defer the flush so that it runs after vmcs02.EPTP has been set by
-        * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
-        * redundant flushes further down the nested pipeline.
-        *
-        * If a TLB flush isn't required due to any of the above, and vpid12 is
-        * changing then the new "virtual" VPID (vpid12) will reuse the same
-        * "real" VPID (vpid02), and so needs to be flushed.  There's no direct
-        * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
-        * all nested vCPUs.  Remember, a flush on VM-Enter does not invalidate
-        * guest-physical mappings, so there is no need to sync the nEPT MMU.
+        * VPID is enabled and in use by vmcs12.  If vpid12 is changing, then
+        * emulate a guest TLB flush as KVM does not track vpid12 history nor
+        * is the VPID incorporated into the MMU context.  I.e. KVM must assume
+        * that the new vpid12 has never been used and thus represents a new
+        * guest ASID that cannot have entries in the TLB.
         */
-       if (!nested_has_guest_tlb_tag(vcpu)) {
-               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-       } else if (is_vmenter &&
-                  vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+       if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
                vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-               vpid_sync_context(nested_get_vpid02(vcpu));
+               kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+               return;
        }
+
+       /*
+        * If VPID is enabled, used by vmc12, and vpid12 is not changing but
+        * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
+        * KVM was unable to allocate a VPID for L2, flush the current context
+        * as the effective ASID is common to both L1 and L2.
+        */
+       if (!nested_has_guest_tlb_tag(vcpu))
+               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
 
 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
@@ -2588,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
        if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
            WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-                                    vmcs12->guest_ia32_perf_global_ctrl)))
+                                    vmcs12->guest_ia32_perf_global_ctrl))) {
+               *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
+       }
 
        kvm_rsp_write(vcpu, vmcs12->guest_rsp);
        kvm_rip_write(vcpu, vmcs12->guest_rip);
@@ -2830,6 +2835,17 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
+                                      struct vmcs12 *vmcs12)
+{
+#ifdef CONFIG_X86_64
+       if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
+               !!(vcpu->arch.efer & EFER_LMA)))
+               return -EINVAL;
+#endif
+       return 0;
+}
+
 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
                                       struct vmcs12 *vmcs12)
 {
@@ -2854,18 +2870,16 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
                return -EINVAL;
 
 #ifdef CONFIG_X86_64
-       ia32e = !!(vcpu->arch.efer & EFER_LMA);
+       ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
 #else
        ia32e = false;
 #endif
 
        if (ia32e) {
-               if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
-                   CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
+               if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
                        return -EINVAL;
        } else {
-               if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
-                   CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
+               if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
                    CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
                    CC((vmcs12->host_rip) >> 32))
                        return -EINVAL;
@@ -2910,9 +2924,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
                                          struct vmcs12 *vmcs12)
 {
-       int r = 0;
-       struct vmcs12 *shadow;
-       struct kvm_host_map map;
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
+       struct vmcs_hdr hdr;
 
        if (vmcs12->vmcs_link_pointer == INVALID_GPA)
                return 0;
@@ -2920,17 +2934,21 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
        if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
                return -EINVAL;
 
-       if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
-               return -EINVAL;
+       if (ghc->gpa != vmcs12->vmcs_link_pointer &&
+           CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
+                                        vmcs12->vmcs_link_pointer, VMCS12_SIZE)))
+                return -EINVAL;
 
-       shadow = map.hva;
+       if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
+                                           offsetof(struct vmcs12, hdr),
+                                           sizeof(hdr))))
+               return -EINVAL;
 
-       if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
-           CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
-               r = -EINVAL;
+       if (CC(hdr.revision_id != VMCS12_REVISION) ||
+           CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
+               return -EINVAL;
 
-       kvm_vcpu_unmap(vcpu, &map, false);
-       return r;
+       return 0;
 }
 
 /*
@@ -3325,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
        };
        u32 failed_index;
 
-       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-               kvm_vcpu_flush_tlb_current(vcpu);
+       kvm_service_local_tlb_flush_requests(vcpu);
 
        evaluate_pending_interrupts = exec_controls_get(vmx) &
                (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
@@ -3535,6 +3552,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (nested_vmx_check_controls(vcpu, vmcs12))
                return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
+       if (nested_vmx_check_address_space_size(vcpu, vmcs12))
+               return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+
        if (nested_vmx_check_host_state(vcpu, vmcs12))
                return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 
@@ -4480,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                (void)nested_get_evmcs_page(vcpu);
        }
 
-       /* Service the TLB flush request for L2 before switching to L1. */
-       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-               kvm_vcpu_flush_tlb_current(vcpu);
+       /* Service pending TLB flush requests for L2 before switching to L1. */
+       kvm_service_local_tlb_flush_requests(vcpu);
 
        /*
         * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
@@ -4835,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
 
+       vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
        vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_shadow_vmcs12)
                goto out_cached_shadow_vmcs12;
@@ -5264,10 +5284,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                return 1;
 
        if (vmx->nested.current_vmptr != vmptr) {
-               struct kvm_host_map map;
-               struct vmcs12 *new_vmcs12;
+               struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
+               struct vmcs_hdr hdr;
 
-               if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
                        /*
                         * Reads from an unbacked page return all 1s,
                         * which means that the 32 bits located at the
@@ -5278,12 +5298,16 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                                VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
                }
 
-               new_vmcs12 = map.hva;
+               if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
+                                                offsetof(struct vmcs12, hdr),
+                                                sizeof(hdr))) {
+                       return nested_vmx_fail(vcpu,
+                               VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+               }
 
-               if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
-                   (new_vmcs12->hdr.shadow_vmcs &&
+               if (hdr.revision_id != VMCS12_REVISION ||
+                   (hdr.shadow_vmcs &&
                     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
-                       kvm_vcpu_unmap(vcpu, &map, false);
                        return nested_vmx_fail(vcpu,
                                VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
                }
@@ -5294,8 +5318,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                 * Load VMCS12 from guest memory since it is not already
                 * cached.
                 */
-               memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
-               kvm_vcpu_unmap(vcpu, &map, false);
+               if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
+                                         VMCS12_SIZE)) {
+                       return nested_vmx_fail(vcpu,
+                               VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+               }
 
                set_current_vmptr(vmx, vmptr);
        }
index 5f81ef092bd436b1a25ded21fbab536d6743ca24..1c94783b5a54c5520466bb8b3753c89cfa1d5850 100644 (file)
@@ -5,6 +5,7 @@
 #include <asm/cpu.h>
 
 #include "lapic.h"
+#include "irq.h"
 #include "posted_intr.h"
 #include "trace.h"
 #include "vmx.h"
@@ -77,13 +78,18 @@ after_clear_sn:
                pi_set_on(pi_desc);
 }
 
+static bool vmx_can_use_vtd_pi(struct kvm *kvm)
+{
+       return irqchip_in_kernel(kvm) && enable_apicv &&
+               kvm_arch_has_assigned_device(kvm) &&
+               irq_remapping_cap(IRQ_POSTING_CAP);
+}
+
 void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
 {
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       if (!vmx_can_use_vtd_pi(vcpu->kvm))
                return;
 
        /* Set SN when the vCPU is preempted */
@@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
        struct pi_desc old, new;
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       if (!vmx_can_use_vtd_pi(vcpu->kvm))
                return 0;
 
        WARN_ON(irqs_disabled());
@@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
        struct vcpu_data vcpu_info;
        int idx, ret = 0;
 
-       if (!kvm_arch_has_assigned_device(kvm) ||
-           !irq_remapping_cap(IRQ_POSTING_CAP) ||
-           !kvm_vcpu_apicv_active(kvm->vcpus[0]))
+       if (!vmx_can_use_vtd_pi(kvm))
                return 0;
 
        idx = srcu_read_lock(&kvm->irq_srcu);
index ba66c171d951ba06308503570e4d247b40825914..5aadad3e736752f0930e9ec60d95038ae8850f18 100644 (file)
@@ -2646,15 +2646,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
                if (!loaded_vmcs->msr_bitmap)
                        goto out_vmcs;
                memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
-
-               if (IS_ENABLED(CONFIG_HYPERV) &&
-                   static_branch_unlikely(&enable_evmcs) &&
-                   (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
-                       struct hv_enlightened_vmcs *evmcs =
-                               (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
-
-                       evmcs->hv_enlightenments_control.msr_bitmap = 1;
-               }
        }
 
        memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
@@ -2918,6 +2909,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
        }
 }
 
+static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+{
+       if (is_guest_mode(vcpu))
+               return nested_get_vpid02(vcpu);
+       return to_vmx(vcpu)->vpid;
+}
+
 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -2930,31 +2928,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
        if (enable_ept)
                ept_sync_context(construct_eptp(vcpu, root_hpa,
                                                mmu->shadow_root_level));
-       else if (!is_guest_mode(vcpu))
-               vpid_sync_context(to_vmx(vcpu)->vpid);
        else
-               vpid_sync_context(nested_get_vpid02(vcpu));
+               vpid_sync_context(vmx_get_current_vpid(vcpu));
 }
 
 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
 {
        /*
-        * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
+        * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
         * vmx_flush_tlb_guest() for an explanation of why this is ok.
         */
-       vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
+       vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
 }
 
 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
        /*
-        * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
-        * or a vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit
-        * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+        * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
+        * vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit are
+        * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
         * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
         * i.e. no explicit INVVPID is necessary.
         */
-       vpid_sync_context(to_vmx(vcpu)->vpid);
+       vpid_sync_context(vmx_get_current_vpid(vcpu));
 }
 
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -6262,9 +6258,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int max_irr;
-       bool max_irr_updated;
+       bool got_posted_interrupt;
 
-       if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
+       if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
                return -EIO;
 
        if (pi_test_on(&vmx->pi_desc)) {
@@ -6274,22 +6270,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
                 * But on x86 this is just a compiler barrier anyway.
                 */
                smp_mb__after_atomic();
-               max_irr_updated =
+               got_posted_interrupt =
                        kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
-
-               /*
-                * If we are running L2 and L1 has a new pending interrupt
-                * which can be injected, this may cause a vmexit or it may
-                * be injected into L2.  Either way, this interrupt will be
-                * processed via KVM_REQ_EVENT, not RVI, because we do not use
-                * virtual interrupt delivery to inject L1 interrupts into L2.
-                */
-               if (is_guest_mode(vcpu) && max_irr_updated)
-                       kvm_make_request(KVM_REQ_EVENT, vcpu);
        } else {
                max_irr = kvm_lapic_find_highest_irr(vcpu);
+               got_posted_interrupt = false;
        }
-       vmx_hwapic_irr_update(vcpu, max_irr);
+
+       /*
+        * Newly recognized interrupts are injected via either virtual interrupt
+        * delivery (RVI) or KVM_REQ_EVENT.  Virtual interrupt delivery is
+        * disabled in two cases:
+        *
+        * 1) If L2 is running and the vCPU has a new pending interrupt.  If L1
+        * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
+        * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
+        * into L2, but KVM doesn't use virtual interrupt delivery to inject
+        * interrupts into L2, and so KVM_REQ_EVENT is again needed.
+        *
+        * 2) If APICv is disabled for this vCPU, assigned devices may still
+        * attempt to post interrupts.  The posted interrupt vector will cause
+        * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
+        */
+       if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
+               vmx_set_rvi(max_irr);
+       else if (got_posted_interrupt)
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return max_irr;
 }
 
@@ -6826,6 +6833,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
        if (err < 0)
                goto free_pml;
 
+       /*
+        * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
+        * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
+        * feature only for vmcs01, KVM currently isn't equipped to realize any
+        * performance benefits from enabling it for vmcs02.
+        */
+       if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+           (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+               struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
+
+               evmcs->hv_enlightenments_control.msr_bitmap = 1;
+       }
+
        /* The MSR bitmap starts with all ones */
        bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -7509,6 +7529,7 @@ static void hardware_unsetup(void)
 static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+                         BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
@@ -7761,10 +7782,10 @@ static __init int hardware_setup(void)
                ple_window_shrink = 0;
        }
 
-       if (!cpu_has_vmx_apicv()) {
+       if (!cpu_has_vmx_apicv())
                enable_apicv = 0;
+       if (!enable_apicv)
                vmx_x86_ops.sync_pir_to_irr = NULL;
-       }
 
        if (cpu_has_vmx_tsc_scaling()) {
                kvm_has_tsc_control = true;
index a4ead6023133a6b0984aa6771b8a547fc8a46a6c..4df2ac24ffc13009db0f43486450594a4d35432b 100644 (file)
@@ -141,6 +141,16 @@ struct nested_vmx {
         */
        struct vmcs12 *cached_shadow_vmcs12;
 
+       /*
+        * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
+        */
+       struct gfn_to_hva_cache shadow_vmcs12_cache;
+
+       /*
+        * GPA to HVA cache for VMCS12
+        */
+       struct gfn_to_hva_cache vmcs12_cache;
+
        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
index dc7eb5fddfd3ef8e8154f3acbd48528021bbfcba..0cf1082455dfde8ad97c719edd0b9e089f85dea4 100644 (file)
@@ -890,7 +890,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
            !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
                return 1;
 
-       if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+       if (!(cr0 & X86_CR0_PG) &&
+           (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
                return 1;
 
        static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -3258,6 +3259,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
 }
 
+
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+       ++vcpu->stat.tlb_flush;
+       static_call(kvm_x86_tlb_flush_current)(vcpu);
+}
+
+/*
+ * Service "local" TLB flush requests, which are specific to the current MMU
+ * context.  In addition to the generic event handling in vcpu_enter_guest(),
+ * TLB flushes that are targeted at an MMU context also need to be serviced
+ * prior before nested VM-Enter/VM-Exit.
+ */
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
+{
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+               kvm_vcpu_flush_tlb_current(vcpu);
+
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
+               kvm_vcpu_flush_tlb_guest(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
        struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
@@ -3307,9 +3331,9 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
                             "xor %1, %1\n"
                             "2:\n"
                             _ASM_EXTABLE_UA(1b, 2b)
-                            : "+r" (st_preempted),
-                              "+&r" (err)
-                            : "m" (st->preempted));
+                            : "+q" (st_preempted),
+                              "+&r" (err),
+                              "+m" (st->preempted));
                if (err)
                        goto out;
 
@@ -4133,6 +4157,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_SGX_ATTRIBUTE:
 #endif
        case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+       case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
        case KVM_CAP_SREGS2:
        case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
        case KVM_CAP_VCPU_ATTRIBUTES:
@@ -4179,7 +4204,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = !static_call(kvm_x86_cpu_has_accelerated_tpr)();
                break;
        case KVM_CAP_NR_VCPUS:
-               r = num_online_cpus();
+               r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
@@ -4448,8 +4473,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
-       if (vcpu->arch.apicv_active)
-               static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
        return kvm_apic_get_state(vcpu, s);
 }
@@ -5124,6 +5148,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid __user *cpuid_arg = argp;
                struct kvm_cpuid cpuid;
 
+               /*
+                * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+                * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
+                * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
+                * faults due to reusing SPs/SPTEs.  In practice no sane VMM mucks with
+                * the core vCPU model on the fly, so fail.
+                */
+               r = -EINVAL;
+               if (vcpu->arch.last_vmentry_cpu != -1)
+                       goto out;
+
                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
@@ -5134,6 +5169,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;
 
+               /*
+                * KVM_SET_CPUID{,2} after KVM_RUN is forbidded, see the comment in
+                * KVM_SET_CPUID case above.
+                */
+               r = -EINVAL;
+               if (vcpu->arch.last_vmentry_cpu != -1)
+                       goto out;
+
                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
@@ -5698,6 +5741,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
                kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
                r = 0;
 split_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@ -6078,6 +6122,7 @@ set_identity_unlock:
                /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -7077,7 +7122,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                           unsigned short port, void *val, unsigned int count)
 {
        if (vcpu->arch.pio.count) {
-               /* Complete previous iteration.  */
+               /*
+                * Complete a previous iteration that required userspace I/O.
+                * Note, @count isn't guaranteed to match pio.count as userspace
+                * can modify ECX before rerunning the vCPU.  Ignore any such
+                * shenanigans as KVM doesn't support modifying the rep count,
+                * and the emulator ensures @count doesn't overflow the buffer.
+                */
        } else {
                int r = __emulator_pio_in(vcpu, size, port, count);
                if (!r)
@@ -7086,7 +7137,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                /* Results already available, fall through.  */
        }
 
-       WARN_ON(count != vcpu->arch.pio.count);
        complete_emulator_pio_in(vcpu, val);
        return 1;
 }
@@ -8776,10 +8826,9 @@ static void kvm_apicv_init(struct kvm *kvm)
 {
        init_rwsem(&kvm->arch.apicv_update_lock);
 
-       if (enable_apicv)
-               clear_bit(APICV_INHIBIT_REASON_DISABLE,
-                         &kvm->arch.apicv_inhibit_reasons);
-       else
+       set_bit(APICV_INHIBIT_REASON_ABSENT,
+               &kvm->arch.apicv_inhibit_reasons);
+       if (!enable_apicv)
                set_bit(APICV_INHIBIT_REASON_DISABLE,
                        &kvm->arch.apicv_inhibit_reasons);
 }
@@ -8848,7 +8897,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 
        trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
-       op_64_bit = is_64_bit_mode(vcpu);
+       op_64_bit = is_64_bit_hypercall(vcpu);
        if (!op_64_bit) {
                nr &= 0xFFFFFFFF;
                a0 &= 0xFFFFFFFF;
@@ -9528,8 +9577,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        if (irqchip_split(vcpu->kvm))
                kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
        else {
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+               static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
                if (ioapic_in_kernel(vcpu->kvm))
                        kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
        }
@@ -9547,12 +9595,16 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                return;
 
-       if (to_hv_vcpu(vcpu))
+       if (to_hv_vcpu(vcpu)) {
                bitmap_or((ulong *)eoi_exit_bitmap,
                          vcpu->arch.ioapic_handled_vectors,
                          to_hv_synic(vcpu)->vec_bitmap, 256);
+               static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+               return;
+       }
 
-       static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+       static_call(kvm_x86_load_eoi_exitmap)(
+               vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
 
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
@@ -9644,10 +9696,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        /* Flushing all ASIDs flushes the current ASID... */
                        kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
-               if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-                       kvm_vcpu_flush_tlb_current(vcpu);
-               if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
-                       kvm_vcpu_flush_tlb_guest(vcpu);
+               kvm_service_local_tlb_flush_requests(vcpu);
 
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -9798,10 +9847,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        /*
         * This handles the case where a posted interrupt was
-        * notified with kvm_vcpu_kick.
+        * notified with kvm_vcpu_kick.  Assigned devices can
+        * use the POSTED_INTR_VECTOR even if APICv is disabled,
+        * so do it even if APICv is disabled on this vCPU.
         */
-       if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
-               static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       if (kvm_lapic_enabled(vcpu))
+               static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
        if (kvm_vcpu_exit_request(vcpu)) {
                vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -9845,8 +9896,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
 
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+               if (kvm_lapic_enabled(vcpu))
+                       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
index ea264c4502e413f699e836065e868bcc20865289..4abcd8d9836ddc2296748069f2bfcf9c26fe8c9c 100644 (file)
@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
 
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
 int kvm_check_nested_events(struct kvm_vcpu *vcpu);
 
 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
@@ -153,12 +154,24 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
 {
        int cs_db, cs_l;
 
+       WARN_ON_ONCE(vcpu->arch.guest_state_protected);
+
        if (!is_long_mode(vcpu))
                return false;
        static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
        return cs_l;
 }
 
+static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
+{
+       /*
+        * If running with protected guest state, the CS register is not
+        * accessible. The hypercall register values will have had to been
+        * provided in 64-bit mode, so assume the guest is in 64-bit.
+        */
+       return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
+}
+
 static inline bool x86_exception_has_error_code(unsigned int vector)
 {
        static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
@@ -173,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
-static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
-{
-       ++vcpu->stat.tlb_flush;
-       static_call(kvm_x86_tlb_flush_current)(vcpu);
-}
-
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
index 8f62baebd028626d493b135468796ee31d5a3b27..dff2bdf9507a8cf679f449b277666df1ccf34773 100644 (file)
@@ -127,9 +127,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
        state_entry_time = vx->runstate_entry_time;
        state_entry_time |= XEN_RUNSTATE_UPDATE;
 
-       BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) !=
+       BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
                     sizeof(state_entry_time));
-       BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
+       BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
                     sizeof(state_entry_time));
 
        if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@@ -144,9 +144,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
         */
        BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
                     offsetof(struct compat_vcpu_runstate_info, state));
-       BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) !=
+       BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
                     sizeof(vx->current_runstate));
-       BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
+       BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
                     sizeof(vx->current_runstate));
 
        if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@@ -163,9 +163,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
                     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
        BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
                     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
-       BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
-                    sizeof(((struct compat_vcpu_runstate_info *)0)->time));
-       BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
+       BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
+                    sizeof_field(struct compat_vcpu_runstate_info, time));
+       BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
                     sizeof(vx->runstate_times));
 
        if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
@@ -205,9 +205,9 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
        BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
                     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
        BUILD_BUG_ON(sizeof(rc) !=
-                    sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
+                    sizeof_field(struct vcpu_info, evtchn_upcall_pending));
        BUILD_BUG_ON(sizeof(rc) !=
-                    sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));
+                    sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
        /*
         * For efficiency, this mirrors the checks for using the valid
@@ -299,7 +299,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
 
        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
-               data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_gfn);
+               data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn;
                r = 0;
                break;
 
@@ -698,7 +698,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
            kvm_hv_hypercall_enabled(vcpu))
                return kvm_hv_hypercall(vcpu);
 
-       longmode = is_64_bit_mode(vcpu);
+       longmode = is_64_bit_hypercall(vcpu);
        if (!longmode) {
                params[0] = (u32)kvm_rbx_read(vcpu);
                params[1] = (u32)kvm_rcx_read(vcpu);
index b15ebfe40a73ea16660d68c6aa624094bbe8a8ad..b0b848d6933afbcf118415ea6689488e28675054 100644 (file)
@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                return;
        }
 
-       new = early_memremap(data.phys_map, data.size);
+       new = early_memremap_prot(data.phys_map, data.size,
+                                 pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
        if (!new) {
                pr_err("Failed to map new boot services memmap\n");
                return;
index 4a3da7592b99c938eed72dd583475bef4ae131a1..38d24d2ab38b3329e3ec3f9f527b573688c7dbe9 100644 (file)
@@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
 #ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
+       int i;
 #endif
 
        base = (unsigned char *)real_mode_header;
@@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
        trampoline_header->flags = 0;
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+       /* Map the real mode stub as virtual == physical */
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-       trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+       /*
+        * Include the entirety of the kernel mapping into the trampoline
+        * PGD.  This way, all mappings present in the normal kernel page
+        * tables are usable while running on trampoline_pgd.
+        */
+       for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+               trampoline_pgd[i] = init_top_pgt[i].pgd;
 #endif
 
        sme_sev_setup_real_mode(trampoline_header);
index 220dd96784947624d9d43fb62dd72e4ae0614936..444d824775f6a9ccb10929c55ac980af7f73376e 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <../entry/calling.h>
 
 .pushsection .noinstr.text, "ax"
 /*
@@ -192,6 +193,25 @@ SYM_CODE_START(xen_iret)
        jmp hypercall_iret
 SYM_CODE_END(xen_iret)
 
+/*
+ * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and
+ * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
+ * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
+ * frame at the same address is useless.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+       UNWIND_HINT_REGS
+       POP_REGS
+
+       /* stackleak_erase() can work safely on the kernel stack. */
+       STACKLEAK_ERASE_NOCLOBBER
+
+       addq    $8, %rsp        /* skip regs->orig_ax */
+       jmp xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
+
 /*
  * Xen handles syscall callbacks much like ordinary exceptions, which
  * means we have:
index a8a041609c5d0e04602b1f04c3cd6233de5e991f..7b4359312c25766f7d96f04c387dc32b1cbbd159 100644 (file)
@@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *);
 
 void local_flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
@@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_vunmap(start,end)                  do { } while (0)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
 #define flush_dcache_page(page)                                do { } while (0)
-static inline void flush_dcache_folio(struct folio *folio) { }
 
 #define flush_icache_range local_flush_icache_range
 #define flush_cache_page(vma, addr, pfn)               do { } while (0)
index 104b327f8ac9fd834421e559164ba77dcb7271e4..3e3e1a506bed1a8cc693a60637960640fe67812d 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index b4dab2fb6a74690c655fb0e712df5de8332abd19..b1d087e5e205f6afb0f48393c1bbc452f95c3e6c 100644 (file)
@@ -753,8 +753,7 @@ struct block_device *blkdev_get_no_open(dev_t dev)
 
        if (!bdev)
                return NULL;
-       if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
-           !try_module_get(bdev->bd_disk->fops->owner)) {
+       if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) {
                put_device(&bdev->bd_device);
                return NULL;
        }
@@ -764,7 +763,6 @@ struct block_device *blkdev_get_no_open(dev_t dev)
 
 void blkdev_put_no_open(struct block_device *bdev)
 {
-       module_put(bdev->bd_disk->fops->owner);
        put_device(&bdev->bd_device);
 }
 
@@ -820,12 +818,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
        ret = -ENXIO;
        if (!disk_live(disk))
                goto abort_claiming;
+       if (!try_module_get(disk->fops->owner))
+               goto abort_claiming;
        if (bdev_is_partition(bdev))
                ret = blkdev_get_part(bdev, mode);
        else
                ret = blkdev_get_whole(bdev, mode);
        if (ret)
-               goto abort_claiming;
+               goto put_module;
        if (mode & FMODE_EXCL) {
                bd_finish_claiming(bdev, holder);
 
@@ -847,7 +847,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
        if (unblock_events)
                disk_unblock_events(disk);
        return bdev;
-
+put_module:
+       module_put(disk->fops->owner);
 abort_claiming:
        if (mode & FMODE_EXCL)
                bd_abort_claiming(bdev, holder);
@@ -956,6 +957,7 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
                blkdev_put_whole(bdev, mode);
        mutex_unlock(&disk->open_mutex);
 
+       module_put(disk->fops->owner);
        blkdev_put_no_open(bdev);
 }
 EXPORT_SYMBOL(blkdev_put);
index 88b1fce905200c1f2897598298733d7a0699aa9c..663aabfeba183ae87a346b75a5af24f4a233bb47 100644 (file)
@@ -640,7 +640,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
         */
        ret = blk_queue_enter(q, 0);
        if (ret)
-               return ret;
+               goto fail;
 
        rcu_read_lock();
        spin_lock_irq(&q->queue_lock);
@@ -676,13 +676,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
-                       goto fail;
+                       goto fail_exit_queue;
                }
 
                if (radix_tree_preload(GFP_KERNEL)) {
                        blkg_free(new_blkg);
                        ret = -ENOMEM;
-                       goto fail;
+                       goto fail_exit_queue;
                }
 
                rcu_read_lock();
@@ -722,9 +722,10 @@ fail_preloaded:
 fail_unlock:
        spin_unlock_irq(&q->queue_lock);
        rcu_read_unlock();
+fail_exit_queue:
+       blk_queue_exit(q);
 fail:
        blkdev_put_no_open(bdev);
-       blk_queue_exit(q);
        /*
         * If queue was bypassing, we should retry.  Do so after a
         * short msleep().  It isn't strictly necessary but queue
index 9ee32f85d74e1a8042b40c1b3eca31fa6122b412..1378d084c770f6641a911caf8d98d302026fa689 100644 (file)
@@ -363,8 +363,10 @@ void blk_cleanup_queue(struct request_queue *q)
        blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
        blk_sync_queue(q);
-       if (queue_is_mq(q))
+       if (queue_is_mq(q)) {
+               blk_mq_cancel_work_sync(q);
                blk_mq_exit_queue(q);
+       }
 
        /*
         * In theory, request pool of sched_tags belongs to request queue.
@@ -1015,6 +1017,7 @@ EXPORT_SYMBOL(submit_bio);
 /**
  * bio_poll - poll for BIO completions
  * @bio: bio to poll for
+ * @iob: batches of IO
  * @flags: BLK_POLL_* flags that control the behavior
  *
  * Poll for completions on queue associated with the bio. Returns number of
index 8e364bda516618135dbd05e0b09d9f0565a99e6c..1fce6d16e6d3abf806b1f58a90d089b4e01c9684 100644 (file)
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
  */
-bool blk_insert_flush(struct request *rq)
+void blk_insert_flush(struct request *rq)
 {
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
@@ -409,7 +409,7 @@ bool blk_insert_flush(struct request *rq)
         */
        if (!policy) {
                blk_mq_end_request(rq, 0);
-               return true;
+               return;
        }
 
        BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,8 +420,10 @@ bool blk_insert_flush(struct request *rq)
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
-           !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
-               return false;
+           !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
+               blk_mq_request_bypass_insert(rq, false, true);
+               return;
+       }
 
        /*
         * @rq should go through flush machinery.  Mark it part of flush
@@ -437,8 +439,6 @@ bool blk_insert_flush(struct request *rq)
        spin_lock_irq(&fq->mq_flush_lock);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
        spin_unlock_irq(&fq->mq_flush_lock);
-
-       return true;
 }
 
 /**
index 3ab34c4f20daf700e2f54fe0e6b4f79d3ef39cf6..8874a63ae952b28031eb9c38323d06dca4f3129e 100644 (file)
@@ -860,13 +860,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
                if (iob->need_ts)
                        __blk_mq_end_request_acct(rq, now);
 
+               rq_qos_done(rq->q, rq);
+
                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
                if (!refcount_dec_and_test(&rq->ref))
                        continue;
 
                blk_crypto_free_request(rq);
                blk_pm_mark_last_busy(rq);
-               rq_qos_done(rq->q, rq);
 
                if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
                        if (cur_hctx)
@@ -2543,8 +2544,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        return NULL;
 }
 
-static inline bool blk_mq_can_use_cached_rq(struct request *rq,
-               struct bio *bio)
+static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
 {
        if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
                return false;
@@ -2565,7 +2565,6 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
        bool checked = false;
 
        if (plug) {
-
                rq = rq_list_peek(&plug->cached_rq);
                if (rq && rq->q == q) {
                        if (unlikely(!submit_bio_checks(bio)))
@@ -2587,12 +2586,14 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 fallback:
        if (unlikely(bio_queue_enter(bio)))
                return NULL;
-       if (!checked && !submit_bio_checks(bio))
-               return NULL;
+       if (unlikely(!checked && !submit_bio_checks(bio)))
+               goto out_put;
        rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
-       if (!rq)
-               blk_queue_exit(q);
-       return rq;
+       if (rq)
+               return rq;
+out_put:
+       blk_queue_exit(q);
+       return NULL;
 }
 
 /**
@@ -2647,8 +2648,10 @@ void blk_mq_submit_bio(struct bio *bio)
                return;
        }
 
-       if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
+       if (op_is_flush(bio->bi_opf)) {
+               blk_insert_flush(rq);
                return;
+       }
 
        if (plug && (q->nr_hw_queues == 1 ||
            blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
@@ -4417,6 +4420,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_rq_cpu);
 
+void blk_mq_cancel_work_sync(struct request_queue *q)
+{
+       if (queue_is_mq(q)) {
+               struct blk_mq_hw_ctx *hctx;
+               int i;
+
+               cancel_delayed_work_sync(&q->requeue_work);
+
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->run_work);
+       }
+}
+
 static int __init blk_mq_init(void)
 {
        int i;
index 8acfa650f575156ce27f7b79860df4c75a158e3d..afcf9931a4890467d1a2837219a78026381e4c2c 100644 (file)
@@ -128,6 +128,8 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 void blk_mq_free_plug_rqs(struct blk_plug *plug);
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
+void blk_mq_cancel_work_sync(struct request_queue *q);
+
 void blk_mq_release(struct request_queue *q);
 
 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
index cef1f713370bd79bdea184efb28553767ace86a0..cd75b0f73dc6fc32eb62e91c919a176f96042dc2 100644 (file)
@@ -791,16 +791,6 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_free_queue_stats(q->stats);
 
-       if (queue_is_mq(q)) {
-               struct blk_mq_hw_ctx *hctx;
-               int i;
-
-               cancel_delayed_work_sync(&q->requeue_work);
-
-               queue_for_each_hw_ctx(q, hctx, i)
-                       cancel_delayed_work_sync(&hctx->run_work);
-       }
-
        blk_exit_queue(q);
 
        blk_queue_free_zone_bitmaps(q);
index b4fed2033e48f1a881b62305dc54944fa70e299f..ccde6e6f1736096ff59cfbe17c782e661bb762c8 100644 (file)
@@ -271,7 +271,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
-bool blk_insert_flush(struct request *rq);
+void blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e);
index 1f39f6e8ebb9625deaaeb79ecdae4aa00dca072a..19a78d5516ba7e7fd1b999739657f266f29a8044 100644 (file)
@@ -694,12 +694,18 @@ void elevator_init_mq(struct request_queue *q)
        if (!e)
                return;
 
+       /*
+        * We are called before adding disk, when there isn't any FS I/O,
+        * so freezing queue plus canceling dispatch work is enough to
+        * drain any dispatch activities originated from passthrough
+        * requests, then no need to quiesce queue which may add long boot
+        * latency, especially when lots of disks are involved.
+        */
        blk_mq_freeze_queue(q);
-       blk_mq_quiesce_queue(q);
+       blk_mq_cancel_work_sync(q);
 
        err = blk_mq_init_sched(q, e);
 
-       blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
 
        if (err) {
index ad732a36f9b303f7cb202eea48c00a488e800417..0da147edbd1864f59ce30c6c3f7af350d9a2cf4d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/falloc.h>
 #include <linux/suspend.h>
 #include <linux/fs.h>
+#include <linux/module.h>
 #include "blk.h"
 
 static inline struct inode *bdev_file_inode(struct file *file)
@@ -340,8 +341,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
-                       bio->bi_status = BLK_STS_IOERR;
-                       bio_endio(bio);
+                       bio_put(bio);
                        return ret;
                }
        }
index c5392cc24d37ef32fbeac5b78b18f35d920d8cef..30362aeacac4b88b4fa18a841130a046b7f7f105 100644 (file)
@@ -1111,6 +1111,8 @@ static void disk_release(struct device *dev)
        might_sleep();
        WARN_ON_ONCE(disk_live(disk));
 
+       blk_mq_cancel_work_sync(disk->queue);
+
        disk_release_events(disk);
        kfree(disk->random);
        xa_destroy(&disk->part_tbl);
index 0e4ff245f2bf21b7c1ab7d5b36ac23b89decf7a0..6f01d35a5145a66ffac5e7852c943df3b0f66c10 100644 (file)
@@ -69,7 +69,14 @@ int ioprio_check_cap(int ioprio)
 
        switch (class) {
                case IOPRIO_CLASS_RT:
-                       if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN))
+                       /*
+                        * Originally this only checked for CAP_SYS_ADMIN,
+                        * which was implicitly allowed for pid 0 by security
+                        * modules such as SELinux. Make sure we check
+                        * CAP_SYS_ADMIN first to avoid a denial/avc for
+                        * possibly missing CAP_SYS_NICE permission.
+                        */
+                       if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
                                return -EPERM;
                        fallthrough;
                        /* rt has prio field too */
@@ -213,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                                pgrp = task_pgrp(current);
                        else
                                pgrp = find_vpid(who);
+                       read_lock(&tasklist_lock);
                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                tmpio = get_task_ioprio(p);
                                if (tmpio < 0)
@@ -222,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                                else
                                        ret = ioprio_best(ret, tmpio);
                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+                       read_unlock(&tasklist_lock);
+
                        break;
                case IOPRIO_WHO_USER:
                        uid = make_kuid(current_user_ns(), who);
index a85c351589bee53084b4c07c8bef7dc72d8d87ba..b62c87b8ce4a99650c0ed3140ac449ce45238d10 100644 (file)
@@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
 {
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
-       struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
+       struct cpc_register_resource *reg;
+
+       if (!cpc_desc) {
+               pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+               return -ENODEV;
+       }
+
+       reg = &cpc_desc->cpc_regs[reg_idx];
 
        if (CPC_IN_PCC(reg)) {
                int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
index 7cd0009e7ff34b0f1af9d9e8efd9fcee856693a5..ef104809f27b17415758cff0d3793a8d7193b070 100644 (file)
@@ -347,28 +347,3 @@ void acpi_device_notify_remove(struct device *dev)
 
        acpi_unbind_one(dev);
 }
-
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used)
-{
-       struct acpi_device *adev = to_acpi_device(dev);
-
-       /*
-        * Skip device objects with device IDs, because they may be in use even
-        * if they are not companions of any physical device objects.
-        */
-       if (adev->pnp.type.hardware_id)
-               return 0;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       /*
-        * Device objects without device IDs are not in use if they have no
-        * corresponding physical device objects.
-        */
-       if (list_empty(&adev->physical_node_list))
-               acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
-
-       mutex_unlock(&adev->physical_node_lock);
-
-       return 0;
-}
index 8fbdc172864b0ae230bbb4a142156bb31f330d9a..d91b560e8867472d006c0bffbd9399b822a1636d 100644 (file)
@@ -117,7 +117,6 @@ bool acpi_device_is_battery(struct acpi_device *adev);
 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
                                        const struct device *dev);
 int acpi_bus_register_early_device(int type);
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used);
 
 /* --------------------------------------------------------------------------
                      Device Matching and Notification
index e312ebaed8db49d10a060bcfb100db6a251cef05..2366f54d8e9cf8b263613839a3a9e39419508473 100644 (file)
@@ -1084,21 +1084,17 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
  * Returns parent node of an ACPI device or data firmware node or %NULL if
  * not available.
  */
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
+static struct fwnode_handle *
+acpi_node_get_parent(const struct fwnode_handle *fwnode)
 {
        if (is_acpi_data_node(fwnode)) {
                /* All data nodes have parent pointer so just return that */
                return to_acpi_data_node(fwnode)->parent;
        } else if (is_acpi_device_node(fwnode)) {
-               acpi_handle handle, parent_handle;
-
-               handle = to_acpi_device_node(fwnode)->handle;
-               if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
-                       struct acpi_device *adev;
+               struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
 
-                       if (!acpi_bus_get_device(parent_handle, &adev))
-                               return acpi_fwnode_handle(adev);
-               }
+               if (dev)
+                       return acpi_fwnode_handle(to_acpi_device(dev));
        }
 
        return NULL;
index a50f1967c73dcc381fdeaa7cc3ba28cba28b473b..2c80765670bc7fc35508ad91347f8343f5f1f1d6 100644 (file)
@@ -2564,12 +2564,6 @@ int __init acpi_scan_init(void)
                }
        }
 
-       /*
-        * Make sure that power management resources are not blocked by ACPI
-        * device objects with no users.
-        */
-       bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused);
-
        acpi_turn_off_unused_power_resources();
 
        acpi_scan_initialized = true;
index 49fb74196d02fbc4d087f7996d65f14b45812a83..c75fb600740cc1ee5c97598c090f658324d3e722 100644 (file)
@@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
                t->from = thread;
        else
                t->from = NULL;
-       t->sender_euid = proc->cred->euid;
+       t->sender_euid = task_euid(proc->tsk);
        t->to_proc = target_proc;
        t->to_thread = target_thread;
        t->code = tr->code;
@@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc,
        __release(&t->lock);
 
        /*
-        * If this thread used poll, make sure we remove the waitqueue
-        * from any epoll data structures holding it with POLLFREE.
-        * waitqueue_active() is safe to use here because we're holding
-        * the inner lock.
+        * If this thread used poll, make sure we remove the waitqueue from any
+        * poll data structures holding it.
         */
-       if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-           waitqueue_active(&thread->wait)) {
-               wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-       }
+       if (thread->looper & BINDER_LOOPER_STATE_POLL)
+               wake_up_pollfree(&thread->wait);
 
        binder_inner_proc_unlock(thread->proc);
 
        /*
-        * This is needed to avoid races between wake_up_poll() above and
-        * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-        * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-        * lock, so we can be sure it's done after calling synchronize_rcu().
+        * This is needed to avoid races between wake_up_pollfree() above and
+        * someone else removing the last entry from the queue for other reasons
+        * (e.g. ep_remove_wait_queue() being called due to an epoll file
+        * descriptor being closed).  Such other users hold an RCU read lock, so
+        * we can be sure they're done after we call synchronize_rcu().
         */
        if (thread->looper & BINDER_LOOPER_STATE_POLL)
                synchronize_rcu();
index d60f34718b5d68a42f28fedfbc8422247924f540..1e1167e725a407f430fdb92b47585892a77ef1ef 100644 (file)
@@ -438,6 +438,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        /* AMD */
        { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
        { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+       { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
        /* AMD is using RAID class only for ahci controllers */
        { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
index 50b56cd0039d287267269250a04b184328e2159b..e9c7c07fd84c8f063d317aadd54e321afac327d3 100644 (file)
@@ -94,6 +94,7 @@ struct ceva_ahci_priv {
 static unsigned int ceva_ahci_read_id(struct ata_device *dev,
                                        struct ata_taskfile *tf, u16 *id)
 {
+       __le16 *__id = (__le16 *)id;
        u32 err_mask;
 
        err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -103,7 +104,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
         * Since CEVA controller does not support device sleep feature, we
         * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
         */
-       id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+       __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
 
        return 0;
 }
index 8a6835bfd18a27850abe827fb257ec09ddfa5631..f76b8418e6fb10b05dbc90439c87d7999b07508b 100644 (file)
@@ -2323,6 +2323,18 @@ int ahci_port_resume(struct ata_port *ap)
 EXPORT_SYMBOL_GPL(ahci_port_resume);
 
 #ifdef CONFIG_PM
+static void ahci_handle_s2idle(struct ata_port *ap)
+{
+       void __iomem *port_mmio = ahci_port_base(ap);
+       u32 devslp;
+
+       if (pm_suspend_via_firmware())
+               return;
+       devslp = readl(port_mmio + PORT_DEVSLP);
+       if ((devslp & PORT_DEVSLP_ADSE))
+               ata_msleep(ap, devslp_idle_timeout);
+}
+
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 {
        const char *emsg = NULL;
@@ -2336,6 +2348,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
                ata_port_freeze(ap);
        }
 
+       if (acpi_storage_d3(ap->host->dev))
+               ahci_handle_s2idle(ap);
+
        ahci_rpm_put_port(ap);
        return rc;
 }
index 8a0ccb190d767021d2d2a8db6071eda220789815..aba0c67d1bd6563d34250e4b8b11114612e91dcf 100644 (file)
@@ -2031,8 +2031,9 @@ retry:
                        dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
                        goto retry;
                }
-               ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n",
-                           (unsigned int)page, err_mask);
+               ata_dev_err(dev,
+                           "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
+                           (unsigned int)log, (unsigned int)page, err_mask);
        }
 
        return err_mask;
@@ -2177,6 +2178,9 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
        struct ata_port *ap = dev->link->ap;
        unsigned int err_mask;
 
+       if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
+               return;
+
        err_mask = ata_read_log_page(dev,
                                     ATA_LOG_IDENTIFY_DEVICE,
                                     ATA_LOG_SATA_SETTINGS,
@@ -2453,7 +2457,8 @@ static void ata_dev_config_devslp(struct ata_device *dev)
         * Check device sleep capability. Get DevSlp timing variables
         * from SATA Settings page of Identify Device Data Log.
         */
-       if (!ata_id_has_devslp(dev->id))
+       if (!ata_id_has_devslp(dev->id) ||
+           !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
                return;
 
        err_mask = ata_read_log_page(dev,
@@ -3915,6 +3920,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "VRFDFC22048UCHC-TE*", NULL,          ATA_HORKAGE_NODMA },
        /* Odd clown on sil3726/4726 PMPs */
        { "Config  Disk",       NULL,           ATA_HORKAGE_DISABLE },
+       /* Similar story with ASMedia 1092 */
+       { "ASMT109x- Config",   NULL,           ATA_HORKAGE_DISABLE },
 
        /* Weird ATAPI devices */
        { "TORiSAN DVD-ROM DRD-N216", NULL,     ATA_HORKAGE_MAX_SEC_128 },
index 4e88597aa9df3fa022d2475a8d27b51d1a49e801..b9c77885b8726ee0c43b5318602d95353397b176 100644 (file)
@@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
        if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
+       return sysfs_emit(buf, "%s\n",
                        ata_lpm_policy_names[ap->target_lpm_policy]);
 }
 DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
@@ -922,7 +922,7 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
            ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
 EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
 
-struct attribute *ata_ncq_sdev_attrs[] = {
+static struct attribute *ata_ncq_sdev_attrs[] = {
        &dev_attr_unload_heads.attr,
        &dev_attr_ncq_prio_enable.attr,
        &dev_attr_ncq_prio_supported.attr,
index 121635aa8c00c10e8f5e42cc5855d173971cf4e7..823c88622e34a089e670a8bb9d7dc0838ced78d5 100644 (file)
@@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
        /* Transfer multiple of 2 bytes */
        if (rw == READ) {
                if (swap)
-                       raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_insw_swapw(data_addr, (u16 *)buf, words);
                else
-                       raw_insw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_insw(data_addr, (u16 *)buf, words);
        } else {
                if (swap)
-                       raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_outsw_swapw(data_addr, (u16 *)buf, words);
                else
-                       raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_outsw(data_addr, (u16 *)buf, words);
        }
 
        /* Transfer trailing byte, if any. */
@@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
 
                if (rw == READ) {
                        if (swap)
-                               raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_insw_swapw(data_addr, (u16 *)pad, 1);
                        else
-                               raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_insw(data_addr, (u16 *)pad, 1);
                        *buf = pad[0];
                } else {
                        pad[0] = *buf;
                        if (swap)
-                               raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_outsw_swapw(data_addr, (u16 *)pad, 1);
                        else
-                               raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_outsw(data_addr, (u16 *)pad, 1);
                }
                words++;
        }
index e5838b23c9e0a177712283e3c5df5087f4ff1da0..3b31a4f596d865f0a9a7ea9022729aa0a84f0ca9 100644 (file)
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
        return 0;
 }
 
+static void sata_fsl_host_stop(struct ata_host *host)
+{
+        struct sata_fsl_host_priv *host_priv = host->private_data;
+
+        iounmap(host_priv->hcr_base);
+        kfree(host_priv);
+}
+
 /*
  * scsi mid-layer and libata interface structures
  */
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
        .port_start = sata_fsl_port_start,
        .port_stop = sata_fsl_port_stop,
 
+       .host_stop      = sata_fsl_host_stop,
+
        .pmp_attach = sata_fsl_pmp_attach,
        .pmp_detach = sata_fsl_pmp_detach,
 };
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
        host_priv->ssr_base = ssr_base;
        host_priv->csr_base = csr_base;
 
-       irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (!irq) {
-               dev_err(&ofdev->dev, "invalid irq from platform\n");
+       irq = platform_get_irq(ofdev, 0);
+       if (irq < 0) {
+               retval = irq;
                goto error_exit_with_cleanup;
        }
        host_priv->irq = irq;
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
 
        ata_host_detach(host);
 
-       irq_dispose_mapping(host_priv->irq);
-       iounmap(host_priv->hcr_base);
-       kfree(host_priv);
-
        return 0;
 }
 
index a154cab6cd989808b5cae51fd3d12506a6d52f9c..c3a36cfaa855a679f8f7d215852c8981aea3e2f7 100644 (file)
@@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
        int ret;
 
        if (idx < 0) {
-               pr_warn("deleting an unspecified loop device is not supported.\n");
+               pr_warn_once("deleting an unspecified loop device is not supported.\n");
                return -EINVAL;
        }
                
index 97bf051a50ced340621b0dd4cb2022c8956770ee..6ae38776e30e5049b31f45dc8b8158cf98643a4d 100644 (file)
@@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
-       unsigned int num;
+       int num;
        int qid = hctx->queue_num;
        bool notify = false;
        blk_status_t status;
@@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = {
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
-       .suppress_used_validation       = true,
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
index 08d7953ec5f107483b217d382484980691884d92..25071126995befb16c69e2c88a590fb54dd0491b 100644 (file)
@@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = {
        .owner = THIS_MODULE
 };
 
+#ifdef CONFIG_ZRAM_WRITEBACK
 static const struct block_device_operations zram_wb_devops = {
        .open = zram_open,
        .submit_bio = zram_submit_bio,
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
 };
+#endif
 
 static DEVICE_ATTR_WO(compact);
 static DEVICE_ATTR_RW(disksize);
index fb99e3727155b6059fe4629ffa812e2ab7a1c4a3..547e6e769546a45d3d62ab8b6de97ba0c82e7493 100644 (file)
@@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
 }
 EXPORT_SYMBOL_GPL(mhi_pm_suspend);
 
-int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
 {
        struct mhi_chan *itr, *tmp;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;
 
-       if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
-               return -EINVAL;
+       if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+               dev_warn(dev, "Resuming from non M3 state (%s)\n",
+                        TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+               if (!force)
+                       return -EINVAL;
+       }
 
        /* Notify clients about exiting LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
@@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 
        return 0;
 }
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+       return __mhi_pm_resume(mhi_cntrl, false);
+}
 EXPORT_SYMBOL_GPL(mhi_pm_resume);
 
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+       return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 {
        int ret;
index 59a4896a803096bce6634d4c1a0825fb30f72a54..4c577a73170910566668c4db85a30267f5d55fff 100644 (file)
@@ -20,7 +20,7 @@
 
 #define MHI_PCI_DEFAULT_BAR_NUM 0
 
-#define MHI_POST_RESET_DELAY_MS 500
+#define MHI_POST_RESET_DELAY_MS 2000
 
 #define HEALTH_CHECK_PERIOD (HZ * 2)
 
index ed3c4c42fc23b7f8280394060a44006b9d5adcf4..d68d05d5d38388523b0fecfdd45cb6e32488c784 100644 (file)
@@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs)
         return 0;
 }
 
-static int
+static int __init
 lba_find_capability(int cap)
 {
        struct _parisc_agp_info *info = &parisc_agp_info;
@@ -366,7 +366,7 @@ fail:
        return error;
 }
 
-static int
+static int __init
 find_quicksilver(struct device *dev, void *data)
 {
        struct parisc_device **lba = data;
@@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data)
        return 0;
 }
 
-static int
+static int __init
 parisc_agp_init(void)
 {
        extern struct sba_device *sba_list;
index deed355422f4e9561e10ac0a1d62cc42649bffde..c837d5416e0eeeb9ca578f23d75f8141be5e9481 100644 (file)
@@ -191,6 +191,8 @@ struct ipmi_user {
        struct work_struct remove_work;
 };
 
+static struct workqueue_struct *remove_work_wq;
+
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
        __acquires(user->release_barrier)
 {
@@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
 
        /* SRCU cleanup must happen in task context. */
-       schedule_work(&user->remove_work);
+       queue_work(remove_work_wq, &user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
                /* We didn't find a user, deliver an error response. */
                ipmi_inc_stat(intf, unhandled_commands);
 
-               msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
-               msg->data[1] = msg->rsp[2];
-               msg->data[2] = msg->rsp[4] & ~0x3;
+               msg->data[0] = (netfn + 1) << 2;
+               msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
+               msg->data[1] = msg->rsp[1]; /* Addr */
+               msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
+               msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
                msg->data[3] = cmd;
                msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
                msg->data_size = 5;
@@ -4455,13 +4459,24 @@ return_unspecified:
                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
                msg->rsp_size = 3;
        } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
-               /* commands must have at least 3 bytes, responses 4. */
-               if (is_cmd && (msg->rsp_size < 3)) {
+               /* commands must have at least 4 bytes, responses 5. */
+               if (is_cmd && (msg->rsp_size < 4)) {
                        ipmi_inc_stat(intf, invalid_commands);
                        goto out;
                }
-               if (!is_cmd && (msg->rsp_size < 4))
-                       goto return_unspecified;
+               if (!is_cmd && (msg->rsp_size < 5)) {
+                       ipmi_inc_stat(intf, invalid_ipmb_responses);
+                       /* Construct a valid error response. */
+                       msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
+                       msg->rsp[0] |= (1 << 2); /* Make it a response */
+                       msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
+                       msg->rsp[1] = msg->data[1]; /* Addr */
+                       msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
+                       msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
+                       msg->rsp[3] = msg->data[3]; /* Cmd */
+                       msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
+                       msg->rsp_size = 5;
+               }
        } else if ((msg->data_size >= 2)
            && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
            && (msg->data[1] == IPMI_SEND_MSG_CMD)
@@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
        if (rv) {
                rv->done = free_smi_msg;
                rv->user_data = NULL;
+               rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
                atomic_inc(&smi_msg_inuse_count);
        }
        return rv;
@@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void)
 
        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
+       remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+       if (!remove_work_wq) {
+               pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+               rv = -ENOMEM;
+               goto out;
+       }
+
        initialized = true;
 
 out:
@@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void)
        int count;
 
        if (initialized) {
+               destroy_workqueue(remove_work_wq);
+
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &panic_block);
 
index d3e905cf867d7eb9c553b772c321202cc1f1ab40..b23758083ce52d37d74393a24bc213a4e5ac23de 100644 (file)
@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
        .probe = imx8qxp_lpcg_clk_probe,
 };
 
-builtin_platform_driver(imx8qxp_lpcg_clk_driver);
+module_platform_driver(imx8qxp_lpcg_clk_driver);
 
 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
index c53a688d8ccca0bc54b6561d27816277a1273bf0..40a2efb1329be4ae998d53b38cc60f994d02bac4 100644 (file)
@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = {
        },
        .probe = imx8qxp_clk_probe,
 };
-builtin_platform_driver(imx8qxp_clk_driver);
+module_platform_driver(imx8qxp_clk_driver);
 
 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
index eaedcceb766f91e9ba9ad5d61b23088f15cacaf5..8f65b9bdafce4f38dc2c58f681bd1e8c6d92de8f 100644 (file)
@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
 void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                             const struct alpha_pll_config *config)
 {
+       /*
+        * If the bootloader left the PLL enabled it's likely that there are
+        * RCGs that will lock up if we disable the PLL below.
+        */
+       if (trion_pll_is_enabled(pll, regmap)) {
+               pr_debug("Trion PLL is already enabled, skipping configuration\n");
+               return;
+       }
+
        clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
        regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
        clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
index b2d00b4519634614d2b6e1dfd6e1f648440e0321..45d9cca28064fb89465ceaa453ca31cac911c46b 100644 (file)
@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
        val &= mask;
 
        if (mux->parent_map)
-               return qcom_find_src_index(hw, mux->parent_map, val);
+               return qcom_find_cfg_index(hw, mux->parent_map, val);
 
        return val;
 }
index 0932e019dd12ee9c77d5ce7f4a8b6efb92b9aef9..75f09e6e057e1a9413b63f37230d47ab348b8c01 100644 (file)
@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
 }
 EXPORT_SYMBOL_GPL(qcom_find_src_index);
 
+int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
+{
+       int i, num_parents = clk_hw_get_num_parents(hw);
+
+       for (i = 0; i < num_parents; i++)
+               if (cfg == map[i].cfg)
+                       return i;
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
+
 struct regmap *
 qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 {
index bb39a7e106d8a94950aabb25ab3885f16a421ddf..9c8f7b798d9fc92ceea235e93a68bb3e08d138de 100644 (file)
@@ -49,6 +49,8 @@ extern void
 qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
                               u8 src);
+extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
+                              u8 cfg);
 
 extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
                                      const char *name, unsigned long rate);
index 543cfab7561f9248555ec8e1021b5100c23c94ec..431b55bb0d2f796544ab3245d2dde822b2ea0b1d 100644 (file)
@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
                .name = "gcc_sdcc1_apps_clk_src",
                .parent_data = gcc_parent_data_1,
                .num_parents = ARRAY_SIZE(gcc_parent_data_1),
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
                .name = "gcc_sdcc1_ice_core_clk_src",
                .parent_data = gcc_parent_data_0,
                .num_parents = ARRAY_SIZE(gcc_parent_data_0),
-               .ops = &clk_rcg2_floor_ops,
+               .ops = &clk_rcg2_ops,
        },
 };
 
index d52f976dc875f2986556fa493a1a8c433450110f..d5cb372f0901c0ae637de88e0025560db8db96e8 100644 (file)
@@ -543,8 +543,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)
 
        regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
        if (IS_ERR(regclk)) {
-               kfree(name);
                pr_err("error setting up syscon ICST clock %s\n", name);
+               kfree(name);
                return;
        }
        of_clk_add_provider(np, of_clk_src_simple_get, regclk);
index 9a04eacc4412ba169610ed496a30a9b7f0c5b9ab..1ecd52f903b8ddab27275f4ee9b60e05ebbd1787 100644 (file)
@@ -394,8 +394,13 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
 
 static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
 
-static void erratum_set_next_event_generic(const int access, unsigned long evt,
-                                               struct clock_event_device *clk)
+/*
+ * Force the inlining of this function so that the register accesses
+ * can be themselves correctly inlined.
+ */
+static __always_inline
+void erratum_set_next_event_generic(const int access, unsigned long evt,
+                                   struct clock_event_device *clk)
 {
        unsigned long ctrl;
        u64 cval;
index 3819ef5b709894621d4f18c7ecd7145595d4d46b..3245eb0c602d24c7aaccbe915a1da518ab58b98b 100644 (file)
@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
                        pr_warn("pclk for %pOFn is present, but could not be activated\n",
                                np);
 
-       if (!of_property_read_u32(np, "clock-freq", rate) &&
+       if (!of_property_read_u32(np, "clock-freq", rate) ||
            !of_property_read_u32(np, "clock-frequency", rate))
                return 0;
 
index e338d2f010feb2a8978fc3ddfa22970d28aa15e5..b8d95536ee22e8530fedb72fb199c3ff8dcbf57b 100644 (file)
@@ -924,7 +924,7 @@ cpufreq_freq_attr_rw(scaling_max_freq);
 cpufreq_freq_attr_rw(scaling_governor);
 cpufreq_freq_attr_rw(scaling_setspeed);
 
-static struct attribute *default_attrs[] = {
+static struct attribute *cpufreq_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
@@ -938,6 +938,7 @@ static struct attribute *default_attrs[] = {
        &scaling_setspeed.attr,
        NULL
 };
+ATTRIBUTE_GROUPS(cpufreq);
 
 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 #define to_attr(a) container_of(a, struct freq_attr, attr)
@@ -1000,14 +1001,13 @@ static const struct sysfs_ops sysfs_ops = {
 
 static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
-       .default_attrs  = default_attrs,
+       .default_groups = cpufreq_groups,
        .release        = cpufreq_sysfs_release,
 };
 
-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+                               struct device *dev)
 {
-       struct device *dev = get_cpu_device(cpu);
-
        if (unlikely(!dev))
                return;
 
@@ -1296,8 +1296,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 
        if (policy->max_freq_req) {
                /*
-                * CPUFREQ_CREATE_POLICY notification is sent only after
-                * successfully adding max_freq_req request.
+                * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+                * notification, since CPUFREQ_CREATE_POLICY notification was
+                * sent after adding max_freq_req earlier.
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
@@ -1391,7 +1392,7 @@ static int cpufreq_online(unsigned int cpu)
        if (new_policy) {
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
-                       add_cpu_dev_symlink(policy, j);
+                       add_cpu_dev_symlink(policy, j, get_cpu_device(j));
                }
 
                policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1403,7 +1404,7 @@ static int cpufreq_online(unsigned int cpu)
 
                ret = freq_qos_add_request(&policy->constraints,
                                           policy->min_freq_req, FREQ_QOS_MIN,
-                                          policy->min);
+                                          FREQ_QOS_MIN_DEFAULT_VALUE);
                if (ret < 0) {
                        /*
                         * So we don't call freq_qos_remove_request() for an
@@ -1423,7 +1424,7 @@ static int cpufreq_online(unsigned int cpu)
 
                ret = freq_qos_add_request(&policy->constraints,
                                           policy->max_freq_req, FREQ_QOS_MAX,
-                                          policy->max);
+                                          FREQ_QOS_MAX_DEFAULT_VALUE);
                if (ret < 0) {
                        policy->max_freq_req = NULL;
                        goto out_destroy_policy;
@@ -1565,7 +1566,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        /* Create sysfs link on CPU registration */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy)
-               add_cpu_dev_symlink(policy, cpu);
+               add_cpu_dev_symlink(policy, cpu, dev);
 
        return 0;
 }
index 0879ec3c170cae97b6066de4137d0b684f9c27fc..08515f7e515fee102d0fe0e87038d2aa92f2c7d3 100644 (file)
@@ -257,7 +257,7 @@ gov_attr_rw(ignore_nice_load);
 gov_attr_rw(down_threshold);
 gov_attr_rw(freq_step);
 
-static struct attribute *cs_attributes[] = {
+static struct attribute *cs_attrs[] = {
        &sampling_rate.attr,
        &sampling_down_factor.attr,
        &up_threshold.attr,
@@ -266,6 +266,7 @@ static struct attribute *cs_attributes[] = {
        &freq_step.attr,
        NULL
 };
+ATTRIBUTE_GROUPS(cs);
 
 /************************** sysfs end ************************/
 
@@ -315,7 +316,7 @@ static void cs_start(struct cpufreq_policy *policy)
 
 static struct dbs_governor cs_governor = {
        .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
-       .kobj_type = { .default_attrs = cs_attributes },
+       .kobj_type = { .default_groups = cs_groups },
        .gov_dbs_update = cs_dbs_update,
        .alloc = cs_alloc,
        .free = cs_free,
index 3b8f924771b439740db468f758fd43f00e809078..6a41ea4729b8a62cce5e001e236e0091298a4a0d 100644 (file)
@@ -328,7 +328,7 @@ gov_attr_rw(sampling_down_factor);
 gov_attr_rw(ignore_nice_load);
 gov_attr_rw(powersave_bias);
 
-static struct attribute *od_attributes[] = {
+static struct attribute *od_attrs[] = {
        &sampling_rate.attr,
        &up_threshold.attr,
        &sampling_down_factor.attr,
@@ -337,6 +337,7 @@ static struct attribute *od_attributes[] = {
        &io_is_busy.attr,
        NULL
 };
+ATTRIBUTE_GROUPS(od);
 
 /************************** sysfs end ************************/
 
@@ -401,7 +402,7 @@ static struct od_ops od_ops = {
 
 static struct dbs_governor od_dbs_gov = {
        .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
-       .kobj_type = { .default_attrs = od_attributes },
+       .kobj_type = { .default_groups = od_groups },
        .gov_dbs_update = od_dbs_update,
        .alloc = od_alloc,
        .free = od_free,
index 815df3daae9df3710560b8ee7f11665204ea2830..bc7f7e6759bd6f05ae3853c845e9c633112967b0 100644 (file)
@@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
 
 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
 
+#define CPPC_MAX_PERF  U8_MAX
+
 static void intel_pstate_set_itmt_prio(int cpu)
 {
        struct cppc_perf_caps cppc_perf;
@@ -348,6 +350,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
        if (ret)
                return;
 
+       /*
+        * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+        * In this case we can't use CPPC.highest_perf to enable ITMT.
+        * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+        */
+       if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+               cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
        /*
         * The priorities can be set regardless of whether or not
         * sched_set_itmt_support(true) has been called and it is valid to
@@ -654,19 +664,29 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
  *     3               balance_power
  *     4               power
  */
+
+enum energy_perf_value_index {
+       EPP_INDEX_DEFAULT = 0,
+       EPP_INDEX_PERFORMANCE,
+       EPP_INDEX_BALANCE_PERFORMANCE,
+       EPP_INDEX_BALANCE_POWERSAVE,
+       EPP_INDEX_POWERSAVE,
+};
+
 static const char * const energy_perf_strings[] = {
-       "default",
-       "performance",
-       "balance_performance",
-       "balance_power",
-       "power",
+       [EPP_INDEX_DEFAULT] = "default",
+       [EPP_INDEX_PERFORMANCE] = "performance",
+       [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+       [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+       [EPP_INDEX_POWERSAVE] = "power",
        NULL
 };
-static const unsigned int epp_values[] = {
-       HWP_EPP_PERFORMANCE,
-       HWP_EPP_BALANCE_PERFORMANCE,
-       HWP_EPP_BALANCE_POWERSAVE,
-       HWP_EPP_POWERSAVE
+static unsigned int epp_values[] = {
+       [EPP_INDEX_DEFAULT] = 0, /* Unused index */
+       [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
+       [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
+       [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
+       [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
 };
 
 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
@@ -680,14 +700,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
                return epp;
 
        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
-               if (epp == HWP_EPP_PERFORMANCE)
-                       return 1;
-               if (epp == HWP_EPP_BALANCE_PERFORMANCE)
-                       return 2;
-               if (epp == HWP_EPP_BALANCE_POWERSAVE)
-                       return 3;
-               if (epp == HWP_EPP_POWERSAVE)
-                       return 4;
+               if (epp == epp_values[EPP_INDEX_PERFORMANCE])
+                       return EPP_INDEX_PERFORMANCE;
+               if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
+                       return EPP_INDEX_BALANCE_PERFORMANCE;
+               if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
+                       return EPP_INDEX_BALANCE_POWERSAVE;
+               if (epp == epp_values[EPP_INDEX_POWERSAVE])
+                       return EPP_INDEX_POWERSAVE;
                *raw_epp = epp;
                return 0;
        } else if (boot_cpu_has(X86_FEATURE_EPB)) {
@@ -747,7 +767,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
                if (use_raw)
                        epp = raw_epp;
                else if (epp == -EINVAL)
-                       epp = epp_values[pref_index - 1];
+                       epp = epp_values[pref_index];
 
                /*
                 * To avoid confusion, refuse to set EPP to any values different
@@ -833,7 +853,7 @@ static ssize_t store_energy_performance_preference(
                 * upfront.
                 */
                if (!raw)
-                       epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+                       epp = ret ? epp_values[ret] : cpu->epp_default;
 
                if (cpu->epp_cached != epp) {
                        int err;
@@ -1006,6 +1026,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
                 */
                value &= ~GENMASK_ULL(31, 24);
                value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+               /*
+                * However, make sure that EPP will be set to "performance" when
+                * the CPU is brought back online again and the "performance"
+                * scaling algorithm is still in effect.
+                */
+               cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
        }
 
        /*
@@ -1108,19 +1134,22 @@ static void intel_pstate_update_policies(void)
                cpufreq_update_policy(cpu);
 }
 
+static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
+                                          struct cpufreq_policy *policy)
+{
+       policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+                       cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+       refresh_frequency_limits(policy);
+}
+
 static void intel_pstate_update_max_freq(unsigned int cpu)
 {
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-       struct cpudata *cpudata;
 
        if (!policy)
                return;
 
-       cpudata = all_cpu_data[cpu];
-       policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
-                       cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
-
-       refresh_frequency_limits(policy);
+       __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
 
        cpufreq_cpu_release(policy);
 }
@@ -1568,8 +1597,15 @@ static void intel_pstate_notify_work(struct work_struct *work)
 {
        struct cpudata *cpudata =
                container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+       struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
+
+       if (policy) {
+               intel_pstate_get_hwp_cap(cpudata);
+               __intel_pstate_update_max_freq(cpudata, policy);
+
+               cpufreq_cpu_release(policy);
+       }
 
-       cpufreq_update_policy(cpudata->cpu);
        wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
 
@@ -1663,10 +1699,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
-       if (cpudata->epp_default == -EINVAL)
-               cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 
        intel_pstate_enable_hwp_interrupt(cpudata);
+
+       if (cpudata->epp_default >= 0)
+               return;
+
+       if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
+               cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+       } else {
+               cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+               intel_pstate_set_epp(cpudata, cpudata->epp_default);
+       }
 }
 
 static int atom_get_min_pstate(void)
@@ -2353,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
        X86_MATCH(BROADWELL_D,          core_funcs),
        X86_MATCH(BROADWELL_X,          core_funcs),
        X86_MATCH(SKYLAKE_X,            core_funcs),
+       X86_MATCH(ICELAKE_X,            core_funcs),
        {}
 };
 
@@ -2469,18 +2514,14 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
         * HWP needs some special consideration, because HWP_REQUEST uses
         * abstract values to represent performance rather than pure ratios.
         */
-       if (hwp_active) {
-               intel_pstate_get_hwp_cap(cpu);
+       if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+               int scaling = cpu->pstate.scaling;
+               int freq;
 
-               if (cpu->pstate.scaling != perf_ctl_scaling) {
-                       int scaling = cpu->pstate.scaling;
-                       int freq;
-
-                       freq = max_policy_perf * perf_ctl_scaling;
-                       max_policy_perf = DIV_ROUND_UP(freq, scaling);
-                       freq = min_policy_perf * perf_ctl_scaling;
-                       min_policy_perf = DIV_ROUND_UP(freq, scaling);
-               }
+               freq = max_policy_perf * perf_ctl_scaling;
+               max_policy_perf = DIV_ROUND_UP(freq, scaling);
+               freq = min_policy_perf * perf_ctl_scaling;
+               min_policy_perf = DIV_ROUND_UP(freq, scaling);
        }
 
        pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -3332,6 +3373,16 @@ static bool intel_pstate_hwp_is_enabled(void)
        return !!(value & 0x1);
 }
 
+static const struct x86_cpu_id intel_epp_balance_perf[] = {
+       /*
+        * Set EPP value as 102, this is the max suggested EPP
+        * which can result in one core turbo frequency for
+        * AlderLake Mobile CPUs.
+        */
+       X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+       {}
+};
+
 static int __init intel_pstate_init(void)
 {
        static struct cpudata **_all_cpu_data;
@@ -3421,6 +3472,13 @@ hwp_cpu_matched:
 
        intel_pstate_sysfs_expose_params();
 
+       if (hwp_active) {
+               const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+
+               if (id)
+                       epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+       }
+
        mutex_lock(&intel_pstate_driver_lock);
        rc = intel_pstate_register_driver(default_driver);
        mutex_unlock(&intel_pstate_driver_lock);
index f57a39ddd0635e8b830d8b51a6e0c3b69933024c..ab7fd896d2c43dd0d8635d0d2cbc544d6cfd13ab 100644 (file)
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
        int i;
 
        table = &buffer->sg_table;
-       for_each_sg(table->sgl, sg, table->nents, i) {
+       for_each_sgtable_sg(table, sg, i) {
                struct page *page = sg_page(sg);
 
                __free_pages(page, compound_order(page));
index de416f9e792132ad944a697a46ac2e87f7a84999..f5219334fd3a56cf68fe8e3a28e55905d43a93a7 100644 (file)
@@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes {
        __le16 reserved;
 };
 
+struct scmi_msg_resp_base_discover_agent {
+       __le32 agent_id;
+       u8 name[SCMI_MAX_STR_SIZE];
+};
+
+
 struct scmi_msg_base_error_notify {
        __le32 event_control;
 #define BASE_TP_NOTIFY_ALL     BIT(0)
@@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
                                        int id, char *name)
 {
        int ret;
+       struct scmi_msg_resp_base_discover_agent *agent_info;
        struct scmi_xfer *t;
 
        ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
-                                     sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
+                                     sizeof(__le32), sizeof(*agent_info), &t);
        if (ret)
                return ret;
 
        put_unaligned_le32(id, t->tx.buf);
 
        ret = ph->xops->do_xfer(ph, t);
-       if (!ret)
-               strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+       if (!ret) {
+               agent_info = t->rx.buf;
+               strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
+       }
 
        ph->xops->xfer_put(ph, t);
 
index 4371fdcd5a73f3d65d87cbeff23a1fe1197de3bc..581d34c9576954d0b4563090e2d7e08f5ec550da 100644 (file)
@@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
        scmi_pd_data->domains = domains;
        scmi_pd_data->num_domains = num_domains;
 
-       of_genpd_add_provider_onecell(np, scmi_pd_data);
-
-       return 0;
+       return of_genpd_add_provider_onecell(np, scmi_pd_data);
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
index 308471586381f4261346d57f3c51659a35ffb081..cdbb287bd8bcd4d8da9d01bb81ff35f5139d1505 100644 (file)
@@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
        if (ret)
                return ret;
 
-       put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
+       put_unaligned_le32(sensor_id, t->tx.buf);
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
                struct sensors_info *si = ph->get_priv(ph);
index 11e8efb7137512fb292bb6055501829bec0cc881..87039c5c03fdb96c3de61dc6e58d519f918914e7 100644 (file)
@@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
 }
 
 static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
-                              struct scmi_vio_msg *msg)
+                              struct scmi_vio_msg *msg,
+                              struct device *dev)
 {
        struct scatterlist sg_in;
        int rc;
@@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
 
        rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
        if (rc)
-               dev_err_once(vioch->cinfo->dev,
-                            "failed to add to virtqueue (%d)\n", rc);
+               dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
        else
                virtqueue_kick(vioch->vqueue);
 
@@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
                                  struct scmi_vio_msg *msg)
 {
        if (vioch->is_rx) {
-               scmi_vio_feed_vq_rx(vioch, msg);
+               scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
        } else {
                /* Here IRQs are assumed to be already disabled by the caller */
                spin_lock(&vioch->lock);
@@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                        list_add_tail(&msg->list, &vioch->free_list);
                        spin_unlock_irqrestore(&vioch->lock, flags);
                } else {
-                       scmi_vio_feed_vq_rx(vioch, msg);
+                       scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
                }
        }
 
index a5048956a0be9441be1cb9035b50933b929d658e..ac08e819088bba8a530b37e30e248f67239c33d5 100644 (file)
@@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
                        int cnt;
 
                        cmd->domain_id = cpu_to_le32(v->id);
-                       cmd->level_index = desc_index;
+                       cmd->level_index = cpu_to_le32(desc_index);
                        ret = ph->xops->do_xfer(ph, tl);
                        if (ret)
                                break;
index 581aa5e9b0778bea6e7c853cd7c23a9bbc07f3e3..dd7c3d5e8b0bbabd32cf9395e3e39f6bf5c77d20 100644 (file)
@@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_SOC_ID, &res);
 
-       if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+       if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
                pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
                return 0;
        }
index 072ed610f9c666aaae98594806925d2e410dddf5..60d9374c72c025515afab18fe33180d5acda9e02 100644 (file)
@@ -523,6 +523,7 @@ config GPIO_REG
 config GPIO_ROCKCHIP
        tristate "Rockchip GPIO support"
        depends on ARCH_ROCKCHIP || COMPILE_TEST
+       select GENERIC_IRQ_CHIP
        select GPIOLIB_IRQCHIP
        default ARCH_ROCKCHIP
        help
index aeec4bf0b6250ae0e6a8589a98f9f0014b5b6fef..84f96b78f32af34d9ce45a2172a1953887b9c65c 100644 (file)
@@ -434,7 +434,7 @@ static void virtio_gpio_event_vq(struct virtqueue *vq)
                ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio);
                if (ret)
                        dev_err(dev, "failed to handle interrupt: %d\n", ret);
-       };
+       }
 }
 
 static void virtio_gpio_request_vq(struct virtqueue *vq)
index 71acd577803ec3a6f9899883c2e25f9ab745c47f..6348559608ce78bb5febda33b4cc69f9b99d9b12 100644 (file)
@@ -646,12 +646,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
        if (IS_ERR(gobj))
                return PTR_ERR(gobj);
 
-       /* Import takes an extra reference on the dmabuf. Drop it now to
-        * avoid leaking it. We only need the one reference in
-        * kgd_mem->dmabuf.
-        */
-       dma_buf_put(mem->dmabuf);
-
        *bo = gem_to_amdgpu_bo(gobj);
        (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
        (*bo)->parent = amdgpu_bo_ref(mem->bo);
@@ -1402,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
-       struct drm_gem_object *gobj;
+       struct drm_gem_object *gobj = NULL;
        u32 domain, alloc_domain;
        u64 alloc_flags;
        int ret;
@@ -1512,14 +1506,16 @@ allocate_init_user_pages_failed:
        remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
        drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
-       drm_gem_object_put(gobj);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;
 err_bo_create:
        unreserve_mem_limit(adev, size, alloc_domain, !!sg);
 err_reserve_limit:
        mutex_destroy(&(*mem)->lock);
-       kfree(*mem);
+       if (gobj)
+               drm_gem_object_put(gobj);
+       else
+               kfree(*mem);
 err:
        if (sg) {
                sg_free_table(sg);
index 96b7bb13a2dd95f424925194d55ac33786b0924e..12a6b1c99c93e9d25f3146456af1f24e5807e50c 100644 (file)
@@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
        WREG32(adev->bios_scratch_reg_offset + 3, tmp);
 }
 
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+                                                     u32 backlight_level)
+{
+       u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);
+
+       tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+       tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+               ATOM_S2_CURRENT_BL_LEVEL_MASK;
+
+       WREG32(adev->bios_scratch_reg_offset + 2, tmp);
+}
+
 bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
 {
        u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
index 8cc0222dba1910e1569a88d4e8d4313c3420b76c..27e74b1fc260a3b27e97e1fb576bad29457908c3 100644 (file)
@@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
 void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
                                              bool hung);
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+                                                     u32 backlight_level);
 bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
index b9c11c2b2885a32ca6633450d43eebf66edb4148..0de66f59adb8ab0aaf4e555ea8220ae1716f45e9 100644 (file)
@@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
 
        amdgpu_connector_get_edid(connector);
        ret = amdgpu_connector_ddc_get_modes(connector);
+       amdgpu_get_native_mode(connector);
 
        return ret;
 }
index 5625f7736e37874f226d8efd90bccc08b61d7131..1e651b9591419e340031ae52c20737de79905ed7 100644 (file)
@@ -3509,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                adev->rmmio_size = pci_resource_len(adev->pdev, 2);
        }
 
+       for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
+               atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
+
        adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
        if (adev->rmmio == NULL) {
                return -ENOMEM;
@@ -3830,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
        /* disable all interrupts */
        amdgpu_irq_disable_all(adev);
        if (adev->mode_info.mode_config_initialized){
-               if (!amdgpu_device_has_dc_support(adev))
+               if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
                        drm_helper_force_disable_all(adev_to_drm(adev));
                else
                        drm_atomic_helper_shutdown(adev_to_drm(adev));
@@ -4286,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 {
        int r;
 
+       amdgpu_amdkfd_pre_reset(adev);
+
        if (from_hypervisor)
                r = amdgpu_virt_request_full_gpu(adev, true);
        else
@@ -5028,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
 
-               amdgpu_amdkfd_pre_reset(tmp_adev);
+               if (!amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_pre_reset(tmp_adev);
 
                /*
                 * Mark these ASICs to be reseted as untracked first
@@ -5086,7 +5092,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
 
        tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
        /* Actual ASIC resets if needed.*/
-       /* TODO Implement XGMI hive reset logic for SRIOV */
+       /* Host driver will handle XGMI hive reset for SRIOV */
        if (amdgpu_sriov_vf(adev)) {
                r = amdgpu_device_reset_sriov(adev, job ? false : true);
                if (r)
@@ -5127,7 +5133,7 @@ skip_hw_reset:
                        drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
                }
 
-               if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+               if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
                        drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
                }
 
@@ -5148,7 +5154,7 @@ skip_sched_resume:
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                /* unlock kfd: SRIOV would do it separately */
                if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
-                       amdgpu_amdkfd_post_reset(tmp_adev);
+                       amdgpu_amdkfd_post_reset(tmp_adev);
 
                /* kfd_post_reset will do nothing if kfd device is not initialized,
                 * need to bring up kfd here if it's not be initialized before
index ff70bc233489f8a8f488a84c628a40e9701890b0..ea00090b3fb36f93e65c3c1d60c93a591c328eaa 100644 (file)
@@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
        [HDP_HWIP]      = HDP_HWID,
        [SDMA0_HWIP]    = SDMA0_HWID,
        [SDMA1_HWIP]    = SDMA1_HWID,
+       [SDMA2_HWIP]    = SDMA2_HWID,
+       [SDMA3_HWIP]    = SDMA3_HWID,
        [MMHUB_HWIP]    = MMHUB_HWID,
        [ATHUB_HWIP]    = ATHUB_HWID,
        [NBIO_HWIP]     = NBIF_HWID,
@@ -248,8 +250,8 @@ get_from_vram:
 
        offset = offsetof(struct binary_header, binary_checksum) +
                sizeof(bhdr->binary_checksum);
-       size = bhdr->binary_size - offset;
-       checksum = bhdr->binary_checksum;
+       size = le16_to_cpu(bhdr->binary_size) - offset;
+       checksum = le16_to_cpu(bhdr->binary_checksum);
 
        if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
                                              size, checksum)) {
@@ -270,7 +272,7 @@ get_from_vram:
        }
 
        if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
-                                             ihdr->size, checksum)) {
+                                             le16_to_cpu(ihdr->size), checksum)) {
                DRM_ERROR("invalid ip discovery data table checksum\n");
                r = -EINVAL;
                goto out;
@@ -282,7 +284,7 @@ get_from_vram:
        ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
 
        if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
-                                             ghdr->size, checksum)) {
+                                             le32_to_cpu(ghdr->size), checksum)) {
                DRM_ERROR("invalid gc data table checksum\n");
                r = -EINVAL;
                goto out;
@@ -489,10 +491,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
                        le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
 
        for (i = 0; i < 32; i++) {
-               if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
+               if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
                        break;
 
-               switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
+               switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
                case VCN_HWID:
                        vcn_harvest_count++;
                        if (harvest_info->list[i].number_instance == 0)
@@ -587,6 +589,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                break;
        default:
+               dev_err(adev->dev,
+                       "Failed to add common ip block(GC_HWIP:0x%x)\n",
+                       adev->ip_versions[GC_HWIP][0]);
                return -EINVAL;
        }
        return 0;
@@ -619,6 +624,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                break;
        default:
+               dev_err(adev->dev,
+                       "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
+                       adev->ip_versions[GC_HWIP][0]);
                return -EINVAL;
        }
        return 0;
@@ -648,6 +656,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                break;
        default:
+               dev_err(adev->dev,
+                       "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
+                       adev->ip_versions[OSSSYS_HWIP][0]);
                return -EINVAL;
        }
        return 0;
@@ -688,6 +699,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
                break;
        default:
+               dev_err(adev->dev,
+                       "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
+                       adev->ip_versions[MP0_HWIP][0]);
                return -EINVAL;
        }
        return 0;
@@ -726,6 +740,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
                break;
        default:
+               dev_err(adev->dev,
+                       "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
+                       adev->ip_versions[MP1_HWIP][0]);
                return -EINVAL;
        }
        return 0;
@@ -753,6 +770,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
                        break;
                default:
+                       dev_err(adev->dev,
+                               "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
+                               adev->ip_versions[DCE_HWIP][0]);
                        return -EINVAL;
                }
        } else if (adev->ip_versions[DCI_HWIP][0]) {
@@ -763,6 +783,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
                        break;
                default:
+                       dev_err(adev->dev,
+                               "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
+                               adev->ip_versions[DCI_HWIP][0]);
                        return -EINVAL;
                }
 #endif
@@ -796,6 +819,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                break;
        default:
+               dev_err(adev->dev,
+                       "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
+                       adev->ip_versions[GC_HWIP][0]);
                return -EINVAL;
        }
        return 0;
@@ -829,6 +855,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
                break;
        default:
+               dev_err(adev->dev,
+                       "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
+                       adev->ip_versions[SDMA0_HWIP][0]);
                return -EINVAL;
        }
        return 0;
@@ -845,6 +874,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
                                amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
                        break;
                default:
+                       dev_err(adev->dev,
+                               "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
+                               adev->ip_versions[UVD_HWIP][0]);
                        return -EINVAL;
                }
                switch (adev->ip_versions[VCE_HWIP][0]) {
@@ -855,6 +887,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
                                amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
                        break;
                default:
+                       dev_err(adev->dev,
+                               "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
+                               adev->ip_versions[VCE_HWIP][0]);
                        return -EINVAL;
                }
        } else {
@@ -885,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 64):
                case IP_VERSION(3, 1, 1):
                case IP_VERSION(3, 0, 2):
+               case IP_VERSION(3, 0, 192):
                        amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                        if (!amdgpu_sriov_vf(adev))
                                amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
@@ -893,6 +929,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
                        amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                        break;
                default:
+                       dev_err(adev->dev,
+                               "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
+                               adev->ip_versions[UVD_HWIP][0]);
                        return -EINVAL;
                }
        }
index f3d62e196901a85fe77d3221e80dd91b8d64dcd4..0c7963dfacad1e4ce458ae6993b3d5fdeafe0091 100644 (file)
@@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
  */
 int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 {
-       unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
+       unsigned int count;
        u32 wptr;
 
        if (!ih->enabled || adev->shutdown)
@@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
        wptr = amdgpu_ih_get_wptr(adev, ih);
 
 restart_ih:
+       count  = AMDGPU_IH_MAX_NUM_IVS;
        DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
 
        /* Order reading of wptr vs. reading of IH ring data */
index 4f7c70845785a9ed20efd1f5f78ab85e6a63d043..585961c2f5f27c34ccaf15d68a10a57367c1265a 100644 (file)
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                break;
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 64):
+       case IP_VERSION(3, 0, 192):
                if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
                        fw_name = FIRMWARE_SIENNA_CICHLID;
                else
index ce982afeff913e7ac064954a0a384eb06a5512f7..ac9a8cd21c4b64b2c90d51c830f3f21d9589d2f3 100644 (file)
@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
        int i = 0;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++)
-               if (adev->mode_info.crtcs[i])
-                       hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+               if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
+                       hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
 
        kfree(adev->mode_info.bios_hardcoded_edid);
        kfree(adev->amdgpu_vkms_output);
index 0fad2bf854ae9dc10096c5f8b931d8b7ba792939..567df2db23ac783aa851011194298b55429a6bed 100644 (file)
@@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
                        "%s", "xgmi_hive_info");
        if (ret) {
                dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+               kobject_put(&hive->kobj);
                kfree(hive);
                hive = NULL;
                goto pro_end;
index e7dfeb466a0e4dd41b14e39ed2bb3ff5a3756ad8..dbe7442fb25cc4968e4237f7dbef1fbd3eab1c03 100644 (file)
@@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 3):
-               clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
-                       ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+               preempt_disable();
+               clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+               clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+               hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+               /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+                * roughly every 42 seconds.
+                */
+               if (hi_check != clock_hi) {
+                       clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+                       clock_hi = hi_check;
+               }
+               preempt_enable();
+               clock = clock_lo | (clock_hi << 32ULL);
                break;
        default:
                preempt_disable();
index b4b80f27b894098f822068b2ba3ccc656f28ef6b..b305fd39874fe68a8ab7f5141c3f01aaa0f7517d 100644 (file)
@@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
 #define mmTCP_CHAN_STEER_5_ARCT                                                                0x0b0c
 #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                                                       0
 
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir                0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX       1
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1
+
 enum ta_ras_gfx_subblock {
        /*CPC*/
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -4055,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)
 
        gfx_v9_0_cp_enable(adev, false);
 
-       /* Skip suspend with A+A reset */
-       if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
-               dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+       /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
+       if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
+           (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
+               dev_dbg(adev->dev, "Skipping RLC halt\n");
                return 0;
        }
 
@@ -4238,19 +4244,38 @@ failed_kiq_read:
 
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
-       uint64_t clock;
+       uint64_t clock, clock_lo, clock_hi, hi_check;
 
-       amdgpu_gfx_off_ctrl(adev, false);
-       mutex_lock(&adev->gfx.gpu_clock_mutex);
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
-               clock = gfx_v9_0_kiq_read_clock(adev);
-       } else {
-               WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
-               clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
-                       ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+       switch (adev->ip_versions[GC_HWIP][0]) {
+       case IP_VERSION(9, 3, 0):
+               preempt_disable();
+               clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+               clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+               hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+               /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+                * roughly every 42 seconds.
+                */
+               if (hi_check != clock_hi) {
+                       clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+                       clock_hi = hi_check;
+               }
+               preempt_enable();
+               clock = clock_lo | (clock_hi << 32ULL);
+               break;
+       default:
+               amdgpu_gfx_off_ctrl(adev, false);
+               mutex_lock(&adev->gfx.gpu_clock_mutex);
+               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
+                       clock = gfx_v9_0_kiq_read_clock(adev);
+               } else {
+                       WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+                       clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+                               ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+               }
+               mutex_unlock(&adev->gfx.gpu_clock_mutex);
+               amdgpu_gfx_off_ctrl(adev, true);
+               break;
        }
-       mutex_unlock(&adev->gfx.gpu_clock_mutex);
-       amdgpu_gfx_off_ctrl(adev, true);
        return clock;
 }
 
index 1d8414c3fadb6d5edd6ba924e0ec6ae5631f5180..38241cf0e1f1639f5d1e5ac97c5f6d0d7d716706 100644 (file)
@@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
 
        tmp = RREG32(ih_regs->ih_rb_cntl);
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
        /* enable_intr field is only valid in ring0 */
        if (ih == &adev->irq.ih)
                tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev,
        tmp = navi10_ih_rb_cntl(ih, tmp);
        if (ih == &adev->irq.ih)
                tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
-       if (ih == &adev->irq.ih1) {
-               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+       if (ih == &adev->irq.ih1)
                tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
-       }
 
        if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
                if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
@@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
 {
        struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
        u32 ih_chicken;
-       u32 tmp;
        int ret;
        int i;
 
@@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
        adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
                                            ih[0]->doorbell_index);
 
-       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
-       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
-                           CLIENT18_IS_STORM_CLIENT, 1);
-       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
-       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
-       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
-       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
        pci_set_master(adev->pdev);
 
        /* enable interrupts */
@@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
        u32 wptr, tmp;
        struct amdgpu_ih_regs *ih_regs;
 
-       wptr = le32_to_cpu(*ih->wptr_cpu);
-       ih_regs = &ih->ih_regs;
+       if (ih == &adev->irq.ih) {
+               /* Only ring0 supports writeback. On other rings fall back
+                * to register-based code with overflow checking below.
+                */
+               wptr = le32_to_cpu(*ih->wptr_cpu);
 
-       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
-               goto out;
+               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+                       goto out;
+       }
 
+       ih_regs = &ih->ih_regs;
+
+       /* Double check that the overflow wasn't already cleared. */
        wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
        if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
                goto out;
@@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev,
                              struct amdgpu_irq_src *source,
                              struct amdgpu_iv_entry *entry)
 {
-       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
        switch (entry->ring_id) {
        case 1:
-               *adev->irq.ih1.wptr_cpu = wptr;
                schedule_work(&adev->irq.ih1_work);
                break;
        case 2:
-               *adev->irq.ih2.wptr_cpu = wptr;
                schedule_work(&adev->irq.ih2_work);
                break;
        default: break;
index 4ecd2b5808cee083349b869df527a42d25fcdfeb..ee7cab37dfd58a0a76ec658c8bf1e0d06dca37b1 100644 (file)
@@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 
        if (def != data)
                WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+       if (amdgpu_sriov_vf(adev))
+               adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+                       mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
 #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT         0x00000000 // off by default, no gains over L1
index 0d2d629e2d6a2223c5fdb7e7461288e90332136f..4bbacf1be25a413b9ae1af0c064421333c7e2acb 100644 (file)
@@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 
        if (def != data)
                WREG32_PCIE(smnPCIE_CI_CNTL, data);
+
+       if (amdgpu_sriov_vf(adev))
+               adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+                       mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
 static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
index 3c00666a13e16b45213a98da1aa8151628640d5c..37a4039fdfc53da3b85a4a18bc05e2891bee4671 100644 (file)
@@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
 
 static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 {
-
+       if (amdgpu_sriov_vf(adev))
+               adev->rmmio_remap.reg_offset =
+                       SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
index 8f2a315e7c73ce8d874dd31238be23bf547bedb8..3444332ea1104e5334a0c96438c9f0f409b26f60 100644 (file)
@@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
                if (def != data)
                        WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
        }
+
+       if (amdgpu_sriov_vf(adev))
+               adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+                       regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
index b8bd03d16dbaf13ec5634a92b60e6babf08b9885..dc5e93756fea4fd8d7a193dd75774604b49cd570 100644 (file)
@@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
 
 static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 {
-
+       if (amdgpu_sriov_vf(adev))
+               adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+                       mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
 static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
@@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
 {
        uint32_t def, data;
 
+       if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
+               return;
+
        def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
        data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
        data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
index 59eafa31c626ac6fdafbe84ad68fc0da0e035260..2ec1ffb36b1fc54db2b840b2498a6963d1a36a69 100644 (file)
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 64):
+       case IP_VERSION(3, 0, 192):
                if (amdgpu_sriov_vf(adev)) {
                        if (encode)
                                *codecs = &sriov_sc_video_codecs_encode;
@@ -731,8 +732,10 @@ static int nv_common_early_init(void *handle)
 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
-       adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+       if (!amdgpu_sriov_vf(adev)) {
+               adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+               adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+       }
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &nv_pcie_rreg;
@@ -1032,7 +1035,7 @@ static int nv_common_hw_init(void *handle)
         * for the purpose of expose those registers
         * to process space
         */
-       if (adev->nbio.funcs->remap_hdp_registers)
+       if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
                adev->nbio.funcs->remap_hdp_registers(adev);
        /* enable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, true);
index 0c316a2d42ed2431f64e3c7b0833b1b91f83c50b..de9b55383e9f8088cb8dc8425fe47f86a7686208 100644 (file)
@@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle)
 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
-       adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+       if (!amdgpu_sriov_vf(adev)) {
+               adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+               adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+       }
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &soc15_pcie_rreg;
@@ -1285,7 +1287,7 @@ static int soc15_common_hw_init(void *handle)
         * for the purpose of expose those registers
         * to process space
         */
-       if (adev->nbio.funcs->remap_hdp_registers)
+       if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
                adev->nbio.funcs->remap_hdp_registers(adev);
 
        /* enable the doorbell aperture */
index 003ba6a373ff459a050af89fcd3564e6d6837bc1..93e33dd84dd41cf37a84c815718d95ba5de2cae4 100644 (file)
@@ -1226,6 +1226,11 @@ static int stop_cpsch(struct device_queue_manager *dqm)
        bool hanging;
 
        dqm_lock(dqm);
+       if (!dqm->sched_running) {
+               dqm_unlock(dqm);
+               return 0;
+       }
+
        if (!dqm->is_hws_hang)
                unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
        hanging = dqm->is_hws_hang || dqm->is_resetting;
index 94e92c0812db733535244a2d18133baf280903f2..8fd48d0ed240ceb0bcccd634819d71b2837ceba4 100644 (file)
@@ -766,7 +766,7 @@ struct svm_range_list {
        struct list_head                deferred_range_list;
        spinlock_t                      deferred_list_lock;
        atomic_t                        evicted_ranges;
-       bool                            drain_pagefaults;
+       atomic_t                        drain_pagefaults;
        struct delayed_work             restore_work;
        DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
        struct task_struct              *faulting_task;
index 16137c4247bbec50b4a2b41fb2af55e04f992dd8..3cb4681c5f539abe9075df9a3c237dc62a3ac313 100644 (file)
@@ -1574,7 +1574,6 @@ retry_flush_work:
 static void svm_range_restore_work(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
-       struct amdkfd_process_info *process_info;
        struct svm_range_list *svms;
        struct svm_range *prange;
        struct kfd_process *p;
@@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
         * the lifetime of this thread, kfd_process and mm will be valid.
         */
        p = container_of(svms, struct kfd_process, svms);
-       process_info = p->kgd_process_info;
        mm = p->mm;
        if (!mm)
                return;
 
-       mutex_lock(&process_info->lock);
        svm_range_list_lock_and_flush_work(svms, mm);
        mutex_lock(&svms->lock);
 
@@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
 out_reschedule:
        mutex_unlock(&svms->lock);
        mmap_write_unlock(mm);
-       mutex_unlock(&process_info->lock);
 
        /* If validation failed, reschedule another attempt */
        if (evicted_ranges) {
@@ -1968,10 +1964,16 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
        struct kfd_process_device *pdd;
        struct amdgpu_device *adev;
        struct kfd_process *p;
+       int drain;
        uint32_t i;
 
        p = container_of(svms, struct kfd_process, svms);
 
+restart:
+       drain = atomic_read(&svms->drain_pagefaults);
+       if (!drain)
+               return;
+
        for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
                pdd = p->pdds[i];
                if (!pdd)
@@ -1983,6 +1985,8 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
                amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
                pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
        }
+       if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
+               goto restart;
 }
 
 static void svm_range_deferred_list_work(struct work_struct *work)
@@ -1990,43 +1994,41 @@ static void svm_range_deferred_list_work(struct work_struct *work)
        struct svm_range_list *svms;
        struct svm_range *prange;
        struct mm_struct *mm;
+       struct kfd_process *p;
 
        svms = container_of(work, struct svm_range_list, deferred_list_work);
        pr_debug("enter svms 0x%p\n", svms);
 
+       p = container_of(svms, struct kfd_process, svms);
+       /* Avoid mm is gone when inserting mmu notifier */
+       mm = get_task_mm(p->lead_thread);
+       if (!mm) {
+               pr_debug("svms 0x%p process mm gone\n", svms);
+               return;
+       }
+retry:
+       mmap_write_lock(mm);
+
+       /* Checking for the need to drain retry faults must be inside
+        * mmap write lock to serialize with munmap notifiers.
+        */
+       if (unlikely(atomic_read(&svms->drain_pagefaults))) {
+               mmap_write_unlock(mm);
+               svm_range_drain_retry_fault(svms);
+               goto retry;
+       }
+
        spin_lock(&svms->deferred_list_lock);
        while (!list_empty(&svms->deferred_range_list)) {
                prange = list_first_entry(&svms->deferred_range_list,
                                          struct svm_range, deferred_list);
+               list_del_init(&prange->deferred_list);
                spin_unlock(&svms->deferred_list_lock);
+
                pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
                         prange->start, prange->last, prange->work_item.op);
 
-               mm = prange->work_item.mm;
-retry:
-               mmap_write_lock(mm);
                mutex_lock(&svms->lock);
-
-               /* Checking for the need to drain retry faults must be in
-                * mmap write lock to serialize with munmap notifiers.
-                *
-                * Remove from deferred_list must be inside mmap write lock,
-                * otherwise, svm_range_list_lock_and_flush_work may hold mmap
-                * write lock, and continue because deferred_list is empty, then
-                * deferred_list handle is blocked by mmap write lock.
-                */
-               spin_lock(&svms->deferred_list_lock);
-               if (unlikely(svms->drain_pagefaults)) {
-                       svms->drain_pagefaults = false;
-                       spin_unlock(&svms->deferred_list_lock);
-                       mutex_unlock(&svms->lock);
-                       mmap_write_unlock(mm);
-                       svm_range_drain_retry_fault(svms);
-                       goto retry;
-               }
-               list_del_init(&prange->deferred_list);
-               spin_unlock(&svms->deferred_list_lock);
-
                mutex_lock(&prange->migrate_mutex);
                while (!list_empty(&prange->child_list)) {
                        struct svm_range *pchild;
@@ -2042,12 +2044,13 @@ retry:
 
                svm_range_handle_list_op(svms, prange);
                mutex_unlock(&svms->lock);
-               mmap_write_unlock(mm);
 
                spin_lock(&svms->deferred_list_lock);
        }
        spin_unlock(&svms->deferred_list_lock);
 
+       mmap_write_unlock(mm);
+       mmput(mm);
        pr_debug("exit svms 0x%p\n", svms);
 }
 
@@ -2056,12 +2059,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
                        struct mm_struct *mm, enum svm_work_list_ops op)
 {
        spin_lock(&svms->deferred_list_lock);
-       /* Make sure pending page faults are drained in the deferred worker
-        * before the range is freed to avoid straggler interrupts on
-        * unmapped memory causing "phantom faults".
-        */
-       if (op == SVM_OP_UNMAP_RANGE)
-               svms->drain_pagefaults = true;
        /* if prange is on the deferred list */
        if (!list_empty(&prange->deferred_list)) {
                pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@@ -2140,6 +2137,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
        pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
                 prange, prange->start, prange->last, start, last);
 
+       /* Make sure pending page faults are drained in the deferred worker
+        * before the range is freed to avoid straggler interrupts on
+        * unmapped memory causing "phantom faults".
+        */
+       atomic_inc(&svms->drain_pagefaults);
+
        unmap_parent = start <= prange->start && last >= prange->last;
 
        list_for_each_entry(pchild, &prange->child_list, child_list) {
@@ -2559,20 +2562,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
 }
 
 static bool
-svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
+svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
 {
        unsigned long requested = VM_READ;
-       struct vm_area_struct *vma;
 
        if (write_fault)
                requested |= VM_WRITE;
 
-       vma = find_vma(mm, addr << PAGE_SHIFT);
-       if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
-               pr_debug("address 0x%llx VMA is removed\n", addr);
-               return true;
-       }
-
        pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
                vma->vm_flags);
        return (vma->vm_flags & requested) == requested;
@@ -2590,6 +2586,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
        int32_t best_loc;
        int32_t gpuidx = MAX_GPU_INSTANCE;
        bool write_locked = false;
+       struct vm_area_struct *vma;
        int r = 0;
 
        if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
@@ -2600,7 +2597,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
        p = kfd_lookup_process_by_pasid(pasid);
        if (!p) {
                pr_debug("kfd process not founded pasid 0x%x\n", pasid);
-               return -ESRCH;
+               return 0;
        }
        if (!p->xnack_enabled) {
                pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
@@ -2611,10 +2608,19 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 
        pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
 
+       if (atomic_read(&svms->drain_pagefaults)) {
+               pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+               r = 0;
+               goto out;
+       }
+
+       /* p->lead_thread is available as kfd_process_wq_release flush the work
+        * before releasing task ref.
+        */
        mm = get_task_mm(p->lead_thread);
        if (!mm) {
                pr_debug("svms 0x%p failed to get mm\n", svms);
-               r = -ESRCH;
+               r = 0;
                goto out;
        }
 
@@ -2652,6 +2658,7 @@ retry_write_locked:
 
        if (svm_range_skip_recover(prange)) {
                amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+               r = 0;
                goto out_unlock_range;
        }
 
@@ -2660,10 +2667,21 @@ retry_write_locked:
        if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
                pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
                         svms, prange->start, prange->last);
+               r = 0;
                goto out_unlock_range;
        }
 
-       if (!svm_fault_allowed(mm, addr, write_fault)) {
+       /* __do_munmap removed VMA, return success as we are handling stale
+        * retry fault.
+        */
+       vma = find_vma(mm, addr << PAGE_SHIFT);
+       if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
+               pr_debug("address 0x%llx VMA is removed\n", addr);
+               r = 0;
+               goto out_unlock_range;
+       }
+
+       if (!svm_fault_allowed(vma, write_fault)) {
                pr_debug("fault addr 0x%llx no %s permission\n", addr,
                        write_fault ? "write" : "read");
                r = -EPERM;
@@ -2741,6 +2759,14 @@ void svm_range_list_fini(struct kfd_process *p)
        /* Ensure list work is finished before process is destroyed */
        flush_work(&p->svms.deferred_list_work);
 
+       /*
+        * Ensure no retry fault comes in afterwards, as page fault handler will
+        * not find kfd process and take mm lock to recover fault.
+        */
+       atomic_inc(&p->svms.drain_pagefaults);
+       svm_range_drain_retry_fault(&p->svms);
+
+
        list_for_each_entry_safe(prange, next, &p->svms.list, list) {
                svm_range_unlink(prange);
                svm_range_remove_notifier(prange);
@@ -2761,6 +2787,7 @@ int svm_range_list_init(struct kfd_process *p)
        mutex_init(&svms->lock);
        INIT_LIST_HEAD(&svms->list);
        atomic_set(&svms->evicted_ranges, 0);
+       atomic_set(&svms->drain_pagefaults, 0);
        INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
        INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
        INIT_LIST_HEAD(&svms->deferred_range_list);
@@ -3150,7 +3177,6 @@ static int
 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
 {
-       struct amdkfd_process_info *process_info = p->kgd_process_info;
        struct mm_struct *mm = current->mm;
        struct list_head update_list;
        struct list_head insert_list;
@@ -3169,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 
        svms = &p->svms;
 
-       mutex_lock(&process_info->lock);
-
        svm_range_list_lock_and_flush_work(svms, mm);
 
        r = svm_range_is_valid(p, start, size);
@@ -3246,8 +3270,6 @@ out_unlock_range:
        mutex_unlock(&svms->lock);
        mmap_read_unlock(mm);
 out:
-       mutex_unlock(&process_info->lock);
-
        pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
                 &p->svms, start, start + size - 1, r);
 
index c911b30de6588e707611095c61975d7241652589..122dae1a1813b3eed2517d590bc19ea95fe77346 100644 (file)
@@ -51,6 +51,7 @@
 #include <drm/drm_hdcp.h>
 #endif
 #include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
 
 #include "amd_shared.h"
 #include "amdgpu_dm_irq.h"
@@ -2561,6 +2562,23 @@ static int dm_resume(void *handle)
        if (amdgpu_in_reset(adev)) {
                dc_state = dm->cached_dc_state;
 
+               /*
+                * The dc->current_state is backed up into dm->cached_dc_state
+                * before we commit 0 streams.
+                *
+                * DC will clear link encoder assignments on the real state
+                * but the changes won't propagate over to the copy we made
+                * before the 0 streams commit.
+                *
+                * DC expects that link encoder assignments are *not* valid
+                * when committing a state, so as a workaround it needs to be
+                * cleared here.
+                */
+               link_enc_cfg_init(dm->dc, dc_state);
+
+               if (dc_enable_dmub_notifications(adev->dm.dc))
+                       amdgpu_dm_outbox_init(adev);
+
                r = dm_dmub_hw_init(adev);
                if (r)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2572,8 +2590,8 @@ static int dm_resume(void *handle)
 
                for (i = 0; i < dc_state->stream_count; i++) {
                        dc_state->streams[i]->mode_changed = true;
-                       for (j = 0; j < dc_state->stream_status->plane_count; j++) {
-                               dc_state->stream_status->plane_states[j]->update_flags.raw
+                       for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+                               dc_state->stream_status[i].plane_states[j]->update_flags.raw
                                        = 0xffffffff;
                        }
                }
@@ -2608,6 +2626,10 @@ static int dm_resume(void *handle)
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);
 
+       /* Re-enable outbox interrupts for DPIA. */
+       if (dc_enable_dmub_notifications(adev->dm.dc))
+               amdgpu_dm_outbox_init(adev);
+
        /* Before powering on DC we need to re-initialize DMUB. */
        r = dm_dmub_hw_init(adev);
        if (r)
@@ -3909,6 +3931,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
        caps = dm->backlight_caps[bl_idx];
 
        dm->brightness[bl_idx] = user_brightness;
+       /* update scratch register */
+       if (bl_idx == 0)
+               amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
        brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
        link = (struct dc_link *)dm->backlight_link[bl_idx];
 
@@ -4242,7 +4267,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
-
+                       if (dm->num_of_edps)
+                               update_connector_ext_caps(aconnector);
                        if (psr_feature_enabled)
                                amdgpu_dm_set_psr_caps(link);
                }
index cce062adc439149e3a808c920110f8409ab549f2..8a441a22c46ec7493910f7f6ead1cad05b14c778 100644 (file)
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
                        ret = -EINVAL;
                        goto cleanup;
                }
+
+               if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+                               (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+                       DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
+
        }
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
index 32a5ce09a62a9bb373b318f881e649250b951e99..cc34a35d0bcbfe2d9c6db547a160ffa7c0bba5e9 100644 (file)
@@ -36,6 +36,8 @@
 #include "dm_helpers.h"
 
 #include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
 
 #include "i2caux_interface.h"
 #include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+       if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+           (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+           link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+               return true;
+
+       return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
        struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
        u8 *dsc_branch_dec_caps = NULL;
 
        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
        /*
         * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
         * because it only check the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
         * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
         *
         */
-
-       if (!aconnector->dsc_aux && !port->parent->port_parent)
+       if (!aconnector->dsc_aux && !port->parent->port_parent &&
+           needs_dsc_aux_workaround(aconnector->dc_link))
                aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
+
        if (!aconnector->dsc_aux)
                return false;
 
index 60544788e911ee15969e0cefa1aeb630cf1f3f44..c8457babfdea428b57a6bd5084f61cd0fa5208a4 100644 (file)
@@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
                        dal_ddc_service_set_transaction_type(link->ddc,
                                                             sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+                       /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+                        * reports DSC support.
+                        */
+                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+                                       link->type == dc_connection_mst_branch &&
+                                       link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+                                       link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+                                       !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+                               link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
                        /* In case of fallback to SST when topology discovery below fails
                         * HDCP caps will be querried again later by the upper layer (caller
@@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        LINK_INFO("link=%d, mst branch is now Disconnected\n",
                                  link->link_index);
 
+                       /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+                               link->wa_flags.dpia_mst_dsc_always_on = false;
+
                        dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
 
                        link->mst_stream_alloc_table.stream_count = 0;
index cb7bf9148904edb02534b01db8ca1e8f90b966c8..13bc69d6b6791c4616131467f940c5c0e6272eaa 100644 (file)
@@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
                }
 
                for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
-                       lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
+                       lt_settings->dpcd_lane_settings[lane].raw = 0;
        }
 
        if (status == LINK_TRAINING_SUCCESS) {
index c32fdccd4d925c96b4f1bda165453d29a0340feb..e2d9a46d0e1ad4ccf10cf5160fb070e6fffd170a 100644 (file)
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
        if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
                return false;
 
+       // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
+       if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+               return false;
+
        return true;
 }
 
@@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
 
        if (!new_ctx)
                return DC_ERROR_UNEXPECTED;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
-       /*
-        * Update link encoder to stream assignment.
-        * TODO: Split out reason allocation from validation.
-        */
-       if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
-               dc->res_pool->funcs->link_encs_assign(
-                       dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
 
        if (dc->res_pool->funcs->validate_global) {
                result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
                if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
                        result = DC_FAIL_BANDWIDTH_VALIDATE;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       /*
+        * Only update link encoder to stream assignment after bandwidth validation passed.
+        * TODO: Split out assignment and validation.
+        */
+       if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
+               dc->res_pool->funcs->link_encs_assign(
+                       dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
+#endif
+
        return result;
 }
 
index 3aac3f4a28525623382f21dd934dd39851baf1af..618e7989176fc86c964aa35a8df5d926b13b6c2f 100644 (file)
@@ -508,7 +508,8 @@ union dpia_debug_options {
                uint32_t disable_dpia:1;
                uint32_t force_non_lttpr:1;
                uint32_t extend_aux_rd_interval:1;
-               uint32_t reserved:29;
+               uint32_t disable_mst_dsc_work_around:1;
+               uint32_t reserved:28;
        } bits;
        uint32_t raw;
 };
index 180ecd860296b250fe40f6d040d6a2a33bb88817..fad3d883ed891c14e50f110f28893fd70dec9902 100644 (file)
@@ -191,6 +191,8 @@ struct dc_link {
                bool dp_skip_DID2;
                bool dp_skip_reset_segment;
                bool dp_mot_reset_segment;
+               /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+               bool dpia_mst_dsc_always_on;
        } wa_flags;
        struct link_mst_stream_allocation_table mst_stream_alloc_table;
 
@@ -224,6 +226,8 @@ static inline void get_edp_links(const struct dc *dc,
        *edp_num = 0;
        for (i = 0; i < dc->link_count; i++) {
                // report any eDP links, even unconnected DDI's
+               if (!dc->links[i])
+                       continue;
                if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
                        edp_links[*edp_num] = dc->links[i];
                        if (++(*edp_num) == MAX_NUM_EDP)
index 0b788d794fb334e0fc9e66fc67e919936bbec6f1..04d7bddc915bdec5bc296fbc47c8e6e5c0e4f1ad 100644 (file)
@@ -1637,7 +1637,7 @@ void dcn10_reset_hw_ctx_wrap(
 
                        dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
                        if (hws->funcs.enable_stream_gating)
-                               hws->funcs.enable_stream_gating(dc, pipe_ctx);
+                               hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
                        if (old_clk)
                                old_clk->funcs->cs_power_down(old_clk);
                }
index 4f88376a118f8169093f1032007a9c12e7fb8c71..e6af99ae3d9f54f691b48bea363d0e0eead8d3b6 100644 (file)
@@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap(
 
                        dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
                        if (hws->funcs.enable_stream_gating)
-                               hws->funcs.enable_stream_gating(dc, pipe_ctx);
+                               hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
                        if (old_clk)
                                old_clk->funcs->cs_power_down(old_clk);
                }
index 5dd1ce9ddb539afb2aa1dbfb16f6550086402ecd..4d4ac4ceb1e87dc664e56420c9748ace6bb5436d 100644 (file)
@@ -602,7 +602,7 @@ void dcn31_reset_hw_ctx_wrap(
 
                        dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
                        if (hws->funcs.enable_stream_gating)
-                               hws->funcs.enable_stream_gating(dc, pipe_ctx);
+                               hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
                        if (old_clk)
                                old_clk->funcs->cs_power_down(old_clk);
                }
index f1a46d16f7eacee5ce3a5acc1a4c8994e847a81b..4b9e68a79f068a488ba22c57322e584f22f29e01 100644 (file)
@@ -98,7 +98,8 @@ enum amd_ip_block_type {
        AMD_IP_BLOCK_TYPE_ACP,
        AMD_IP_BLOCK_TYPE_VCN,
        AMD_IP_BLOCK_TYPE_MES,
-       AMD_IP_BLOCK_TYPE_JPEG
+       AMD_IP_BLOCK_TYPE_JPEG,
+       AMD_IP_BLOCK_TYPE_NUM,
 };
 
 enum amd_clockgating_state {
index 03581d5b183607862828e07ba92ae1edce66de99..08362d506534ba14964e8f8152178b816443506c 100644 (file)
@@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 {
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+
+       if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+               dev_dbg(adev->dev, "IP block%d already in the target %s state!",
+                               block_type, gate ? "gate" : "ungate");
+               return 0;
+       }
 
        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
@@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
                break;
        }
 
+       if (!ret)
+               atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
+
        return ret;
 }
 
index 98f1b3d8c1d59d80e5ddfa006f4dbb348798cb32..16e3f72d31b9ff59fd2384e03b10031a22ad6193 100644 (file)
@@ -417,6 +417,12 @@ struct amdgpu_dpm {
        enum amd_dpm_forced_level forced_level;
 };
 
+enum ip_power_state {
+       POWER_STATE_UNKNOWN,
+       POWER_STATE_ON,
+       POWER_STATE_OFF,
+};
+
 struct amdgpu_pm {
        struct mutex            mutex;
        u32                     current_sclk;
@@ -452,6 +458,8 @@ struct amdgpu_pm {
        struct i2c_adapter smu_i2c;
        struct mutex            smu_i2c_mutex;
        struct list_head        pm_attr_list;
+
+       atomic_t                pwr_state[AMD_IP_BLOCK_TYPE_NUM];
 };
 
 #define R600_SSTU_DFLT                               0
index 258c573acc979849a25b67b9d27a8acdf357bfa2..1f406f21b452fa4241d849421900127dd51a0379 100644 (file)
@@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
        uint32_t min_freq, max_freq = 0;
        uint32_t ret = 0;
 
-       phm_get_sysfs_buf(&buf, &size);
-
        switch (type) {
        case PP_SCLK:
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                else
                        i = 1;
 
-               size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+               size += sprintf(buf + size, "0: %uMhz %s\n",
                                        data->gfx_min_freq_limit/100,
                                        i == 0 ? "*" : "");
-               size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+               size += sprintf(buf + size, "1: %uMhz %s\n",
                                        i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
                                        i == 1 ? "*" : "");
-               size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
+               size += sprintf(buf + size, "2: %uMhz %s\n",
                                        data->gfx_max_freq_limit/100,
                                        i == 2 ? "*" : "");
                break;
@@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
 
                for (i = 0; i < mclk_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i,
                                        mclk_table->entries[i].clk / 100,
                                        ((mclk_table->entries[i].clk / 100)
@@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        if (ret)
                                return ret;
 
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
-                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+                       size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+                       size += sprintf(buf + size, "0: %10uMhz\n",
                        (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
-                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+                       size += sprintf(buf + size, "1: %10uMhz\n",
                        (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
                }
                break;
@@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        if (ret)
                                return ret;
 
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
-                       size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+                       size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
                                min_freq, max_freq);
                }
                break;
index aceebf58422530e7d946a397eb699830a92aa888..611969bf452077c87c4c10f7c6d4b7e7f52e24bc 100644 (file)
@@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
        int size = 0;
        uint32_t i, now, clock, pcie_speed;
 
-       phm_get_sysfs_buf(&buf, &size);
-
        switch (type) {
        case PP_SCLK:
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                now = i;
 
                for (i = 0; i < sclk_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i, sclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                now = i;
 
                for (i = 0; i < mclk_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i, mclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                now = i;
 
                for (i = 0; i < pcie_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
+                       size += sprintf(buf + size, "%d: %s %s\n", i,
                                        (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
                                        (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
                                        (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
                break;
        case OD_SCLK:
                if (hwmgr->od_enabled) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+                       size += sprintf(buf + size, "%s:\n", "OD_SCLK");
                        for (i = 0; i < odn_sclk_table->num_of_pl; i++)
-                               size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+                               size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
                                        i, odn_sclk_table->entries[i].clock/100,
                                        odn_sclk_table->entries[i].vddc);
                }
                break;
        case OD_MCLK:
                if (hwmgr->od_enabled) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+                       size += sprintf(buf + size, "%s:\n", "OD_MCLK");
                        for (i = 0; i < odn_mclk_table->num_of_pl; i++)
-                               size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+                               size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
                                        i, odn_mclk_table->entries[i].clock/100,
                                        odn_mclk_table->entries[i].vddc);
                }
                break;
        case OD_RANGE:
                if (hwmgr->od_enabled) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
-                       size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+                       size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
-                       size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+                       size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
-                       size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+                       size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
                                data->odn_dpm_table.min_vddc,
                                data->odn_dpm_table.max_vddc);
                }
index 8e28a8eecefc641c435c5b8549a29792cd56f1ae..03bf8f0692228d48de0b9776fb3fe9205f7ee2ce 100644 (file)
@@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
        uint32_t i, now;
        int size = 0;
 
-       phm_get_sysfs_buf(&buf, &size);
-
        switch (type) {
        case PP_SCLK:
                now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
@@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
                                CURR_SCLK_INDEX);
 
                for (i = 0; i < sclk_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i, sclk_table->entries[i].clk / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
                                CURR_MCLK_INDEX);
 
                for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
                                        (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
                break;
index c981fc2882f017eb3be21a4e4a47c2f5ea9876ea..e6336654c5655eeca69113dda4b1fc06b5fecf65 100644 (file)
@@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        int i, now, size = 0, count = 0;
 
-       phm_get_sysfs_buf(&buf, &size);
-
        switch (type) {
        case PP_SCLK:
                if (data->registry_data.sclk_dpm_key_disabled)
@@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                else
                        count = sclk_table->count;
                for (i = 0; i < count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i, sclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
 
                for (i = 0; i < mclk_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i, mclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
 
                for (i = 0; i < soc_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i, soc_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
@@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
 
                for (i = 0; i < dcef_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                        i, dcef_table->dpm_levels[i].value / 100,
                                        (dcef_table->dpm_levels[i].value / 100 == now) ?
                                        "*" : "");
@@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                        gen_speed = pptable->PcieGenSpeed[i];
                        lane_width = pptable->PcieLaneCount[i];
 
-                       size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
+                       size += sprintf(buf + size, "%d: %s %s %s\n", i,
                                        (gen_speed == 0) ? "2.5GT/s," :
                                        (gen_speed == 1) ? "5.0GT/s," :
                                        (gen_speed == 2) ? "8.0GT/s," :
@@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        case OD_SCLK:
                if (hwmgr->od_enabled) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+                       size += sprintf(buf + size, "%s:\n", "OD_SCLK");
                        podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
                        for (i = 0; i < podn_vdd_dep->count; i++)
-                               size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+                               size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
                                        i, podn_vdd_dep->entries[i].clk / 100,
                                                podn_vdd_dep->entries[i].vddc);
                }
                break;
        case OD_MCLK:
                if (hwmgr->od_enabled) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+                       size += sprintf(buf + size, "%s:\n", "OD_MCLK");
                        podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
                        for (i = 0; i < podn_vdd_dep->count; i++)
-                               size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+                               size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
                                        i, podn_vdd_dep->entries[i].clk/100,
                                                podn_vdd_dep->entries[i].vddc);
                }
                break;
        case OD_RANGE:
                if (hwmgr->od_enabled) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
-                       size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+                       size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
-                       size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+                       size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
                                data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
                                hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
-                       size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+                       size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
                                data->odn_dpm_table.min_vddc,
                                data->odn_dpm_table.max_vddc);
                }
index f7e783e1c888f3442ef3bdfbcfbf3be658a777e2..a2f4d6773d458cd8e988dac3162945281e8bd300 100644 (file)
@@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
        int i, now, size = 0;
        struct pp_clock_levels_with_latency clocks;
 
-       phm_get_sysfs_buf(&buf, &size);
-
        switch (type) {
        case PP_SCLK:
                PP_ASSERT_WITH_CODE(
@@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get gfx clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
                break;
@@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get memory clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
                break;
@@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get soc clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
                break;
@@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                "Attempt to get dcef clk levels Failed!",
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
                break;
index 03e63be4ee2756832b2bce8ddb3ed37523a5c8fd..85d55ab4e369fccbb3fd0c03343b16eee63f3eb1 100644 (file)
@@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
        int ret = 0;
        uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
 
-       phm_get_sysfs_buf(&buf, &size);
-
        switch (type) {
        case PP_SCLK:
                ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_sclks(hwmgr, &clocks)) {
-                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_memclocks(hwmgr, &clocks)) {
-                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_socclocks(hwmgr, &clocks)) {
-                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                for (i = 0; i < fclk_dpm_table->count; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, fclk_dpm_table->dpm_levels[i].value,
                                fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
                break;
@@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return ret);
 
                if (vega20_get_dcefclocks(hwmgr, &clocks)) {
-                       size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+                       size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
                                now / 100);
                        break;
                }
 
                for (i = 0; i < clocks.num_levels; i++)
-                       size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
                break;
@@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                        gen_speed = pptable->PcieGenSpeed[i];
                        lane_width = pptable->PcieLaneCount[i];
 
-                       size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
+                       size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
                                        (gen_speed == 0) ? "2.5GT/s," :
                                        (gen_speed == 1) ? "5.0GT/s," :
                                        (gen_speed == 2) ? "8.0GT/s," :
@@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
        case OD_SCLK:
                if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
-                       size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+                       size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+                       size += sprintf(buf + size, "0: %10uMhz\n",
                                od_table->GfxclkFmin);
-                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+                       size += sprintf(buf + size, "1: %10uMhz\n",
                                od_table->GfxclkFmax);
                }
                break;
 
        case OD_MCLK:
                if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
-                       size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+                       size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+                       size += sprintf(buf + size, "1: %10uMhz\n",
                                od_table->UclkFmax);
                }
 
@@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
-                       size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
+                       size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
+                       size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
                                od_table->GfxclkFreq1,
                                od_table->GfxclkVolt1 / VOLTAGE_SCALE);
-                       size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
+                       size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
                                od_table->GfxclkFreq2,
                                od_table->GfxclkVolt2 / VOLTAGE_SCALE);
-                       size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
+                       size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
                                od_table->GfxclkFreq3,
                                od_table->GfxclkVolt3 / VOLTAGE_SCALE);
                }
@@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                break;
 
        case OD_RANGE:
-               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+               size += sprintf(buf + size, "%s:\n", "OD_RANGE");
 
                if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
-                       size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+                       size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
                }
 
                if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
-                       size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+                       size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
                                od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
                }
@@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
                    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
-                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
-                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
-                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
-                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
-                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+                       size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
                                od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
-                       size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+                       size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
                                od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
                }
index 01168b8955bff3ce80b1c7a6e6df04b050a9bcab..8a3244585d809372e2b179ae1d906a2a55005740 100644 (file)
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
                        dev_err(adev->dev, "Failed to disable smu features.\n");
        }
 
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
+       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);
 
index cbc3f99e857348018eb71d992130e3daa58ab1eb..2238ee19c2226a5fa80cbf8ea7ba769132094d23 100644 (file)
@@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
 {
        int ret = 0, size = 0;
        uint32_t cur_value = 0;
+       int i;
 
        smu_cmn_get_sysfs_buf(&buf, &size);
 
@@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
                size += sysfs_emit_at(buf, size, "VDDC: %7umV  %10umV\n",
                                                CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
                break;
-       case SMU_GFXCLK:
-       case SMU_SCLK:
        case SMU_FCLK:
        case SMU_MCLK:
        case SMU_SOCCLK:
@@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
                        return ret;
                size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
                break;
+       case SMU_SCLK:
+       case SMU_GFXCLK:
+               ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+               if (ret)
+                       return ret;
+               if (cur_value  == CYAN_SKILLFISH_SCLK_MAX)
+                       i = 2;
+               else if (cur_value == CYAN_SKILLFISH_SCLK_MIN)
+                       i = 0;
+               else
+                       i = 1;
+               size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN,
+                               i == 0 ? "*" : "");
+               size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+                               i == 1 ? cur_value : cyan_skillfish_sclk_default,
+                               i == 1 ? "*" : "");
+               size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX,
+                               i == 2 ? "*" : "");
+               break;
        default:
                dev_warn(smu->adev->dev, "Unsupported clock type\n");
                return ret;
index 71161f6b78fea9f42df5c57744031b9e5d569a9a..60a557068ea4d6c9ef37577ec23400d19767403f 100644 (file)
@@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                        enum smu_clk_type clk_type, char *buf)
 {
        uint16_t *curve_settings;
-       int i, size = 0, ret = 0;
+       int i, levels, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        uint32_t freq_values[3] = {0};
        uint32_t mark_index = 0;
@@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                        freq_values[1] = cur_value;
                        mark_index = cur_value == freq_values[0] ? 0 :
                                     cur_value == freq_values[2] ? 2 : 1;
-                       if (mark_index != 1)
-                               freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
 
-                       for (i = 0; i < 3; i++) {
+                       levels = 3;
+                       if (mark_index != 1) {
+                               levels = 2;
+                               freq_values[1] = freq_values[2];
+                       }
+
+                       for (i = 0; i < levels; i++) {
                                size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
                                                i == mark_index ? "*" : "");
                        }
-
                }
                break;
        case SMU_PCIE:
index 421f38e8dada08204b74d0fd6ac6202178c9ddba..c02ed65ffa38bad67764496659b362e14799119b 100644 (file)
@@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        bool cur_value_match_level = false;
+       uint32_t min, max;
 
        memset(&metrics, 0, sizeof(metrics));
 
@@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                if (ret)
                        return ret;
                break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
+               if (ret) {
+                       return ret;
+               }
+               break;
        default:
                break;
        }
@@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                if (!cur_value_match_level)
                        size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
                break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+               max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+               if (cur_value  == max)
+                       i = 2;
+               else if (cur_value == min)
+                       i = 0;
+               else
+                       i = 1;
+               size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+                               i == 0 ? "*" : "");
+               size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+                               i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
+                               i == 1 ? "*" : "");
+               size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+                               i == 2 ? "*" : "");
+               break;
        default:
                break;
        }
index 8215bbf5ed7c2a64e481a6aba69c7beea1f88032..caf1775d48ef6ab670fb6291fe3dfc87a884be39 100644 (file)
@@ -697,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu,
        case SMU_FCLK:
                return smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_GetFclkFrequency, 0, value);
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               return smu_cmn_send_smc_msg_with_param(smu,
+                               SMU_MSG_GetGfxclkFrequency, 0, value);
+               break;
        default:
                return -EINVAL;
        }
@@ -967,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
 {
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
+       uint32_t min, max;
 
        smu_cmn_get_sysfs_buf(&buf, &size);
 
@@ -1005,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
                                        cur_value == value ? "*" : "");
                }
                break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
+               if (ret)
+                       goto print_clk_out;
+               min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+               max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+               if (cur_value  == max)
+                       i = 2;
+               else if (cur_value == min)
+                       i = 0;
+               else
+                       i = 1;
+               size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+                               i == 0 ? "*" : "");
+               size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+                               i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK,
+                               i == 1 ? "*" : "");
+               size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+                               i == 2 ? "*" : "");
+               break;
        default:
                break;
        }
index b3ad8352c68ae1445b34cff2ee91b9f55489b47d..a9205a8ea3ad2cc7410d14a5a58b291ff7c2f62c 100644 (file)
@@ -24,5 +24,6 @@
 #define __YELLOW_CARP_PPT_H__
 
 extern void yellow_carp_set_ppt_funcs(struct smu_context *smu);
+#define YELLOW_CARP_UMD_PSTATE_GFXCLK       1100
 
 #endif
index 843d2cbfc71d4caea9570d6408ffd76bf9828bd9..ea6f50c08c5f3b838ed40669998af18a47ea4c37 100644 (file)
@@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
        const char *message = smu_get_message_name(smu, msg);
 
        switch (reg_c2pmsg_90) {
-       case SMU_RESP_NONE:
+       case SMU_RESP_NONE: {
+               u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
+               u32 prm     = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
                dev_err_ratelimited(adev->dev,
-                                   "SMU: I'm not done with your previous command!");
+                                   "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
+                                   msg_idx, prm);
+       }
                break;
        case SMU_RESP_OK:
                /* The SMU executed the command. It completed with a
index b53fee6f1c170a83a0a95c9bc9f9b500ca269363..65f172807a0d5767fd628e48a552ec04734d808a 100644 (file)
@@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
        if (rc)
                return rc;
 
-       return sprintf(buf, "%u\n", reg & 1);
+       return sprintf(buf, "%u\n", reg);
 }
 static DEVICE_ATTR_RO(vga_pw);
 
index d53388199f34c5602c08e7b07dd844b95b72bf75..9d05674550a4f9d568773bbfd8218648753bc883 100644 (file)
@@ -210,8 +210,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
                        dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
                drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
        } else if (cma_obj->vaddr) {
-               dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
-                           cma_obj->vaddr, cma_obj->paddr);
+               if (cma_obj->map_noncoherent)
+                       dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
+                                            cma_obj->vaddr, cma_obj->paddr,
+                                            DMA_TO_DEVICE);
+               else
+                       dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+                                   cma_obj->vaddr, cma_obj->paddr);
        }
 
        drm_gem_object_release(gem_obj);
index 7b9f69f21f1eda7491f230a3be176aacce3d34e9..bca0de92802efa29892a631026589043dbf88ff1 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
index c9a9d74f338c1ca676bc7aba73e2f5f52dbfe8c2..c313a5b4549c4ea00801918a0f0e08be392c5ad8 100644 (file)
@@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 
        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
-               if (!ret)
+               if (!ret) {
+                       /* If the requested seqno is already signaled
+                        * drm_syncobj_find_fence may return a NULL
+                        * fence. To make sure the recipient gets
+                        * signalled, use a new fence instead.
+                        */
+                       if (!*fence)
+                               *fence = dma_fence_get_stub();
+
                        goto out;
+               }
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
index cd818a6291835d93412337e01a9d31b2e5caf7f0..00e53de4812bb5f7cedfe3459eca711383896f31 100644 (file)
@@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
 {
        struct drm_device *dev = hv_get_drvdata(hdev);
        struct hyperv_drm_device *hv = to_hv(dev);
+       struct pci_dev *pdev;
 
        drm_dev_unplug(dev);
        drm_atomic_helper_shutdown(dev);
        vmbus_close(hdev->channel);
        hv_set_drvdata(hdev, NULL);
-       vmbus_free_mmio(hv->mem->start, hv->fb_size);
+
+       /*
+        * Free allocated MMIO memory only on Gen2 VMs.
+        * On Gen1 VMs, release the PCI device
+        */
+       if (efi_enabled(EFI_BOOT)) {
+               vmbus_free_mmio(hv->mem->start, hv->fb_size);
+       } else {
+               pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+                                     PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+               if (!pdev) {
+                       drm_err(dev, "Unable to find PCI Hyper-V video\n");
+                       return -ENODEV;
+               }
+               pci_release_region(pdev, 0);
+               pci_dev_put(pdev);
+       }
 
        return 0;
 }
index 168c84a74d30bda3f182832239f6526e2e73b5da..71fbdcddd31f6ba2d7fe702454a3a27270891b5d 100644 (file)
@@ -696,10 +696,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
        intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
 
        for_each_dsi_phy(phy, intel_dsi->phys) {
-               if (DISPLAY_VER(dev_priv) >= 12)
-                       val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
-               else
-                       val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+               val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
        }
        intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
 
@@ -1135,8 +1132,6 @@ static void
 gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
                              const struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
        /* step 4a: power up all lanes of the DDI used by DSI */
        gen11_dsi_power_up_lanes(encoder);
 
@@ -1162,8 +1157,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
        gen11_dsi_configure_transcoder(encoder, crtc_state);
 
        /* Step 4l: Gate DDI clocks */
-       if (DISPLAY_VER(dev_priv) == 11)
-               gen11_dsi_gate_clocks(encoder);
+       gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1271,7 +1265,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
        if (DISPLAY_VER(i915) == 13) {
                for_each_dsi_port(port, intel_dsi->ports)
                        intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
-                                    TGL_DSI_CHKN_LSHS_GB, 0x4);
+                                    TGL_DSI_CHKN_LSHS_GB_MASK,
+                                    TGL_DSI_CHKN_LSHS_GB(4));
        }
 }
 
index 39e11eaec1a3f1cccade492af13a57abcea826a6..aa7238245b0ea108a0d4bacf357b189af672e161 100644 (file)
@@ -1640,6 +1640,9 @@ struct intel_dp {
        struct intel_dp_pcon_frl frl;
 
        struct intel_psr psr;
+
+       /* When we last wrote the OUI for eDP */
+       unsigned long last_oui_write;
 };
 
 enum lspcon_vendor {
index be883469d2fcc30299a211dafddc2f5c6c489c84..a552f05a67e58b157de60bc79110e92a2d393bf1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/notifier.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 #include <linux/types.h>
 
 #include <asm/byteorder.h>
@@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
 
        if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
                drm_err(&i915->drm, "Failed to write source OUI\n");
+
+       intel_dp->last_oui_write = jiffies;
+}
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
+       wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
 }
 
 /* If the device supports it, try to set the power state appropriately */
index ce229026dc91dccd795292de944b98ab3ea55ccc..b64145a3869a9eb5d14edbc20721415f8eb32638 100644 (file)
@@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *crtc_state);
 void intel_dp_phy_test(struct intel_encoder *encoder);
 
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
+
 #endif /* __INTEL_DP_H__ */
index 569d17b4d00f0bd4136604cd3dd7439c8d99137e..3897468140e02885ee12d6e98749984f1d0c1891 100644 (file)
@@ -36,6 +36,7 @@
 
 #include "intel_backlight.h"
 #include "intel_display_types.h"
+#include "intel_dp.h"
 #include "intel_dp_aux_backlight.h"
 
 /* TODO:
@@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
        int ret;
        u8 tcon_cap[4];
 
+       intel_dp_wait_source_oui(intel_dp);
+
        ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
        if (ret != sizeof(tcon_cap))
                return false;
@@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
        int ret;
        u8 old_ctrl, ctrl;
 
+       intel_dp_wait_source_oui(intel_dp);
+
        ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
        if (ret != 1) {
                drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
@@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
        struct intel_panel *panel = &connector->panel;
        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
+       if (!panel->backlight.edp.vesa.info.aux_enable) {
+               u32 pwm_level = intel_backlight_invert_pwm_level(connector,
+                                                                panel->backlight.pwm_level_max);
+
+               panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+       }
+
        drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
 }
 
@@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
        drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+       if (!panel->backlight.edp.vesa.info.aux_enable)
+               panel->backlight.pwm_funcs->disable(old_conn_state,
+                                                   intel_backlight_invert_pwm_level(connector, 0));
 }
 
 static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
        if (ret < 0)
                return ret;
 
+       if (!panel->backlight.edp.vesa.info.aux_enable) {
+               ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+               if (ret < 0) {
+                       drm_err(&i915->drm,
+                               "Failed to setup PWM backlight controls for eDP backlight: %d\n",
+                               ret);
+                       return ret;
+               }
+       }
        panel->backlight.max = panel->backlight.edp.vesa.info.max;
        panel->backlight.min = 0;
        if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
@@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
-       /* TODO: We currently only support AUX only backlight configurations, not backlights which
-        * require a mix of PWM and AUX controls to work. In the mean time, these machines typically
-        * work just fine using normal PWM controls anyway.
-        */
-       if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
-           drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+       if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
                return true;
        }
index 4d7da07442f2a274620bf2b7bd4915e524918f25..9b24d9b5ade1f750d8e87c72e7f592c610f531c6 100644 (file)
@@ -3277,6 +3277,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
        if (IS_ERR(out_fence)) {
                err = PTR_ERR(out_fence);
+               out_fence = NULL;
                if (eb.requests[0])
                        goto err_request;
                else
index 524eaf67879066b16583d004e2b02fa7cac2f83c..795689eb3fc7bd05e64c102c0b91acb31b2bc520 100644 (file)
@@ -301,7 +301,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
        user_forcewake(gt, true);
        wait_for_suspend(gt);
 
-       intel_pxp_suspend(&gt->pxp, false);
+       intel_pxp_suspend_prepare(&gt->pxp);
 }
 
 static suspend_state_t pm_suspend_target(void)
@@ -326,6 +326,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
        GEM_BUG_ON(gt->awake);
 
        intel_uc_suspend(&gt->uc);
+       intel_pxp_suspend(&gt->pxp);
 
        /*
         * On disabling the device, we want to turn off HW access to memory
@@ -353,7 +354,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
 
 void intel_gt_runtime_suspend(struct intel_gt *gt)
 {
-       intel_pxp_suspend(&gt->pxp, true);
+       intel_pxp_runtime_suspend(&gt->pxp);
        intel_uc_runtime_suspend(&gt->uc);
 
        GT_TRACE(gt, "\n");
@@ -371,7 +372,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
        if (ret)
                return ret;
 
-       intel_pxp_resume(&gt->pxp);
+       intel_pxp_runtime_resume(&gt->pxp);
 
        return 0;
 }
index 67d14afa66237a26f553c8d3265074fe63387812..b67f620c3d93cd1b6e85dacee3216a869f3e4ed1 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h> /* fault-inject.h is not standalone! */
 
 #include <linux/fault-inject.h>
+#include <linux/sched/mm.h>
 
 #include "gem/i915_gem_lmem.h"
 #include "i915_trace.h"
index e1f36253088918c9bedc0805e3bb9a7f80235c7b..2400d6423ba5eda77c4cada5be72fd1204b7bc90 100644 (file)
@@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
               FF_MODE2_GS_TIMER_MASK,
               FF_MODE2_GS_TIMER_224,
               0, false);
-
-       /*
-        * Wa_14012131227:dg1
-        * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
-        */
-       wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
-                    GEN9_RHWO_OPTIMIZATION_DISABLE);
 }
 
 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -1134,6 +1127,15 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
                    GAMT_CHKN_BIT_REG,
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 
+       /* Wa_1407352427:icl,ehl */
+       wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+                   PSDUNIT_CLKGATE_DIS);
+
+       /* Wa_1406680159:icl,ehl */
+       wa_write_or(wal,
+                   SUBSLICE_UNIT_LEVEL_CLKGATE,
+                   GWUNIT_CLKGATE_DIS);
+
        /* Wa_1607087056:icl,ehl,jsl */
        if (IS_ICELAKE(i915) ||
            IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1859,15 +1861,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                            VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
 
-               /* Wa_1407352427:icl,ehl */
-               wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
-                           PSDUNIT_CLKGATE_DIS);
-
-               /* Wa_1406680159:icl,ehl */
-               wa_write_or(wal,
-                           SUBSLICE_UNIT_LEVEL_CLKGATE,
-                           GWUNIT_CLKGATE_DIS);
-
                /*
                 * Wa_1408767742:icl[a2..forever],ehl[all]
                 * Wa_1605460711:icl[a0..c0]
index 38b47e73e35dbd013aacd8cc9f1cfe659e14203a..c48557dfa04c4a73ef2dbdb51134722fe0b40142 100644 (file)
@@ -3080,8 +3080,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
 
                ce = intel_engine_create_virtual(siblings, num_siblings,
                                                 FORCE_VIRTUAL);
-               if (!ce) {
-                       err = ERR_PTR(-ENOMEM);
+               if (IS_ERR(ce)) {
+                       err = ERR_CAST(ce);
                        goto unwind;
                }
 
index da9055c3ebf0f90cfeaea3d5edfbac059a1532f1..bcee121bec5adfece6a07b6f99a0a12394fab0ef 100644 (file)
@@ -11717,7 +11717,9 @@ enum skl_power_gate {
 #define TGL_DSI_CHKN_REG(port)         _MMIO_PORT(port,        \
                                                    _TGL_DSI_CHKN_REG_0, \
                                                    _TGL_DSI_CHKN_REG_1)
-#define TGL_DSI_CHKN_LSHS_GB                   REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB_MASK              REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB(byte_clocks)      REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \
+                                                              (byte_clocks))
 
 /* Display Stream Splitter Control */
 #define DSS_CTL1                               _MMIO(0x67400)
index 820a1f38b271e095ffbe17bf1294582110c1312b..89cccefeea635344e7ae045a87d3d7f03efbd9d2 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
 
 #include "gem/i915_gem_context.h"
 #include "gt/intel_breadcrumbs.h"
index 23fd86de5a240f4315ea716bb03a79812480f1fa..6a7d4e2ee138dd9a2d53ad493c9891217cb5ca5a 100644 (file)
@@ -7,26 +7,29 @@
 #include "intel_pxp_irq.h"
 #include "intel_pxp_pm.h"
 #include "intel_pxp_session.h"
+#include "i915_drv.h"
 
-void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
 {
        if (!intel_pxp_is_enabled(pxp))
                return;
 
        pxp->arb_is_valid = false;
 
-       /*
-        * Contexts using protected objects keep a runtime PM reference, so we
-        * can only runtime suspend when all of them have been either closed
-        * or banned. Therefore, there is no need to invalidate in that
-        * scenario.
-        */
-       if (!runtime)
-               intel_pxp_invalidate(pxp);
+       intel_pxp_invalidate(pxp);
+}
 
-       intel_pxp_fini_hw(pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+       intel_wakeref_t wakeref;
 
-       pxp->hw_state_invalidated = false;
+       if (!intel_pxp_is_enabled(pxp))
+               return;
+
+       with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
+               intel_pxp_fini_hw(pxp);
+               pxp->hw_state_invalidated = false;
+       }
 }
 
 void intel_pxp_resume(struct intel_pxp *pxp)
@@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp)
 
        intel_pxp_init_hw(pxp);
 }
+
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+       if (!intel_pxp_is_enabled(pxp))
+               return;
+
+       pxp->arb_is_valid = false;
+
+       intel_pxp_fini_hw(pxp);
+
+       pxp->hw_state_invalidated = false;
+}
index c89e97a0c3d00acb310916de803250f9699014a2..16990a3f2f8518a927746013ce9678a545444767 100644 (file)
@@ -9,16 +9,29 @@
 #include "intel_pxp_types.h"
 
 #ifdef CONFIG_DRM_I915_PXP
-void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp);
 void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
 #else
-static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_suspend(struct intel_pxp *pxp)
 {
 }
 
 static inline void intel_pxp_resume(struct intel_pxp *pxp)
 {
 }
-#endif
 
+static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+}
+#endif
+static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
+{
+       intel_pxp_resume(pxp);
+}
 #endif /* __INTEL_PXP_PM_H__ */
index 65fdca366e41f00d940f13a0865234fc47df5595..f74f8048af8f2c5551ae384871a9418d1347929b 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 #include <linux/clk.h>
+#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 
index ae11061727ff80d7141609909552bee59f444f1d..39197b4beea78b8ed7b93c5a1a98c4d524376cd6 100644 (file)
@@ -4,8 +4,8 @@ config DRM_MSM
        tristate "MSM DRM"
        depends on DRM
        depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+       depends on COMMON_CLK
        depends on IOMMU_SUPPORT
-       depends on (OF && COMMON_CLK) || COMPILE_TEST
        depends on QCOM_OCMEM || QCOM_OCMEM=n
        depends on QCOM_LLCC || QCOM_LLCC=n
        depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
index 40577f8856d8f10cf1b1c45420df80d34321ef34..093454457545f14424408c8ea7393e332b9f1c4d 100644 (file)
@@ -23,8 +23,10 @@ msm-y := \
        hdmi/hdmi_i2c.o \
        hdmi/hdmi_phy.o \
        hdmi/hdmi_phy_8960.o \
+       hdmi/hdmi_phy_8996.o \
        hdmi/hdmi_phy_8x60.o \
        hdmi/hdmi_phy_8x74.o \
+       hdmi/hdmi_pll_8960.o \
        edp/edp.o \
        edp/edp_aux.o \
        edp/edp_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
        disp/mdp4/mdp4_dtv_encoder.o \
        disp/mdp4/mdp4_lcdc_encoder.o \
        disp/mdp4/mdp4_lvds_connector.o \
+       disp/mdp4/mdp4_lvds_pll.o \
        disp/mdp4/mdp4_irq.o \
        disp/mdp4/mdp4_kms.o \
        disp/mdp4/mdp4_plane.o \
@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
        dp/dp_audio.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
 msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
 
index 267a880811d654c78ba89de035fd660026afe898..78aad5216a613041fa210acb021208c42df918b9 100644 (file)
@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 {
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
-       u32 gpu_scid, cntl1_regval = 0;
+       u32 cntl1_regval = 0;
 
        if (IS_ERR(a6xx_gpu->llc_mmio))
                return;
 
        if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
-               gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+               u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
 
                gpu_scid &= 0x1f;
                cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
                               (gpu_scid << 15) | (gpu_scid << 20);
+
+               /* On A660, the SCID programming for UCHE traffic is done in
+                * A6XX_GBIF_SCACHE_CNTL0[14:10]
+                */
+               if (adreno_is_a660_family(adreno_gpu))
+                       gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+                               (1 << 8), (gpu_scid << 10) | (1 << 8));
        }
 
        /*
@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
        }
 
        gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
-       /* On A660, the SCID programming for UCHE traffic is done in
-        * A6XX_GBIF_SCACHE_CNTL0[14:10]
-        */
-       if (adreno_is_a660_family(adreno_gpu))
-               gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
-                       (1 << 8), (gpu_scid << 10) | (1 << 8));
 }
 
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
index 7501849ed15d93c4871348421dd6c7f3c88254e7..6e90209cd543bf2819f5c8bfff8e7de26e8f66b4 100644 (file)
@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
        a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
-               2, sizeof(*a6xx_state->gmu_registers));
+               3, sizeof(*a6xx_state->gmu_registers));
 
        if (!a6xx_state->gmu_registers)
                return;
 
-       a6xx_state->nr_gmu_registers = 2;
+       a6xx_state->nr_gmu_registers = 3;
 
        /* Get the CX GMU registers from AHB */
        _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
index eb40d8413bca937e8c87ccf457e9cf88723801f7..6d36f63c333881c01f2ecc0cfc1a504537dbc4f7 100644 (file)
@@ -33,6 +33,7 @@ struct dp_aux_private {
        bool read;
        bool no_send_addr;
        bool no_send_stop;
+       bool initted;
        u32 offset;
        u32 segment;
 
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        }
 
        mutex_lock(&aux->mutex);
+       if (!aux->initted) {
+               ret = -EIO;
+               goto exit;
+       }
 
        dp_aux_update_offset_and_segment(aux, msg);
        dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        }
 
        aux->cmd_busy = false;
+
+exit:
        mutex_unlock(&aux->mutex);
 
        return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
 
        aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+       mutex_lock(&aux->mutex);
+
        dp_catalog_aux_enable(aux->catalog, true);
        aux->retry_cnt = 0;
+       aux->initted = true;
+
+       mutex_unlock(&aux->mutex);
 }
 
 void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
 
        aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+       mutex_lock(&aux->mutex);
+
+       aux->initted = false;
        dp_catalog_aux_enable(aux->catalog, false);
+
+       mutex_unlock(&aux->mutex);
 }
 
 int dp_aux_register(struct drm_dp_aux *dp_aux)
index f69a125f955958ae41d89cf9214c1cb963df30d6..0afc3b756f92ddd62f8207ad8dfc6b211b715d0e 100644 (file)
@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
        if (!prop) {
                DRM_DEV_DEBUG(dev,
                        "failed to find data lane mapping, using default\n");
+               /* Set the number of date lanes to 4 by default. */
+               msm_host->num_data_lanes = 4;
                return 0;
        }
 
index 09d2d279c30ae7bc92ae35c9921a9425ea51c693..dee13fedee3b5cc925d4e7352c0906ebd8123521 100644 (file)
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
                goto free_priv;
 
        pm_runtime_get_sync(&gpu->pdev->dev);
+       msm_gpu_hw_init(gpu);
        show_priv->state = gpu->funcs->gpu_state_get(gpu);
        pm_runtime_put_sync(&gpu->pdev->dev);
 
index 7936e8d498dda30e900d5a76c8ab57f11ad94994..892c04365239bb4397a3f5aa8b5debdf9a1aee56 100644 (file)
@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
        return ret;
 }
 
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
-               struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+                     ktime_t timeout)
 {
-       struct msm_drm_private *priv = dev->dev_private;
-       struct drm_msm_wait_fence *args = data;
-       ktime_t timeout = to_ktime(args->timeout);
-       struct msm_gpu_submitqueue *queue;
-       struct msm_gpu *gpu = priv->gpu;
        struct dma_fence *fence;
        int ret;
 
-       if (args->pad) {
-               DRM_ERROR("invalid pad: %08x\n", args->pad);
+       if (fence_id > queue->last_fence) {
+               DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+                                     fence_id, queue->last_fence);
                return -EINVAL;
        }
 
-       if (!gpu)
-               return 0;
-
-       queue = msm_submitqueue_get(file->driver_priv, args->queueid);
-       if (!queue)
-               return -ENOENT;
-
        /*
         * Map submitqueue scoped "seqno" (which is actually an idr key)
         * back to underlying dma-fence
@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        ret = mutex_lock_interruptible(&queue->lock);
        if (ret)
                return ret;
-       fence = idr_find(&queue->fence_idr, args->fence);
+       fence = idr_find(&queue->fence_idr, fence_id);
        if (fence)
                fence = dma_fence_get_rcu(fence);
        mutex_unlock(&queue->lock);
@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        }
 
        dma_fence_put(fence);
+
+       return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+               struct drm_file *file)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct drm_msm_wait_fence *args = data;
+       struct msm_gpu_submitqueue *queue;
+       int ret;
+
+       if (args->pad) {
+               DRM_ERROR("invalid pad: %08x\n", args->pad);
+               return -EINVAL;
+       }
+
+       if (!priv->gpu)
+               return 0;
+
+       queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+       if (!queue)
+               return -ENOENT;
+
+       ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
        msm_submitqueue_put(queue);
 
        return ret;
index 104fdfc140278863c62a1dc1225c253d863e727a..512d55eecbaf15e225e3ba5f84f4107ae34962bb 100644 (file)
@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
        return 0;
@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
                        break;
                fallthrough;
        default:
-               DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+               DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }
index 4a1420b05e978fb2bcd618e0bde05171b080655d..086dacf2f26a74120f015a4d5f1ed93b9e0fdc1b 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
index 3cb029f1092555bfa4aa3ef1b0bd99d897dad23c..282628d6b72c09f8ed5bbf141e0330e417d463f4 100644 (file)
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                args->nr_cmds);
        if (IS_ERR(submit)) {
                ret = PTR_ERR(submit);
+               submit = NULL;
                goto out_unlock;
        }
 
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        drm_sched_entity_push_job(&submit->base);
 
        args->fence = submit->fence_id;
+       queue->last_fence = submit->fence_id;
 
        msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
        msm_process_post_deps(post_deps, args->nr_out_syncobjs,
index 59cdd00b69d0401e2720b2e37b8ff8d546c0c7ac..48ea2de911f1357bf69c7169dc0fce8f522e4c56 100644 (file)
@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
  *             by the submitqueue's priority
  * @faults:    the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ *             checking)
  * @ctx:       the per-drm_file context associated with the submitqueue (ie.
  *             which set of pgtables do submits jobs associated with the
  *             submitqueue use)
@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
        u32 flags;
        u32 ring_nr;
        int faults;
+       uint32_t last_fence;
        struct msm_file_private *ctx;
        struct list_head node;
        struct idr fence_idr;
index 8b7473f69cb8fe6bf86a86a097dfd0396e5b4207..384e90c4b2a7999bb1310e91d0253afd3f8558be 100644 (file)
@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
        struct msm_gpu *gpu = dev_to_gpu(dev);
        struct dev_pm_opp *opp;
 
+       /*
+        * Note that devfreq_recommended_opp() can modify the freq
+        * to something that actually is in the opp table:
+        */
        opp = devfreq_recommended_opp(dev, freq, flags);
 
        /*
@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
         */
        if (gpu->devfreq.idle_freq) {
                gpu->devfreq.idle_freq = *freq;
+               dev_pm_opp_put(opp);
                return 0;
        }
 
@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
        struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
        unsigned long idle_freq, target_freq = 0;
 
-       if (!df->devfreq)
-               return;
-
        /*
         * Hold devfreq lock to synchronize with get_dev_status()/
         * target() callbacks
@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 {
        struct msm_gpu_devfreq *df = &gpu->devfreq;
 
+       if (!df->devfreq)
+               return;
+
        msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
-                              HRTIMER_MODE_ABS);
+                              HRTIMER_MODE_REL);
 }
index b51d690f375ff4721ababf34a2fc53c27b2976f8..88d262ba648cf901652f899c5b47f77c9c8ab6c9 100644 (file)
@@ -2626,6 +2626,27 @@ nv174_chipset = {
        .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
+static const struct nvkm_device_chip
+nv176_chipset = {
+       .name = "GA106",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gpio     = { 0x00000001, ga102_gpio_new },
+       .i2c      = { 0x00000001, gm200_i2c_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mc       = { 0x00000001, ga100_mc_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .privring = { 0x00000001, gm200_privring_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .top      = { 0x00000001, ga100_top_new },
+       .disp     = { 0x00000001, ga102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+};
+
 static const struct nvkm_device_chip
 nv177_chipset = {
        .name = "GA107",
@@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x168: device->chip = &nv168_chipset; break;
                case 0x172: device->chip = &nv172_chipset; break;
                case 0x174: device->chip = &nv174_chipset; break;
+               case 0x176: device->chip = &nv176_chipset; break;
                case 0x177: device->chip = &nv177_chipset; break;
                default:
                        if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
index 6e3c450eaacef779b2a8cae771da37457c79e465..3ff49344abc771a61993691149fd91c292fafb80 100644 (file)
@@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
                nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
                nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
                nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
-               nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
                nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
                nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
                nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
index cdb1ead26d84f0efb0076ef775dfb77b6b757675..82b4c8e1457c28314c80aacbb6e50e1c0c3a73c0 100644 (file)
@@ -207,11 +207,13 @@ int
 gm200_acr_wpr_parse(struct nvkm_acr *acr)
 {
        const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+       struct nvkm_acr_lsfw *lsfw;
 
        while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
                wpr_header_dump(&acr->subdev, hdr);
-               if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
-                       return -ENOMEM;
+               lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+               if (IS_ERR(lsfw))
+                       return PTR_ERR(lsfw);
        }
 
        return 0;
index fb9132a39bb1a5be122cf23fcb074a901ee9a435..fd97a935a380e8a0ba2a95bd51823f96d5a8f4b1 100644 (file)
@@ -161,11 +161,13 @@ int
 gp102_acr_wpr_parse(struct nvkm_acr *acr)
 {
        const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+       struct nvkm_acr_lsfw *lsfw;
 
        while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
                wpr_header_v1_dump(&acr->subdev, hdr);
-               if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
-                       return -ENOMEM;
+               lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+               if (IS_ERR(lsfw))
+                       return PTR_ERR(lsfw);
        }
 
        return 0;
index 5bc5f775abe1ef10a4ab467072f3a4ba13d81aab..f91fb31ab7a7c77e90631216d3b10de5cd92e5d8 100644 (file)
@@ -704,9 +704,13 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
        int ret;
 
        dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+               /* Make sure to grab an additional ref on the added fence */
+               dma_fence_get(fence);
                ret = drm_sched_job_add_dependency(job, fence);
-               if (ret)
+               if (ret) {
+                       dma_fence_put(fence);
                        return ret;
+               }
        }
        return 0;
 }
index 5755f0432e77435bca7942430c05ee2dc9e84d31..8c796de53222c950999b897635cc36e6f44f9408 100644 (file)
@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
        default MACH_SUN8I
        select CRC_CCITT
        select DRM_MIPI_DSI
+       select RESET_CONTROLLER
        select PHY_SUN6I_MIPI_DPHY
        help
          Choose this option if you want have an Allwinner SoC with
index 739f11c0109cbea1972cbe92f9bfcb93f2271e0c..047adc42d9a0dc6afd807b9ddfa79575ab937a78 100644 (file)
@@ -1103,7 +1103,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
         * as an indication that we're about to swap out.
         */
        memset(&place, 0, sizeof(place));
-       place.mem_type = TTM_PL_SYSTEM;
+       place.mem_type = bo->resource->mem_type;
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
                return -EBUSY;
 
@@ -1135,6 +1135,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                struct ttm_place hop;
 
                memset(&hop, 0, sizeof(hop));
+               place.mem_type = TTM_PL_SYSTEM;
                ret = ttm_resource_alloc(bo, &place, &evict_mem);
                if (unlikely(ret))
                        goto out;
index 7e83c00a3f48926b56519902599ed966e4c379d4..79c870a3bef8d5399d000aca435b32d55ebd56bc 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/sched.h>
 #include <linux/shmem_fs.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
index fddaeb0b09c11764cb46b23ffff6ec345f505cd7..f642bd6e71ff49b7aea487099329b7caf6455c7e 100644 (file)
@@ -391,7 +391,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
 
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        bo->madv = VC4_MADV_WILLNEED;
        refcount_set(&bo->usecnt, 0);
index f0b3e4cf5bceb6c33f41663cbc10bd904d18e630..b61792d2aa65740db39b49f957dc41eda99c5824 100644 (file)
@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_hvs *hvs = vc4->hvs;
-       struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        struct vc4_hvs_state *old_hvs_state;
+       unsigned int channel;
        int i;
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
        }
 
-       if (vc4->hvs->hvs5)
-               clk_set_min_rate(hvs->core_clk, 500000000);
-
        old_hvs_state = vc4_hvs_get_old_global_state(state);
-       if (!old_hvs_state)
+       if (IS_ERR(old_hvs_state))
                return;
 
-       for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
-               struct vc4_crtc_state *vc4_crtc_state =
-                       to_vc4_crtc_state(old_crtc_state);
-               unsigned int channel = vc4_crtc_state->assigned_channel;
+       for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+               struct drm_crtc_commit *commit;
                int ret;
 
-               if (channel == VC4_HVS_CHANNEL_DISABLED)
+               if (!old_hvs_state->fifo_state[channel].in_use)
                        continue;
 
-               if (!old_hvs_state->fifo_state[channel].in_use)
+               commit = old_hvs_state->fifo_state[channel].pending_commit;
+               if (!commit)
                        continue;
 
-               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+               ret = drm_crtc_commit_wait(commit);
                if (ret)
                        drm_err(dev, "Timed out waiting for commit\n");
+
+               drm_crtc_commit_put(commit);
+               old_hvs_state->fifo_state[channel].pending_commit = NULL;
        }
 
+       if (vc4->hvs->hvs5)
+               clk_set_min_rate(hvs->core_clk, 500000000);
+
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
        vc4_ctm_commit(vc4, state);
@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
        unsigned int i;
 
        hvs_state = vc4_hvs_get_new_global_state(state);
-       if (!hvs_state)
-               return -EINVAL;
+       if (WARN_ON(IS_ERR(hvs_state)))
+               return PTR_ERR(hvs_state);
 
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_crtc_state =
@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 
        for (i = 0; i < HVS_NUM_CHANNELS; i++) {
                state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
-
-               if (!old_state->fifo_state[i].pending_commit)
-                       continue;
-
-               state->fifo_state[i].pending_commit =
-                       drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
        }
 
        return &state->base;
@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
        unsigned int i;
 
        hvs_new_state = vc4_hvs_get_global_state(state);
-       if (!hvs_new_state)
-               return -EINVAL;
+       if (IS_ERR(hvs_new_state))
+               return PTR_ERR(hvs_new_state);
 
        for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
                if (!hvs_new_state->fifo_state[i].in_use)
index d86e1ad4a97260b82895d98bbb2feef93ef2ca7f..5072dbb0669a333fb4b722fa6b26167e20843c65 100644 (file)
@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
        schedule_work(&vgdev->config_changed_work);
 }
 
-static __poll_t virtio_gpu_poll(struct file *filp,
-                               struct poll_table_struct *wait)
-{
-       struct drm_file *drm_file = filp->private_data;
-       struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
-       struct drm_device *dev = drm_file->minor->dev;
-       struct virtio_gpu_device *vgdev = dev->dev_private;
-       struct drm_pending_event *e = NULL;
-       __poll_t mask = 0;
-
-       if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
-               return drm_poll(filp, wait);
-
-       poll_wait(filp, &drm_file->event_wait, wait);
-
-       if (!list_empty(&drm_file->event_list)) {
-               spin_lock_irq(&dev->event_lock);
-               e = list_first_entry(&drm_file->event_list,
-                                    struct drm_pending_event, link);
-               drm_file->event_space += e->event->length;
-               list_del(&e->link);
-               spin_unlock_irq(&dev->event_lock);
-
-               kfree(e);
-               mask |= EPOLLIN | EPOLLRDNORM;
-       }
-
-       return mask;
-}
-
 static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
        { 0 },
@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
 MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_AUTHOR("Alon Levy");
 
-static const struct file_operations virtio_gpu_driver_fops = {
-       .owner          = THIS_MODULE,
-       .open           = drm_open,
-       .release        = drm_release,
-       .unlocked_ioctl = drm_ioctl,
-       .compat_ioctl   = drm_compat_ioctl,
-       .poll           = virtio_gpu_poll,
-       .read           = drm_read,
-       .llseek         = noop_llseek,
-       .mmap           = drm_gem_mmap
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
 
 static const struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
index e0265fe74aa565f639127042f7071faf80c5b50c..0a194aaad4192b7b5dff1b8e2762d0461fee9631 100644 (file)
@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
        spinlock_t       lock;
 };
 
-#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
 struct virtio_gpu_fence_event {
        struct drm_pending_event base;
        struct drm_event event;
index 5618a1d5879c56382ed23d6aa046f58a88524c25..3607646d322954c9d07a3245e052cf7857fae8c5 100644 (file)
@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
        if (!e)
                return -ENOMEM;
 
-       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
        e->event.length = sizeof(e->event);
 
        ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
index 9f14d99c763c2b359b47b78dc9c76787b5ba97ab..bc7605324db39535938cc44ac1e15503d516f60c 100644 (file)
@@ -773,6 +773,7 @@ static struct xenbus_driver xen_driver = {
        .probe = xen_drv_probe,
        .remove = xen_drv_remove,
        .otherend_changed = displback_changed,
+       .not_essential = true,
 };
 
 static int __init xen_drv_init(void)
index 9f5435b55949b3786b3be4fc76fd5444f7428658..a7c78ac96270d2eb15e585bbd6d31087aee0fec9 100644 (file)
@@ -207,14 +207,14 @@ config HID_CHERRY
 
 config HID_CHICONY
        tristate "Chicony devices"
-       depends on HID
+       depends on USB_HID
        default !EXPERT
        help
        Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
        tristate "Corsair devices"
-       depends on HID && USB && LEDS_CLASS
+       depends on USB_HID && LEDS_CLASS
        help
        Support for Corsair devices that are not fully compliant with the
        HID standard.
@@ -245,7 +245,7 @@ config HID_MACALLY
 
 config HID_PRODIKEYS
        tristate "Prodikeys PC-MIDI Keyboard support"
-       depends on HID && SND
+       depends on USB_HID && SND
        select SND_RAWMIDI
        help
        Support for Prodikeys PC-MIDI Keyboard device support.
@@ -560,7 +560,7 @@ config HID_LENOVO
 
 config HID_LOGITECH
        tristate "Logitech devices"
-       depends on HID
+       depends on USB_HID
        depends on LEDS_CLASS
        default !EXPERT
        help
@@ -951,7 +951,7 @@ config HID_SAITEK
 
 config HID_SAMSUNG
        tristate "Samsung InfraRed remote control or keyboards"
-       depends on HID
+       depends on USB_HID
        help
        Support for Samsung InfraRed remote control or keyboards.
 
index 5d57214d8dee4aedea057128a7c5fdf5c2df64d7..08c9a9a60ae47a54045aa834aeb92b431205550b 100644 (file)
@@ -854,7 +854,7 @@ static int asus_input_mapping(struct hid_device *hdev,
                switch (usage->hid & HID_USAGE) {
                case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN);      break;
                case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP);                break;
-               case 0x35: asus_map_key_clear(KEY_SCREENLOCK);          break;
+               case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF);         break;
                case 0x6c: asus_map_key_clear(KEY_SLEEP);               break;
                case 0x7c: asus_map_key_clear(KEY_MICMUTE);             break;
                case 0x82: asus_map_key_clear(KEY_CAMERA);              break;
@@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
                drvdata->tp = &asus_i2c_tp;
 
-       if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
-           hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
                struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 
                if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                drvdata->tp = &asus_t100chi_tp;
        }
 
-       if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
-           hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
                struct usb_host_interface *alt =
                        to_usb_interface(hdev->dev.parent)->altsetting;
 
index db6da21ade06315c457d087447cecf563eac8c55..74ad8bf98bfd5acea3d24ecff58300bdab434a26 100644 (file)
@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
                struct bigben_device, worker);
        struct hid_field *report_field = bigben->report->field[0];
 
-       if (bigben->removed)
+       if (bigben->removed || !report_field)
                return;
 
        if (bigben->work_led) {
index ca556d39da2aedb666733642194e2dced5e8a40a..f04d2aa23efe4647332f452f1f901a4a9cce5d93 100644 (file)
@@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
        ret = hid_parse(hdev);
        if (ret) {
index 902a60e249ed268489b22809d6c1340d638ed954..8c895c820b672772340ec17d34dc60e198eb56a5 100644 (file)
@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
        int ret;
        unsigned long quirks = id->driver_data;
        struct corsair_drvdata *drvdata;
-       struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
+       struct usb_interface *usbif;
+
+       if (!hid_is_usb(dev))
+               return -EINVAL;
+
+       usbif = to_usb_interface(dev->dev.parent);
 
        drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
                               GFP_KERNEL);
index 021049805bb71f50be232eb2160d32100b2a2b03..3091355d48df64a28ae1bc704eae4185274975f5 100644 (file)
@@ -50,7 +50,7 @@ struct elan_drvdata {
 
 static int is_not_elan_touchpad(struct hid_device *hdev)
 {
-       if (hdev->bus == BUS_USB) {
+       if (hid_is_usb(hdev)) {
                struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 
                return (intf->altsetting->desc.bInterfaceNumber !=
index 383dfda8c12fcede9f98bce23a21f55dfab22118..8e960d7b233b3aad3eca844166c7299cb918cd12 100644 (file)
@@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret;
        struct usb_device *udev;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
index 4ef1c3b8094ea036ce382232c255e00d7d849be2..79505c64dbfe72ebb00c518fde12ec0337d0d39c 100644 (file)
@@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct ft260_get_chip_version_report version;
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
@@ -966,24 +969,23 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
        mutex_init(&dev->lock);
        init_completion(&dev->wait);
 
+       ret = ft260_xfer_status(dev);
+       if (ret)
+               ft260_i2c_reset(hdev);
+
+       i2c_set_adapdata(&dev->adap, dev);
        ret = i2c_add_adapter(&dev->adap);
        if (ret) {
                hid_err(hdev, "failed to add i2c adapter\n");
                goto err_hid_close;
        }
 
-       i2c_set_adapdata(&dev->adap, dev);
-
        ret = sysfs_create_group(&hdev->dev.kobj, &ft260_attr_group);
        if (ret < 0) {
                hid_err(hdev, "failed to create sysfs attrs\n");
                goto err_i2c_free;
        }
 
-       ret = ft260_xfer_status(dev);
-       if (ret)
-               ft260_i2c_reset(hdev);
-
        return 0;
 
 err_i2c_free:
index 8123b871a3ebf7f200ce6017b60e31a2f5431700..0403beb3104b9e47af2c7cb79acf734879e05b24 100644 (file)
@@ -585,6 +585,8 @@ static void hammer_remove(struct hid_device *hdev)
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
index 0a38e8e9bc783051f8bc634cff597885fdcb6161..403506b9697e75a3c2ac9042299bac8e1e2d342e 100644 (file)
@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 static int holtek_kbd_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       int ret = hid_parse(hdev);
+       struct usb_interface *intf;
+       int ret;
+
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
 
+       ret = hid_parse(hdev);
        if (!ret)
                ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 
+       intf = to_usb_interface(hdev->dev.parent);
        if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
                struct hid_input *hidinput;
                list_for_each_entry(hidinput, &hdev->inputs, list) {
index 195b735b001d03d56f366b63b0ceb14b2d5d7552..b7172c48ef9f0898ec5b6d0596d808aa4908b26c 100644 (file)
@@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        return rdesc;
 }
 
+static int holtek_mouse_probe(struct hid_device *hdev,
+                             const struct hid_device_id *id)
+{
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+       return 0;
+}
+
 static const struct hid_device_id holtek_mouse_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
                        USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
@@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = {
        .name = "holtek_mouse",
        .id_table = holtek_mouse_devices,
        .report_fixup = holtek_mouse_report_fixup,
+       .probe = holtek_mouse_probe,
 };
 
 module_hid_driver(holtek_mouse_driver);
index 95037a3e2e6ef9f6c6bea7c2a34673b910b87a6c..19da07777d6283f589061d5309177e3b5640145d 100644 (file)
 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W        0x0401
 #define USB_DEVICE_ID_HP_X2            0x074d
 #define USB_DEVICE_ID_HP_X2_10_COVER   0x0755
+#define I2C_DEVICE_ID_HP_ENVY_X360_15  0x2d05
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15       0x2817
+#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN   0x2706
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN   0x261A
 
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 #define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
 #define USB_DEVICE_ID_GOOGLE_DON       0x5050
+#define USB_DEVICE_ID_GOOGLE_EEL       0x5057
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
 #define USB_DEVICE_ID_MS_TOUCH_COVER_2   0x07a7
 #define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
 #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
+#define USB_DEVICE_ID_MS_SURFACE3_COVER                0x07de
 #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
 #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
 #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
index 2c72ce4147b174219778545d9b7d7c64056de3d4..03f994541981c1cf245a3eea0a38e3dedbdcf99f 100644 (file)
@@ -160,6 +160,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
        if (usage) {
                *old_keycode = usage->type == EV_KEY ?
                                usage->code : KEY_RESERVED;
+               usage->type = EV_KEY;
                usage->code = ke->keycode;
 
                clear_bit(*old_keycode, dev->keybit);
@@ -324,6 +325,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
          HID_BATTERY_QUIRK_IGNORE },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+         HID_BATTERY_QUIRK_IGNORE },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+         HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
          HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
@@ -650,10 +655,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                                                code += KEY_MACRO1;
                                        else
                                                code += BTN_TRIGGER_HAPPY - 0x1e;
-                               } else {
-                                       goto ignore;
+                                       break;
                                }
-                               break;
+                               fallthrough;
                default:
                        switch (field->physical) {
                        case HID_GD_MOUSE:
index d40af911df635a42f763459d9c3792bbe59847b3..fb3f7258009c26a4c86b5a7cfc679976ed43bb01 100644 (file)
@@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
 
 static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
-       struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
-       __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+       struct usb_interface *iface;
+       __u8 iface_num;
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
        struct lg_drv_data *drv_data;
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       iface = to_usb_interface(hdev->dev.parent);
+       iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+
        /* G29 only work with the 1st interface */
        if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
            (iface_num != 0)) {
index a0017b010c3421bef0eb09c0606fdf5b3597c86e..7106b921b53cf5038137fef8aeaead8b91affa00 100644 (file)
@@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev,
        case recvr_type_bluetooth:      no_dj_interfaces = 2; break;
        case recvr_type_dinovo:         no_dj_interfaces = 2; break;
        }
-       if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if (hid_is_usb(hdev)) {
                intf = to_usb_interface(hdev->dev.parent);
                if (intf && intf->altsetting->desc.bInterfaceNumber >=
                                                        no_dj_interfaces) {
index 686788ebf3e1e7953010d66dd980943da4d0048b..d7687ce706144aa04417cd72c7a8279605e5b3ec 100644 (file)
@@ -256,8 +256,11 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
                unsigned long now = jiffies;
                int step_x = msc->touches[id].scroll_x - x;
                int step_y = msc->touches[id].scroll_y - y;
-               int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) /
-                             SCROLL_HR_STEPS;
+               int step_hr =
+                       max_t(int,
+                             ((64 - (int)scroll_speed) * msc->scroll_accel) /
+                                       SCROLL_HR_STEPS,
+                             1);
                int step_x_hr = msc->touches[id].scroll_x_hr - x;
                int step_y_hr = msc->touches[id].scroll_y_hr - y;
 
index e1afddb7b33d8e72a249b2831b1fca7a00719e89..082376a6cb3d73ad8c5e24939c29af1a2a947239 100644 (file)
@@ -1888,6 +1888,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
                        USB_DEVICE_ID_CVTOUCH_SCREEN) },
 
+       /* eGalax devices (SAW) */
+       { .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
+               MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER) },
+
        /* eGalax devices (resistive) */
        { .driver_data = MT_CLS_EGALAX,
                MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
index a1e0f6849875e4cfacf67c18d057e059f8d1d0d4..b6a9a0f3966ee0d1853aba4b42c1fca2f94d70a1 100644 (file)
@@ -189,6 +189,7 @@ struct joycon_rumble_amp_data {
        u16 amp;
 };
 
+#if IS_ENABLED(CONFIG_NINTENDO_FF)
 /*
  * These tables are from
  * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md
@@ -289,6 +290,10 @@ static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = {
        { 0xc2, 0x8070,  940 }, { 0xc4, 0x0071,  960 }, { 0xc6, 0x8071,  981 },
        { 0xc8, 0x0072, joycon_max_rumble_amp }
 };
+static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
+static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
+#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */
+static const u16 JC_RUMBLE_PERIOD_MS = 50;
 
 /* States for controller state machine */
 enum joycon_ctlr_state {
@@ -397,9 +402,6 @@ struct joycon_input_report {
 #define JC_RUMBLE_DATA_SIZE    8
 #define JC_RUMBLE_QUEUE_SIZE   8
 
-static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
-static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
-static const u16 JC_RUMBLE_PERIOD_MS = 50;
 static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5;
 
 static const char * const joycon_player_led_names[] = {
@@ -1850,8 +1852,10 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
                                      d_name,
                                      "green",
                                      joycon_player_led_names[i]);
-               if (!name)
+               if (!name) {
+                       mutex_unlock(&joycon_input_num_mutex);
                        return -ENOMEM;
+               }
 
                led = &ctlr->leds[i];
                led->name = name;
@@ -1864,6 +1868,7 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
                ret = devm_led_classdev_register(&hdev->dev, led);
                if (ret) {
                        hid_err(hdev, "Failed registering %s LED\n", led->name);
+                       mutex_unlock(&joycon_input_num_mutex);
                        return ret;
                }
        }
index 2666af02d5c1a11f1a32f098eecb61ddc458cf0a..e4e9471d0f1e92d9b17a3c0f90d1b7dc0ceeb7de 100644 (file)
@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
 static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret;
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+       struct usb_interface *intf;
+       unsigned short ifnum;
        unsigned long quirks = id->driver_data;
        struct pk_device *pk;
        struct pcmidi_snd *pm = NULL;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       intf = to_usb_interface(hdev->dev.parent);
+       ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
        pk = kzalloc(sizeof(*pk), GFP_KERNEL);
        if (pk == NULL) {
                hid_err(hdev, "can't alloc descriptor\n");
index 06b7908c874c18acb23f83aa86fd366f39c2b36b..ee7e504e7279f3522f9fdafd73dc5c809a6a4378 100644 (file)
@@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
index 4556d2a50f754ebd56feea419e3aafb950627eea..d94ee0539421e7f9a742c6622f65059649440ad9 100644 (file)
@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index ce5f22519956acf91527d390d99f3ff35a424420..e95d59cd8d075dc82716fb47a4193286bd0c2b09 100644 (file)
@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index ea17abc7ad5212d842e6276413490f6fe19825ec..76da04801ca9bde5c882c89444529eb7cb8c6578 100644 (file)
@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 0316edf8c5bb44e4d72e5a9a6ab858b53d0eefbb..1896c69ea512f787b264ed27f82f13d813fd82ea 100644 (file)
@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 5248b3c7cf7859abbd62dab6fed53e88d1b37773..cf8eeb33a12571bf47c2c57118ff7935909cd865 100644 (file)
@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 960012881570569df2d31c368b33c06f4f67ba4d..6fb9b9563769dfbcc1cfc521c1f113feb442d411 100644 (file)
@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 4a88a76d5c62293b890c56093d469cf6687a7baf..d5ddf0d68346b2c147f3c888badb77c06d77f46c 100644 (file)
@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 989927defe8db84d6d1916424a9c1d50fc5c3911..4fcc8e7d276f228cb2c29f99df8e6d5db9eef119 100644 (file)
@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 3956a6c9c5217efdb88e25c918f3598a9e5e61fb..5bf1971a2b14d83c6388d3d1e9541617ff415fa6 100644 (file)
@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 818701f7a028178b2c565e1506a28360c1deee08..a784bb4ee6512d8b39a0ede9a653b29cf901bf5d 100644 (file)
@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 2e1c31156eca046417d65d76d0e8aa847719ecb3..cf5992e97094003628a4aaacbded99a9f3603a10 100644 (file)
@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
        int ret;
        unsigned int cmask = HID_CONNECT_DEFAULT;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
index d1b107d547f546719b9b449b8e420feb535bcd70..60ec2b29d54de49e0b19c57cc57288efa47af7a3 100644 (file)
@@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        sc->quirks = quirks;
        hid_set_drvdata(hdev, sc);
        sc->hdev = hdev;
-       usbdev = to_usb_device(sc->hdev->dev.parent->parent);
 
        ret = hid_parse(hdev);
        if (ret) {
@@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
                hid_err(hdev, "failed to claim input\n");
-               hid_hw_stop(hdev);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err;
        }
 
        if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
+               if (!hid_is_usb(hdev)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+
                sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
-               if (!sc->ghl_urb)
-                       return -ENOMEM;
+               if (!sc->ghl_urb) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
 
                if (sc->quirks & GHL_GUITAR_PS3WIIU)
                        ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data,
@@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                                                           ARRAY_SIZE(ghl_ps4_magic_data));
                if (ret) {
                        hid_err(hdev, "error preparing URB\n");
-                       return ret;
+                       goto err;
                }
 
                timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
@@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        }
 
        return ret;
+
+err:
+       hid_hw_stop(hdev);
+       return ret;
 }
 
 static void sony_remove(struct hid_device *hdev)
index d44550aa880573c037857be00ae94582e7fac644..03b935ff02d5616f67e3287ceeb0dc9926703429 100644 (file)
@@ -205,7 +205,7 @@ static void thrustmaster_model_handler(struct urb *urb)
        struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
        uint16_t model = 0;
        int i, ret;
-       const struct tm_wheel_info *twi = 0;
+       const struct tm_wheel_info *twi = NULL;
 
        if (urb->status) {
                hid_err(hdev, "URB to get model id failed with error %d\n", urb->status);
@@ -238,7 +238,7 @@ static void thrustmaster_model_handler(struct urb *urb)
                tm_wheel->usb_dev,
                usb_sndctrlpipe(tm_wheel->usb_dev, 0),
                (char *)tm_wheel->change_request,
-               0, 0, // We do not expect any response from the wheel
+               NULL, 0, // We do not expect any response from the wheel
                thrustmaster_change_handler,
                hdev
        );
@@ -272,7 +272,10 @@ static void thrustmaster_remove(struct hid_device *hdev)
 static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret = 0;
-       struct tm_wheel *tm_wheel = 0;
+       struct tm_wheel *tm_wheel = NULL;
+
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
 
        ret = hid_parse(hdev);
        if (ret) {
index 31ea7fc69916b1f95b57645dac7bc1ccaa3152ab..ad489caf53ad80e60d2a9f2f64d47143816d139c 100644 (file)
@@ -311,7 +311,7 @@ static int u2fzero_probe(struct hid_device *hdev,
        unsigned int minor;
        int ret;
 
-       if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+       if (!hid_is_usb(hdev))
                return -EINVAL;
 
        dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
index 6a9865dd703c027be18d1420c5fec1d764b61631..d8ab0139e5cdae30d70191ae08d7a479849b04d2 100644 (file)
@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
        struct uclogic_drvdata *drvdata = NULL;
        bool params_initialized = false;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        /*
         * libinput requires the pad interface to be on a different node
         * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
index 3d67b748a3b959f4febddc2db3f7a3f408924256..adff1bd68d9f84819a15043e15c786acfd934579 100644 (file)
@@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params,
        struct uclogic_params p = {0, };
 
        /* Check arguments */
-       if (params == NULL || hdev == NULL ||
-           !hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
                rc = -EINVAL;
                goto cleanup;
        }
index 1c5039081db2786474d8cdc9846f0f18bba58090..8e9d9450cb835df16dbc556dd0043713dd7d986a 100644 (file)
@@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
 
        if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
                        && IPC_IS_ISH_ILUP(fwsts)) {
-               disable_irq_wake(pdev->irq);
+               if (device_may_wakeup(&pdev->dev))
+                       disable_irq_wake(pdev->irq);
 
                ish_set_host_ready(dev);
 
@@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device)
                         */
                        pci_save_state(pdev);
 
-                       enable_irq_wake(pdev->irq);
+                       if (device_may_wakeup(&pdev->dev))
+                               enable_irq_wake(pdev->irq);
                }
        } else {
                /*
index 1b486f2627477cc458b0b136f868f1ecb0fc9e63..0e1183e961471d57e49295ed60d050e94783d7cb 100644 (file)
@@ -76,9 +76,12 @@ enum ish_loader_commands {
 #define LOADER_XFER_MODE_ISHTP                 BIT(1)
 
 /* ISH Transport Loader client unique GUID */
-static const guid_t loader_ishtp_guid =
-       GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
-                 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc);
+static const struct ishtp_device_id loader_ishtp_id_table[] = {
+       { .guid = GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
+                 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc) },
+       { }
+};
+MODULE_DEVICE_TABLE(ishtp, loader_ishtp_id_table);
 
 #define FILENAME_SIZE                          256
 
@@ -880,7 +883,7 @@ static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
 
        fw_client =
                ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
-                                      &loader_ishtp_guid);
+                                      &loader_ishtp_id_table[0].guid);
        if (!fw_client) {
                dev_err(cl_data_to_dev(client_data),
                        "ISH client uuid not found\n");
@@ -1057,7 +1060,7 @@ static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
 
 static struct ishtp_cl_driver  loader_ishtp_cl_driver = {
        .name = "ish-loader",
-       .guid = &loader_ishtp_guid,
+       .id = loader_ishtp_id_table,
        .probe = loader_ishtp_cl_probe,
        .remove = loader_ishtp_cl_remove,
        .reset = loader_ishtp_cl_reset,
@@ -1083,4 +1086,3 @@ MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver");
 MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
 
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
index 91bf4d01e91a7c0bcd36cd6eb2e5a6fbe12181f4..4338c9b68a43d3cc72e51f7f97787e2714e10201 100644 (file)
 #include "ishtp-hid.h"
 
 /* ISH Transport protocol (ISHTP in short) GUID */
-static const guid_t hid_ishtp_guid =
-       GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
-                 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26);
+static const struct ishtp_device_id hid_ishtp_id_table[] = {
+       { .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
+                 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26), },
+       { }
+};
+MODULE_DEVICE_TABLE(ishtp, hid_ishtp_id_table);
 
 /* Rx ring buffer pool size */
 #define HID_CL_RX_RING_SIZE    32
@@ -662,7 +665,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
        ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
        ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
 
-       fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
+       fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_id_table[0].guid);
        if (!fw_client) {
                dev_err(cl_data_to_dev(client_data),
                        "ish client uuid not found\n");
@@ -945,7 +948,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = {
 
 static struct ishtp_cl_driver  hid_ishtp_cl_driver = {
        .name = "ish-hid",
-       .guid = &hid_ishtp_guid,
+       .id = hid_ishtp_id_table,
        .probe = hid_ishtp_cl_probe,
        .remove = hid_ishtp_cl_remove,
        .reset = hid_ishtp_cl_reset,
@@ -981,4 +984,3 @@ MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
 MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
 
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("ishtp:*");
index 334eac611774d7aaa292dd13102adf21acd0c3a5..f68aba8794fe50eaa6a4fd2948dd2b7fd65b4679 100644 (file)
@@ -241,7 +241,7 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
        struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
        struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
 
-       return guid_equal(driver->guid,
+       return guid_equal(&driver->id[0].guid,
                          &device->fw_client->props.protocol_name);
 }
 
@@ -350,7 +350,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
 {
        int len;
 
-       len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev));
+       len = snprintf(buf, PAGE_SIZE, ISHTP_MODULE_PREFIX "%s\n", dev_name(dev));
        return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
 }
 static DEVICE_ATTR_RO(modalias);
@@ -363,7 +363,7 @@ ATTRIBUTE_GROUPS(ishtp_cl_dev);
 
 static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-       if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev)))
+       if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev)))
                return -ENOMEM;
        return 0;
 }
index 2717d39600b402af0b8b7a1498926468a38239b2..066c567dbaa229f3fa5948cbc3cc1484e8bf5e2b 100644 (file)
@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
         * Skip the query for this type and modify defaults based on
         * interface number.
         */
-       if (features->type == WIRELESS) {
+       if (features->type == WIRELESS && intf) {
                if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
                        features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
                else
@@ -2214,7 +2214,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
        if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
                char *product_name = wacom->hdev->name;
 
-               if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
+               if (hid_is_usb(wacom->hdev)) {
                        struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
                        struct usb_device *dev = interface_to_usbdev(intf);
                        product_name = dev->product;
@@ -2451,6 +2451,9 @@ static void wacom_wireless_work(struct work_struct *work)
 
        wacom_destroy_battery(wacom);
 
+       if (!usbdev)
+               return;
+
        /* Stylus interface */
        hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
        wacom1 = hid_get_drvdata(hdev1);
@@ -2730,8 +2733,6 @@ static void wacom_mode_change_work(struct work_struct *work)
 static int wacom_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       struct usb_device *dev = interface_to_usbdev(intf);
        struct wacom *wacom;
        struct wacom_wac *wacom_wac;
        struct wacom_features *features;
@@ -2766,8 +2767,14 @@ static int wacom_probe(struct hid_device *hdev,
        wacom_wac->hid_data.inputmode = -1;
        wacom_wac->mode_report = -1;
 
-       wacom->usbdev = dev;
-       wacom->intf = intf;
+       if (hid_is_usb(hdev)) {
+               struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+               struct usb_device *dev = interface_to_usbdev(intf);
+
+               wacom->usbdev = dev;
+               wacom->intf = intf;
+       }
+
        mutex_init(&wacom->lock);
        INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
        INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
index 33a6908995b1be7818d9e9ee0a4869573da67627..2a4cc39962e765b2eadf19490effa98fb3c7ae3c 100644 (file)
@@ -2603,6 +2603,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
                return;
 
        switch (equivalent_usage) {
+       case HID_DG_CONFIDENCE:
+               wacom_wac->hid_data.confidence = value;
+               break;
        case HID_GD_X:
                wacom_wac->hid_data.x = value;
                break;
@@ -2635,7 +2638,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
        }
 
        if (usage->usage_index + 1 == field->report_count) {
-               if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
+               if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
+                   wacom_wac->hid_data.confidence)
                        wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
        }
 }
@@ -2653,6 +2657,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
 
        wacom_wac->is_invalid_bt_frame = false;
 
+       hid_data->confidence = true;
+
        for (i = 0; i < report->maxfield; i++) {
                struct hid_field *field = report->field[i];
                int j;
index 8b2d4e5b2303c33ef76eafae9a707b8e591507fd..466b62cc16dc19e8f29618ed258ea5404a790150 100644 (file)
@@ -301,6 +301,7 @@ struct hid_data {
        bool barrelswitch;
        bool barrelswitch2;
        bool serialhi;
+       bool confidence;
        int x;
        int y;
        int pressure;
index 7f11ea07d698f1cb7d2029e9fc35554c9ca7a575..ca873a3b98dbe6c81023be39f98da71565af598d 100644 (file)
@@ -480,7 +480,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
 static atomic_t trans_id = ATOMIC_INIT(0);
 
-static int dm_ring_size = 20 * 1024;
+static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024);
 
 /*
  * Driver specific state.
index 731d5117f9f10879506a9f123f46e4f1b7513de2..14389fd7afb896a9a4032d640b3418e810880d9e 100644 (file)
@@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
        corsairpsu_check_cmd_support(priv);
 
        priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv,
-                                                         &corsairpsu_chip_info, 0);
+                                                         &corsairpsu_chip_info, NULL);
 
        if (IS_ERR(priv->hwmon_dev)) {
                ret = PTR_ERR(priv->hwmon_dev);
index eaace478f50878f6ffb4a8b26caee1f190a1c15c..5596c211f38d92f64a351ff3add5459dba9a3edd 100644 (file)
@@ -627,10 +627,9 @@ static void __init i8k_init_procfs(struct device *dev)
 {
        struct dell_smm_data *data = dev_get_drvdata(dev);
 
-       /* Register the proc entry */
-       proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data);
-
-       devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
+       /* Only register exit function if creation was successful */
+       if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
+               devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
 }
 
 #else
index 93dca471972ea352b1129e543e084b25acbdd1e8..57ce8633a72561d2e126e868c3aede44e6f7478e 100644 (file)
@@ -1527,7 +1527,7 @@ static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)
 
        nct6775_wmi_set_bank(data, reg);
 
-       err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+       err = nct6775_asuswmi_read(data->bank, reg & 0xff, &tmp);
        if (err)
                return 0;
 
index 17518b4cab1b007092dd4dd3973ebabe2e6b6e43..f12b9a28a232d498a68dcfe025998342e2137783 100644 (file)
@@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
                        return ret;
        }
 
-       ctx->pwm_value = MAX_PWM;
-
        pwm_init_state(ctx->pwm, &ctx->pwm_state);
 
        /*
index 09c2a0b0644440d0cd27e29236c3678621593780..3415d7a0e0fc78a4acd3a5e3a9283b98eea78e20 100644 (file)
@@ -23,7 +23,7 @@
 /*
  * I2C command delays (in microseconds)
  */
-#define SHT4X_MEAS_DELAY       1000
+#define SHT4X_MEAS_DELAY_HPM   8200    /* see t_MEAS,h in datasheet */
 #define SHT4X_DELAY_EXTRA      10000
 
 /*
@@ -90,7 +90,7 @@ static int sht4x_read_values(struct sht4x_data *data)
        if (ret < 0)
                goto unlock;
 
-       usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+       usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
 
        ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
        if (ret != SHT4X_RESPONSE_LENGTH) {
index 72df563477b1c3e3d99feea2126bdc03fd408cd2..f8639a4457d23ae55eece7874c10e3a05562b58d 100644 (file)
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
 }
 
 static const struct i2c_algorithm cbus_i2c_algo = {
-       .smbus_xfer     = cbus_i2c_smbus_xfer,
-       .functionality  = cbus_i2c_func,
+       .smbus_xfer             = cbus_i2c_smbus_xfer,
+       .smbus_xfer_atomic      = cbus_i2c_smbus_xfer,
+       .functionality          = cbus_i2c_func,
 };
 
 static int cbus_i2c_remove(struct platform_device *pdev)
index 05187457f88a2ec24e8329aa3cd83afc208c4939..41446f9cc52da96db1084ad06a94c08a91d93374 100644 (file)
 #define SMBSLVSTS_HST_NTFY_STS BIT(0)
 
 /* Host Notify Command register bits */
+#define SMBSLVCMD_SMBALERT_DISABLE     BIT(2)
 #define SMBSLVCMD_HST_NTFY_INTREN      BIT(0)
 
 #define STATUS_ERROR_FLAGS     (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
@@ -259,6 +260,7 @@ struct i801_priv {
        struct i2c_adapter adapter;
        unsigned long smba;
        unsigned char original_hstcfg;
+       unsigned char original_hstcnt;
        unsigned char original_slvcmd;
        struct pci_dev *pci_dev;
        unsigned int features;
@@ -641,12 +643,20 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
                i801_isr_byte_done(priv);
 
        /*
-        * Clear irq sources and report transaction result.
+        * Clear remaining IRQ sources: Completion of last command, errors
+        * and the SMB_ALERT signal. SMB_ALERT status is set after signal
+        * assertion independently of the interrupt generation being blocked
+        * or not so clear it always when the status is set.
+        */
+       status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS | SMBHSTSTS_SMBALERT_STS;
+       if (status)
+               outb_p(status, SMBHSTSTS(priv));
+       status &= ~SMBHSTSTS_SMBALERT_STS; /* SMB_ALERT not reported */
+       /*
+        * Report transaction result.
         * ->status must be cleared before the next transaction is started.
         */
-       status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS;
        if (status) {
-               outb_p(status, SMBHSTSTS(priv));
                priv->status = status;
                complete(&priv->done);
        }
@@ -974,9 +984,13 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
        if (!(priv->features & FEATURE_HOST_NOTIFY))
                return;
 
-       if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
-               outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
-                      SMBSLVCMD(priv));
+       /*
+        * Enable host notify interrupt and block the generation of interrupt
+        * from the SMB_ALERT signal because the driver does not support
+        * SMBus Alert.
+        */
+       outb_p(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE |
+              priv->original_slvcmd, SMBSLVCMD(priv));
 
        /* clear Host Notify bit to allow a new notification */
        outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
@@ -1805,7 +1819,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
                outb_p(inb_p(SMBAUXCTL(priv)) &
                       ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
-       /* Remember original Host Notify setting */
+       /* Remember original Interrupt and Host Notify settings */
+       priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL;
        if (priv->features & FEATURE_HOST_NOTIFY)
                priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
 
@@ -1869,6 +1884,7 @@ static void i801_remove(struct pci_dev *dev)
 {
        struct i801_priv *priv = pci_get_drvdata(dev);
 
+       outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
        i801_disable_host_notify(priv);
        i801_del_mux(priv);
        i2c_del_adapter(&priv->adapter);
@@ -1892,6 +1908,7 @@ static void i801_shutdown(struct pci_dev *dev)
        struct i801_priv *priv = pci_get_drvdata(dev);
 
        /* Restore config registers to avoid hard hang on some systems */
+       outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
        i801_disable_host_notify(priv);
        pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 }
@@ -1901,6 +1918,7 @@ static int i801_suspend(struct device *dev)
 {
        struct i801_priv *priv = dev_get_drvdata(dev);
 
+       outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
        pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
        return 0;
 }
index a6ea1eb1394e1cd78b4c4e7711a2b6d15ff913cb..53b8da6dbb23f0ec8768b192d537f732d6a46dce 100644 (file)
@@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
        status = readb(i2c->base + MPC_I2C_SR);
        if (status & CSR_MIF) {
                /* Wait up to 100us for transfer to properly complete */
-               readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
+               readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
                writeb(0, i2c->base + MPC_I2C_SR);
                mpc_i2c_do_intr(i2c, status);
                return IRQ_HANDLED;
index 819ab4ee517e13cf1367cbfb157a72add5129d6b..02ddb237f69afdfcc766f1703e9b81297882534a 100644 (file)
@@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
        if (!(ipd & REG_INT_MBRF))
                return;
 
-       /* ack interrupt */
-       i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+       /* ack interrupt (read also produces a spurious START flag, clear it too) */
+       i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
 
        /* Can only handle a maximum of 32 bytes at a time */
        if (len > 32)
index b9b19a2a2ffa0b83957cdec0314b636db968b168..66145d2b9b55867e16dce3a756877fc145a4e323 100644 (file)
@@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
 {
        struct stm32f7_i2c_dev *i2c_dev = data;
        struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+       struct stm32_i2c_dma *dma = i2c_dev->dma;
        void __iomem *base = i2c_dev->base;
        u32 status, mask;
        int ret = IRQ_HANDLED;
@@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
                        __func__, f7_msg->addr);
                writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+               if (i2c_dev->use_dma) {
+                       stm32f7_i2c_disable_dma_req(i2c_dev);
+                       dmaengine_terminate_async(dma->chan_using);
+               }
                f7_msg->result = -ENXIO;
        }
 
@@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                /* Clear STOP flag */
                writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
 
-               if (i2c_dev->use_dma) {
+               if (i2c_dev->use_dma && !f7_msg->result) {
                        ret = IRQ_WAKE_THREAD;
                } else {
                        i2c_dev->master_mode = false;
@@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                if (f7_msg->stop) {
                        mask = STM32F7_I2C_CR2_STOP;
                        stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
-               } else if (i2c_dev->use_dma) {
+               } else if (i2c_dev->use_dma && !f7_msg->result) {
                        ret = IRQ_WAKE_THREAD;
                } else if (f7_msg->smbus) {
                        stm32f7_i2c_smbus_rep_start(i2c_dev);
@@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
        if (!ret) {
                dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
                stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_async(dma->chan_using);
                f7_msg->result = -ETIMEDOUT;
        }
 
@@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
        /* Disable dma */
        if (i2c_dev->use_dma) {
                stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_async(dma->chan_using);
        }
 
        i2c_dev->master_mode = false;
@@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
        time_left = wait_for_completion_timeout(&i2c_dev->complete,
                                                i2c_dev->adap.timeout);
        ret = f7_msg->result;
+       if (ret) {
+               if (i2c_dev->use_dma)
+                       dmaengine_synchronize(dma->chan_using);
+
+               /*
+                * It is possible that some unsent data have already been
+                * written into TXDR. To avoid sending old data in a
+                * further transfer, flush TXDR in case of any error
+                */
+               writel_relaxed(STM32F7_I2C_ISR_TXE,
+                              i2c_dev->base + STM32F7_I2C_ISR);
+               goto pm_free;
+       }
 
        if (!time_left) {
                dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
                        i2c_dev->msg->addr);
                if (i2c_dev->use_dma)
-                       dmaengine_terminate_all(dma->chan_using);
+                       dmaengine_terminate_sync(dma->chan_using);
+               stm32f7_i2c_wait_free_bus(i2c_dev);
                ret = -ETIMEDOUT;
        }
 
@@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
        timeout = wait_for_completion_timeout(&i2c_dev->complete,
                                              i2c_dev->adap.timeout);
        ret = f7_msg->result;
-       if (ret)
+       if (ret) {
+               if (i2c_dev->use_dma)
+                       dmaengine_synchronize(dma->chan_using);
+
+               /*
+                * It is possible that some unsent data have already been
+                * written into TXDR. To avoid sending old data in a
+                * further transfer, flush TXDR in case of any error
+                */
+               writel_relaxed(STM32F7_I2C_ISR_TXE,
+                              i2c_dev->base + STM32F7_I2C_ISR);
                goto pm_free;
+       }
 
        if (!timeout) {
                dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
                if (i2c_dev->use_dma)
-                       dmaengine_terminate_all(dma->chan_using);
+                       dmaengine_terminate_sync(dma->chan_using);
+               stm32f7_i2c_wait_free_bus(i2c_dev);
                ret = -ETIMEDOUT;
                goto pm_free;
        }
index 1ed4daa918a06dc919673364663132d55bc087ec..41eb0dcc3204fe86ddcc7570730ab5bf509850e3 100644 (file)
 /**
  * struct virtio_i2c - virtio I2C data
  * @vdev: virtio device for this controller
- * @completion: completion of virtio I2C message
  * @adap: I2C adapter for this controller
  * @vq: the virtio virtqueue for communication
  */
 struct virtio_i2c {
        struct virtio_device *vdev;
-       struct completion completion;
        struct i2c_adapter adap;
        struct virtqueue *vq;
 };
 
 /**
  * struct virtio_i2c_req - the virtio I2C request structure
+ * @completion: completion of virtio I2C message
  * @out_hdr: the OUT header of the virtio I2C message
  * @buf: the buffer into which data is read, or from which it's written
  * @in_hdr: the IN header of the virtio I2C message
  */
 struct virtio_i2c_req {
+       struct completion completion;
        struct virtio_i2c_out_hdr out_hdr       ____cacheline_aligned;
        uint8_t *buf                            ____cacheline_aligned;
        struct virtio_i2c_in_hdr in_hdr         ____cacheline_aligned;
@@ -47,9 +47,11 @@ struct virtio_i2c_req {
 
 static void virtio_i2c_msg_done(struct virtqueue *vq)
 {
-       struct virtio_i2c *vi = vq->vdev->priv;
+       struct virtio_i2c_req *req;
+       unsigned int len;
 
-       complete(&vi->completion);
+       while ((req = virtqueue_get_buf(vq, &len)))
+               complete(&req->completion);
 }
 
 static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
@@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
        for (i = 0; i < num; i++) {
                int outcnt = 0, incnt = 0;
 
+               init_completion(&reqs[i].completion);
+
                /*
                 * Only 7-bit mode supported for this moment. For the address
                 * format, Please check the Virtio I2C Specification.
@@ -104,24 +108,17 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
 
 static int virtio_i2c_complete_reqs(struct virtqueue *vq,
                                    struct virtio_i2c_req *reqs,
-                                   struct i2c_msg *msgs, int num,
-                                   bool timedout)
+                                   struct i2c_msg *msgs, int num)
 {
-       struct virtio_i2c_req *req;
-       bool failed = timedout;
-       unsigned int len;
+       bool failed = false;
        int i, j = 0;
 
        for (i = 0; i < num; i++) {
-               /* Detach the ith request from the vq */
-               req = virtqueue_get_buf(vq, &len);
+               struct virtio_i2c_req *req = &reqs[i];
 
-               /*
-                * Condition req == &reqs[i] should always meet since we have
-                * total num requests in the vq. reqs[i] can never be NULL here.
-                */
-               if (!failed && (WARN_ON(req != &reqs[i]) ||
-                               req->in_hdr.status != VIRTIO_I2C_MSG_OK))
+               wait_for_completion(&req->completion);
+
+               if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
                        failed = true;
 
                i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
@@ -130,7 +127,7 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
                        j++;
        }
 
-       return timedout ? -ETIMEDOUT : j;
+       return j;
 }
 
 static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
@@ -139,7 +136,6 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
        struct virtio_i2c *vi = i2c_get_adapdata(adap);
        struct virtqueue *vq = vi->vq;
        struct virtio_i2c_req *reqs;
-       unsigned long time_left;
        int count;
 
        reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL);
@@ -158,15 +154,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
         * remote here to clear the virtqueue, so we can try another set of
         * messages later on.
         */
-
-       reinit_completion(&vi->completion);
        virtqueue_kick(vq);
 
-       time_left = wait_for_completion_timeout(&vi->completion, adap->timeout);
-       if (!time_left)
-               dev_err(&adap->dev, "virtio i2c backend timeout.\n");
-
-       count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left);
+       count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
 
 err_free:
        kfree(reqs);
@@ -214,8 +204,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
        vdev->priv = vi;
        vi->vdev = vdev;
 
-       init_completion(&vi->completion);
-
        ret = virtio_i2c_setup_vqs(vi);
        if (ret)
                return ret;
index a51fdd3c9b5b5b6bdfa059cceb3086fa0adfa62f..24c9387c296879cffe851bfcbac7dcaea343634a 100644 (file)
@@ -1595,8 +1595,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
        return 0;
 
 err_buffer_cleanup:
-       if (data->dready_trig)
-               iio_triggered_buffer_cleanup(indio_dev);
+       iio_triggered_buffer_cleanup(indio_dev);
 err_trigger_unregister:
        if (data->dready_trig)
                iio_trigger_unregister(data->dready_trig);
@@ -1618,8 +1617,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
 
+       iio_triggered_buffer_cleanup(indio_dev);
        if (data->dready_trig) {
-               iio_triggered_buffer_cleanup(indio_dev);
                iio_trigger_unregister(data->dready_trig);
                iio_trigger_unregister(data->motion_trig);
        }
index 2faf85ca996e1daffb218df82361da71a29c02da..552eba5e8b4fda35799ebcae61e9b91e6f15ecff 100644 (file)
@@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
                               hw_values.chan,
                               sizeof(hw_values.chan));
        if (ret) {
-               dev_err(st->dev,
-                       "error reading data\n");
-               return ret;
+               dev_err(st->dev, "error reading data: %d\n", ret);
+               goto out;
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev,
                                           &hw_values,
                                           iio_get_time_ns(indio_dev));
+out:
        iio_trigger_notify_done(indio_dev->trig);
 
        return IRQ_HANDLED;
index 715b8138fb715cb64897635364c853437a4afd08..09c7f10fefb6e426c02736c84242e8ccf3d4d9d6 100644 (file)
@@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
        if (ret)
                return ret;
 
-       indio_dev->trig = trig;
+       indio_dev->trig = iio_trigger_get(trig);
 
        return 0;
 }
index 8bf5b62a73f423ebf358e43719449673b7fa0866..3363af15a43f886ced8ae53a05b7b5db3e625158 100644 (file)
@@ -532,7 +532,7 @@ config IMX7D_ADC
 
 config IMX8QXP_ADC
        tristate "NXP IMX8QXP ADC driver"
-       depends on ARCH_MXC_ARM64 || COMPILE_TEST
+       depends on ARCH_MXC || COMPILE_TEST
        depends on HAS_IOMEM
        help
          Say yes here to build support for IMX8QXP ADC.
index 2c5c8a3672b2d5d28f9392e16ff8bcfaa8d599e5..aa42ba759fa1a0013bf544558bfae39813076e8d 100644 (file)
@@ -480,8 +480,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
        iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
                                           iio_get_time_ns(indio_dev));
 
-       iio_trigger_notify_done(indio_dev->trig);
 err_unlock:
+       iio_trigger_notify_done(indio_dev->trig);
        mutex_unlock(&st->lock);
 
        return IRQ_HANDLED;
index 4c922ef634f8e81b5c45ce4dfcdf4f31722f03ca..92a57cf10fba4a05d797b7308404a66255263824 100644 (file)
@@ -1586,7 +1586,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
                *val = st->conversion_value;
                ret = at91_adc_adjust_val_osr(st, val);
                if (chan->scan_type.sign == 's')
-                       *val = sign_extend32(*val, 11);
+                       *val = sign_extend32(*val,
+                                            chan->scan_type.realbits - 1);
                st->conversion_done = false;
        }
 
index 3e0c0233b43156c899398c34b240001097d40058..df99f1365c398306524389f376002060247b6fb0 100644 (file)
@@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
                          struct iio_chan_spec const *chan, int *val)
 {
        struct axp20x_adc_iio *info = iio_priv(indio_dev);
-       int size;
 
-       /*
-        * N.B.: Unlike the Chinese datasheets tell, the charging current is
-        * stored on 12 bits, not 13 bits. Only discharging current is on 13
-        * bits.
-        */
-       if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
-               size = 13;
-       else
-               size = 12;
-
-       *val = axp20x_read_variable_width(info->regmap, chan->address, size);
+       *val = axp20x_read_variable_width(info->regmap, chan->address, 12);
        if (*val < 0)
                return *val;
 
@@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
                return IIO_VAL_INT_PLUS_MICRO;
 
        case IIO_CURRENT:
-               *val = 0;
-               *val2 = 500000;
-               return IIO_VAL_INT_PLUS_MICRO;
+               *val = 1;
+               return IIO_VAL_INT;
 
        case IIO_TEMP:
                *val = 100;
index 16407664182ce8005642ccd7d09ec15f4958a596..97d162a3cba4eaaec1442718898d539698529951 100644 (file)
@@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
 static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
 {
        int ret, i;
-       struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
        u16 conflict;
        __le16 value;
        int olen = sizeof(value);
@@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
                .chan = channel,
        };
 
-       ret = iio_device_claim_direct_mode(indio_dev);
-       if (ret < 0)
-               return ret;
-
        ret = dln2_adc_set_chan_enabled(dln2, channel, true);
        if (ret < 0)
-               goto release_direct;
+               return ret;
 
        ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
        if (ret < 0) {
@@ -300,8 +295,6 @@ disable_port:
        dln2_adc_set_port_enabled(dln2, false, NULL);
 disable_chan:
        dln2_adc_set_chan_enabled(dln2, channel, false);
-release_direct:
-       iio_device_release_direct_mode(indio_dev);
 
        return ret;
 }
@@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret < 0)
+                       return ret;
+
                mutex_lock(&dln2->mutex);
                ret = dln2_adc_read(dln2, chan->channel);
                mutex_unlock(&dln2->mutex);
 
+               iio_device_release_direct_mode(indio_dev);
+
                if (ret < 0)
                        return ret;
 
@@ -656,7 +655,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        iio_trigger_set_drvdata(dln2->trig, dln2);
-       devm_iio_trigger_register(dev, dln2->trig);
+       ret = devm_iio_trigger_register(dev, dln2->trig);
+       if (ret) {
+               dev_err(dev, "failed to register trigger: %d\n", ret);
+               return ret;
+       }
        iio_trigger_set_immutable(indio_dev, dln2->trig);
 
        ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
index 6245434f8377954b635a6e6db556500b75acbd67..8cd258cb2682e6620bac9e7963670cf927be6eff 100644 (file)
@@ -1117,6 +1117,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
 {
        struct stm32_adc *adc = iio_priv(indio_dev);
 
+       stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
        stm32h7_adc_disable(indio_dev);
        stm32_adc_int_ch_disable(adc);
        stm32h7_adc_enter_pwr_down(adc);
@@ -1986,7 +1987,7 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
                        /* Get calibration data for vrefint channel */
                        ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
                        if (ret && ret != -ENOENT) {
-                               return dev_err_probe(&indio_dev->dev, ret,
+                               return dev_err_probe(indio_dev->dev.parent, ret,
                                                     "nvmem access error\n");
                        }
                        if (ret == -ENOENT)
index 3e0734ddafe363355c17614a3c14b885ae7164ea..600e9725da7882ce2f143217d0f94844d4e104a7 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
@@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *
                goto err_unlock;
        }
 
-       *val = temp;
+       *val = sign_extend32(temp, 15);
 
 err_unlock:
        mutex_unlock(&st->lock);
@@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
        }
 
        /* extract lower 12 bits temperature reading */
-       *val = temp & 0x0FFF;
+       *val = sign_extend32(temp, 11);
 
 err_unlock:
        mutex_unlock(&st->lock);
index 04dd6a7969ea79cf62a3c5bb2e0d6582a83c0e24..4cfa0d43956053e173686fa85ac708903396e50c 100644 (file)
@@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
 
        iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
 
+error_ret:
        iio_trigger_notify_done(indio_dev->trig);
 
-error_ret:
        return IRQ_HANDLED;
 }
 
index b23caa2f2aa1fd30327ee5f5c1ed1251e6b3768a..93990ff1dfe39e5a760e9b8c82099f4af8f5a282 100644 (file)
@@ -556,7 +556,6 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
                irq_modify_status(trig->subirq_base + i,
                                  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
        }
-       get_device(&trig->dev);
 
        return trig;
 
index 7e51aaac0bf86db63dd1717b816bfd3b3f6a3b12..b2983b1a9ed1cc05ddda4ec5a08372b0b2cac493 100644 (file)
@@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
                ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
                                       als_buf, sizeof(als_buf));
                if (ret < 0)
-                       return ret;
+                       goto done;
                if (test_bit(0, indio_dev->active_scan_mask))
                        scan.channels[j++] = le16_to_cpu(als_buf[1]);
                if (test_bit(1, indio_dev->active_scan_mask))
index 07e91846307c7d38474b46e2fafc68767b997724..fc63856ed54debe80b55e815f43a6e9cfc8ba6e6 100644 (file)
@@ -546,9 +546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
        mutex_lock(&data->lock);
        ret = regmap_field_read(data->reg_flag_nf, &dir);
        if (ret < 0) {
-               dev_err(&data->client->dev, "register read failed\n");
-               mutex_unlock(&data->lock);
-               return ret;
+               dev_err(&data->client->dev, "register read failed: %d\n", ret);
+               goto out;
        }
        event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
                                     IIO_EV_TYPE_THRESH,
@@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
        ret = regmap_field_write(data->reg_flag_psint, 0);
        if (ret < 0)
                dev_err(&data->client->dev, "failed to reset interrupts\n");
+out:
        mutex_unlock(&data->lock);
 
        return IRQ_HANDLED;
index 33083877cd19d134857f647b8f80030c7cc7e3a1..4353b749ecef2795cd11d055f30f4c364397208b 100644 (file)
@@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
 };
 module_platform_driver(stm32_timer_trigger_driver);
 
-MODULE_ALIAS("platform: stm32-timer-trigger");
+MODULE_ALIAS("platform:stm32-timer-trigger");
 MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
 MODULE_LICENSE("GPL v2");
index fedc0fa6ebf9baae421558dcc7ea86b6e935681f..f5aacaf7fb8efb1de96a9d4961c091aa224dbaf1 100644 (file)
@@ -1906,7 +1906,8 @@ static int nldev_stat_set_mode_doit(struct sk_buff *msg,
        int ret;
 
        /* Currently only counter for QP is supported */
-       if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+       if (!tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+           nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
                return -EINVAL;
 
        mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
index 692d5ff657dfa2c1e37884e484fe8b192cf067a6..c18634bec212671aae12fde4dd2db78a9b9cbbb6 100644 (file)
@@ -1232,6 +1232,9 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
        INIT_LIST_HEAD(&qp->rdma_mrs);
        INIT_LIST_HEAD(&qp->sig_mrs);
 
+       qp->send_cq = attr->send_cq;
+       qp->recv_cq = attr->recv_cq;
+
        rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
        WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
        rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
index ec37f4fd8e96bac28efd784d757bc6ff12e1dbb0..f1245c94ae2629d329d68e19349360b37bf7f725 100644 (file)
@@ -8415,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
  */
 static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
 {
+       if (!rcd->rcvhdrq)
+               return;
        clear_recv_intr(rcd);
        if (check_packet_present(rcd))
                force_recv_intr(rcd);
index 61f341c3005cb972dc56a8c34e64fbbc7cd03131..e2c634af40e990103d9010c5e0eab5cd80faa663 100644 (file)
@@ -1012,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
        struct hfi1_packet packet;
        int skip_pkt = 0;
 
+       if (!rcd->rcvhdrq)
+               return RCV_PKT_OK;
        /* Control context will always use the slow path interrupt handler */
        needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
 
index dbd1c31830b9c22e0f405db3149d5c9effdaf59b..4436ed41547c4fcc82479a6a5baaaa75dcc87e44 100644 (file)
@@ -113,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
        rcd->fast_handler = get_dma_rtail_setting(rcd) ?
                                handle_receive_interrupt_dma_rtail :
                                handle_receive_interrupt_nodma_rtail;
-       rcd->slow_handler = handle_receive_interrupt;
 
        hfi1_set_seq_cnt(rcd, 1);
 
@@ -334,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;
                rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+               rcd->slow_handler = handle_receive_interrupt;
+               rcd->do_interrupt = rcd->slow_handler;
                rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
 
                mutex_init(&rcd->exp_mutex);
@@ -874,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
        if (ret)
                goto done;
 
-       /* allocate dummy tail memory for all receive contexts */
-       dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
-                                                        sizeof(u64),
-                                                        &dd->rcvhdrtail_dummy_dma,
-                                                        GFP_KERNEL);
-
-       if (!dd->rcvhdrtail_dummy_kvaddr) {
-               dd_dev_err(dd, "cannot allocate dummy tail memory\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
                /*
@@ -898,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
                if (!rcd)
                        continue;
 
-               rcd->do_interrupt = &handle_receive_interrupt;
-
                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
@@ -1120,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
        rcd->egrbufs.rcvtids = NULL;
 
        for (e = 0; e < rcd->egrbufs.alloced; e++) {
-               if (rcd->egrbufs.buffers[e].dma)
+               if (rcd->egrbufs.buffers[e].addr)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
@@ -1201,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
        dd->tx_opstats    = NULL;
        kfree(dd->comp_vect);
        dd->comp_vect = NULL;
+       if (dd->rcvhdrtail_dummy_kvaddr)
+               dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+                                 (void *)dd->rcvhdrtail_dummy_kvaddr,
+                                 dd->rcvhdrtail_dummy_dma);
+       dd->rcvhdrtail_dummy_kvaddr = NULL;
        sdma_clean(dd, dd->num_sdma);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
 }
@@ -1298,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                goto bail;
        }
 
+       /* allocate dummy tail memory for all receive contexts */
+       dd->rcvhdrtail_dummy_kvaddr =
+               dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
+                                  &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
+       if (!dd->rcvhdrtail_dummy_kvaddr) {
+               ret = -ENOMEM;
+               goto bail;
+       }
+
        atomic_set(&dd->ipoib_rsm_usr_num, 0);
        return dd;
 
@@ -1505,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
 
        free_credit_return(dd);
 
-       if (dd->rcvhdrtail_dummy_kvaddr) {
-               dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
-                                 (void *)dd->rcvhdrtail_dummy_kvaddr,
-                                 dd->rcvhdrtail_dummy_dma);
-               dd->rcvhdrtail_dummy_kvaddr = NULL;
-       }
-
        /*
         * Free any resources still in use (usually just kernel contexts)
         * at unload; we do for ctxtcnt, because that's what we allocate.
index 2b6c24b7b58655b2d847585db6db71c4c5d85b13..f07d328689d3d4de4e554867aa08d283c95ebb74 100644 (file)
@@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
        if (current->nr_cpus_allowed != 1)
                goto out;
 
-       cpu_id = smp_processor_id();
        rcu_read_lock();
+       cpu_id = smp_processor_id();
        rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
                                     sdma_rht_params);
 
index ed9fa0d84e9ed3a3a87d55d566460613513f65cb..dc9211f3a0098fa54c1fbe934c7e7f561342a4ab 100644 (file)
@@ -1628,8 +1628,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
                        n++;
 
        names_out =
-               kmalloc((n + num_extra_names) * sizeof(struct rdma_stat_desc) +
-                               names_len,
+               kzalloc((n + num_extra_names) * sizeof(*q) + names_len,
                        GFP_KERNEL);
        if (!names_out) {
                *num_cntrs = 0;
@@ -1637,7 +1636,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
                return -ENOMEM;
        }
 
-       p = names_out + (n + num_extra_names) * sizeof(struct rdma_stat_desc);
+       p = names_out + (n + num_extra_names) * sizeof(*q);
        memcpy(p, names_in, names_len);
 
        q = (struct rdma_stat_desc *)names_out;
index 9bfbaddd1763de473a5f15dd7243b1f382d9b82b..bbfa1332dedc003cc6f6177cc27812e1e62befd7 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/acpi.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <net/addrconf.h>
@@ -1050,9 +1051,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
 {
+#define HW_RESET_TIMEOUT_US 1000000
+#define HW_RESET_SLEEP_US 1000
+
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+       unsigned long val;
+       int ret;
 
        /* When hardware reset is detected, we should stop sending mailbox&cmq&
         * doorbell to hardware. If now in .init_instance() function, we should
@@ -1064,7 +1070,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
         * again.
         */
        hr_dev->dis_db = true;
-       if (!ops->get_hw_reset_stat(handle))
+
+       ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
+                               val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
+                               HW_RESET_TIMEOUT_US, false, handle);
+       if (!ret)
                hr_dev->is_reset = true;
 
        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
@@ -6387,10 +6397,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
        if (!hr_dev)
                return 0;
 
-       hr_dev->is_reset = true;
        hr_dev->active = false;
        hr_dev->dis_db = true;
-
        hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
 
        return 0;
index 4108dcabece2390229ff3efd014a1551cccfb6e8..b4c657f5f2f95cb6d0c2c681e51d5d9579133717 100644 (file)
@@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
 {
        struct irdma_cq *cq = iwcq->back_cq;
 
+       if (!cq->user_mode)
+               cq->armed = false;
        if (cq->ibcq.comp_handler)
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
@@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
                qp->flush_code = FLUSH_PROT_ERR;
                break;
        case IRDMA_AE_AMP_BAD_QP:
+       case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
                qp->flush_code = FLUSH_LOC_QP_OP_ERR;
                break;
        case IRDMA_AE_AMP_BAD_STAG_KEY:
@@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_PRIV_OPERATION_DENIED:
        case IRDMA_AE_IB_INVALID_REQUEST:
        case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
-       case IRDMA_AE_IB_REMOTE_OP_ERROR:
                qp->flush_code = FLUSH_REM_ACCESS_ERR;
                qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
@@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
                qp->flush_code = FLUSH_MW_BIND_ERR;
                break;
+       case IRDMA_AE_IB_REMOTE_OP_ERROR:
+               qp->flush_code = FLUSH_REM_OP_ERR;
+               break;
        default:
                qp->flush_code = FLUSH_FATAL_ERR;
                break;
index 91a497139ba3a690af37f75c5342480d43c7b8d3..cb218cab79ac12d9e9f195d4075bf0aa77156d55 100644 (file)
@@ -542,6 +542,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
                    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
                    void *cb_param);
 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
 int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
                         void *ptr);
 int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
index aeeb1c310965decb8b3110cf8a93101c73500e65..fed49da770f3b66eec5a335941b5bd69914493d3 100644 (file)
@@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                list_del(&chunk->list);
                if (chunk->type == PBLE_SD_PAGED)
                        irdma_pble_free_paged_mem(chunk);
-               if (chunk->bitmapbuf)
-                       kfree(chunk->bitmapmem.va);
+               bitmap_free(chunk->bitmapbuf);
                kfree(chunk->chunkmem.va);
        }
 }
@@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
                  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
-       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
                             sd_entry->u.pd_table.pd_page_addr.pa :
                             sd_entry->u.bp.addr.pa;
@@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                        goto error;
        }
 
+       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_entry->valid = true;
        return 0;
 
 error:
-       if (chunk->bitmapbuf)
-               kfree(chunk->bitmapmem.va);
+       bitmap_free(chunk->bitmapbuf);
        kfree(chunk->chunkmem.va);
 
        return ret_code;
index e1b3b8118a2ca4cd2d348f6c0c4414ddc2b95ca4..aa20827dcc9de53ef47ab241e87013d7bfe4c5bb 100644 (file)
@@ -78,7 +78,6 @@ struct irdma_chunk {
        u32 pg_cnt;
        enum irdma_alloc_type type;
        struct irdma_sc_dev *dev;
-       struct irdma_virt_mem bitmapmem;
        struct irdma_virt_mem chunkmem;
 };
 
index 8b42c43fc14fe89dfd504c90edffcf9975daa464..398736d8c78a4d0427830e2f18d70be80f415b0a 100644 (file)
@@ -2239,15 +2239,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
 
        sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
 
-       pchunk->bitmapmem.size = sizeofbitmap >> 3;
-       pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
-
-       if (!pchunk->bitmapmem.va)
+       pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+       if (!pchunk->bitmapbuf)
                return IRDMA_ERR_NO_MEMORY;
 
-       pchunk->bitmapbuf = pchunk->bitmapmem.va;
-       bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
-
        pchunk->sizeofbitmap = sizeofbitmap;
        /* each pble is 8 bytes hence shift by 3 */
        pprm->total_pble_alloc += pchunk->size >> 3;
@@ -2491,3 +2486,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
        ibevent.element.qp = &iwqp->ibqp;
        iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+       struct irdma_cq_uk *ukcq;
+       u64 qword3;
+       __le64 *cqe;
+       u8 polarity;
+
+       ukcq  = &iwcq->sc_cq.cq_uk;
+       cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+       get_64bit_val(cqe, 24, &qword3);
+       polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+       return polarity != ukcq->polarity;
+}
index 0f66e809d41850e39516f7465bcf8f2476639475..8cd5f9261692d45f00baa80d199f30cdaca37a15 100644 (file)
@@ -3584,18 +3584,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
        struct irdma_cq *iwcq;
        struct irdma_cq_uk *ukcq;
        unsigned long flags;
-       enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+       enum irdma_cmpl_notify cq_notify;
+       bool promo_event = false;
+       int ret = 0;
 
+       cq_notify = notify_flags == IB_CQ_SOLICITED ?
+                   IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
        iwcq = to_iwcq(ibcq);
        ukcq = &iwcq->sc_cq.cq_uk;
-       if (notify_flags == IB_CQ_SOLICITED)
-               cq_notify = IRDMA_CQ_COMPL_SOLICITED;
 
        spin_lock_irqsave(&iwcq->lock, flags);
-       irdma_uk_cq_request_notification(ukcq, cq_notify);
+       /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+       if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+               promo_event = true;
+
+       if (!iwcq->armed || promo_event) {
+               iwcq->armed = true;
+               iwcq->last_notify = cq_notify;
+               irdma_uk_cq_request_notification(ukcq, cq_notify);
+       }
+
+       if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+               ret = 1;
        spin_unlock_irqrestore(&iwcq->lock, flags);
 
-       return 0;
+       return ret;
 }
 
 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
index 5c244cd321a3ac79fac18fe60f2b5072fb366066..d0fdef8d09eada33149faed94ed2633577c7479b 100644 (file)
@@ -110,6 +110,8 @@ struct irdma_cq {
        u16 cq_size;
        u16 cq_num;
        bool user_mode;
+       bool armed;
+       enum irdma_cmpl_notify last_notify;
        u32 polled_cmpls;
        u32 cq_mem_size;
        struct irdma_dma_mem kmem;
index ceca05982f613840393c4fcf81cd20e7d8352f83..0d2fa3338784eafa57fdd887be359ce39ae626aa 100644 (file)
@@ -2215,6 +2215,11 @@ static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
        .get_hw_stats = mlx4_ib_get_hw_stats,
 };
 
+static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = {
+       .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
+       .get_hw_stats = mlx4_ib_get_hw_stats,
+};
+
 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
 {
        struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
@@ -2227,9 +2232,16 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
                return 0;
 
        for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
-               /* i == 1 means we are building port counters */
-               if (i && !per_port)
-                       continue;
+               /*
+                * i == 1 means we are building port counters, set a different
+                * stats ops without port stats callback.
+                */
+               if (i && !per_port) {
+                       ib_set_device_ops(&ibdev->ib_dev,
+                                         &mlx4_ib_hw_stats_ops1);
+
+                       return 0;
+               }
 
                ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
                                                    &diag[i].offset,
index e636e954f6bf2a30061ed41d2dcaa4a91557e9e7..4a7a56ed740b9b35e84b40e870ec70e7cf7feffd 100644 (file)
@@ -664,7 +664,6 @@ struct mlx5_ib_mr {
 
        /* User MR data */
        struct mlx5_cache_ent *cache_ent;
-       struct ib_umem *umem;
 
        /* This is zero'd when the MR is allocated */
        union {
@@ -676,7 +675,7 @@ struct mlx5_ib_mr {
                        struct list_head list;
                };
 
-               /* Used only by kernel MRs (umem == NULL) */
+               /* Used only by kernel MRs */
                struct {
                        void *descs;
                        void *descs_alloc;
@@ -697,8 +696,9 @@ struct mlx5_ib_mr {
                        int data_length;
                };
 
-               /* Used only by User MRs (umem != NULL) */
+               /* Used only by User MRs */
                struct {
+                       struct ib_umem *umem;
                        unsigned int page_shift;
                        /* Current access_flags */
                        int access_flags;
index 157d862fb86429ba3ba51cd18e98adf632f4d1cf..63e2129f1142ba5e49ffac9d356fd18d2418f44a 100644 (file)
@@ -1904,19 +1904,18 @@ err:
        return ret;
 }
 
-static void
-mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 {
-       if (!mr->umem && mr->descs) {
-               struct ib_device *device = mr->ibmr.device;
-               int size = mr->max_descs * mr->desc_size;
-               struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+       int size = mr->max_descs * mr->desc_size;
 
-               dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
-                                DMA_TO_DEVICE);
-               kfree(mr->descs_alloc);
-               mr->descs = NULL;
-       }
+       if (!mr->descs)
+               return;
+
+       dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+                        DMA_TO_DEVICE);
+       kfree(mr->descs_alloc);
+       mr->descs = NULL;
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1992,7 +1991,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
        if (mr->cache_ent) {
                mlx5_mr_cache_free(dev, mr);
        } else {
-               mlx5_free_priv_descs(mr);
+               if (!udata)
+                       mlx5_free_priv_descs(mr);
                kfree(mr);
        }
        return 0;
@@ -2079,7 +2079,6 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
        if (err)
                goto err_free_in;
 
-       mr->umem = NULL;
        kfree(in);
 
        return mr;
@@ -2206,7 +2205,6 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
        }
 
        mr->ibmr.device = pd->device;
-       mr->umem = NULL;
 
        switch (mr_type) {
        case IB_MR_TYPE_MEM_REG:
index 975321812c870ff544c39a560ed7dd6a8d6ffc06..54b8711321c1e58753e7df39af6d404dc63ff443 100644 (file)
@@ -359,6 +359,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 
 err2:
        rxe_queue_cleanup(qp->sq.queue);
+       qp->sq.queue = NULL;
 err1:
        qp->pd = NULL;
        qp->rcq = NULL;
index f7e459fe68be6e04e2adc31525daf5872942b892..76e4352fe3f63e6aaf17576e655ce63628264d90 100644 (file)
@@ -19,7 +19,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
        int cpu;
 
        cpu = raw_smp_processor_id();
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        if (con->cpu != cpu) {
                s->cpu_migr.to++;
 
@@ -27,14 +27,16 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
                s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
                atomic_inc(&s->cpu_migr.from);
        }
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
 {
        struct rtrs_clt_stats_pcpu *s;
 
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        s->rdma.failover_cnt++;
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,9 +171,10 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
 {
        struct rtrs_clt_stats_pcpu *s;
 
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        s->rdma.dir[d].cnt++;
        s->rdma.dir[d].size_total += size;
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
index 4ff5cd2a6d8de6b4c19da59a97de93ccf31ac0f9..3d17a0b3fe5118e336cb170b2735c1386fe25d2b 100644 (file)
@@ -542,6 +542,7 @@ static struct xenbus_driver xenkbd_driver = {
        .remove = xenkbd_remove,
        .resume = xenkbd_resume,
        .otherend_changed = xenkbd_backend_changed,
+       .not_essential = true,
 };
 
 static int __init xenkbd_init(void)
index 13cbeb997cc1a069302a11200be97ff0c2c1bd99..58da08cc3d01606af7932b63de41bca7e3b3bd53 100644 (file)
@@ -929,10 +929,8 @@ static int __init amd_iommu_v2_init(void)
 {
        int ret;
 
-       pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
-
        if (!amd_iommu_v2_supported()) {
-               pr_info("AMD IOMMUv2 functionality not available on this system\n");
+               pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
                /*
                 * Load anyway to provide the symbols to other modules
                 * which may use AMD IOMMUv2 optionally.
@@ -947,6 +945,8 @@ static int __init amd_iommu_v2_init(void)
 
        amd_iommu_register_ppr_notifier(&ppr_nb);
 
+       pr_info("AMD IOMMUv2 loaded and initialized\n");
+
        return 0;
 
 out:
index b39d223926a491cd26db5f9573bb1b9b1614ca22..71596fc62822c36d76a8f7ac4c9232b96e5bc6cc 100644 (file)
@@ -144,6 +144,7 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
 {
        struct dmar_drhd_unit *d;
        struct intel_iommu *i;
+       int rc = 0;
 
        rcu_read_lock();
        if (list_empty(&dmar_drhd_units))
@@ -169,11 +170,11 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type)
         */
        if (intel_cap_smts_sanity() &&
            !intel_cap_flts_sanity() && !intel_cap_slts_sanity())
-               return -EOPNOTSUPP;
+               rc = -EOPNOTSUPP;
 
 out:
        rcu_read_unlock();
-       return 0;
+       return rc;
 }
 
 int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu)
index 0bde0c8b41269ce7f6654bfb49a2115d65d3935d..b6a8f3282411fcc66a08d258f28088083417107e 100644 (file)
@@ -1339,13 +1339,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
        pte = &pte[pfn_level_offset(pfn, level)];
 
        do {
-               unsigned long level_pfn;
+               unsigned long level_pfn = pfn & level_mask(level);
 
                if (!dma_pte_present(pte))
                        goto next;
 
-               level_pfn = pfn & level_mask(level);
-
                /* If range covers entire pagetable, free it */
                if (start_pfn <= level_pfn &&
                    last_pfn >= level_pfn + level_size(level) - 1) {
@@ -1366,7 +1364,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
                                                       freelist);
                }
 next:
-               pfn += level_size(level);
+               pfn = level_pfn + level_size(level);
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
 
        if (first_pte)
index 5cb260820eda6a540462b4721afa62c1dd1af4b6..7f23ad61c094fb30c3d0978269594e4b8c9b368e 100644 (file)
@@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte)
 #define DTE_HI_MASK2   GENMASK(7, 4)
 #define DTE_HI_SHIFT1  24 /* shift bit 8 to bit 32 */
 #define DTE_HI_SHIFT2  32 /* shift bit 4 to bit 36 */
-#define PAGE_DESC_HI_MASK1     GENMASK_ULL(39, 36)
-#define PAGE_DESC_HI_MASK2     GENMASK_ULL(35, 32)
+#define PAGE_DESC_HI_MASK1     GENMASK_ULL(35, 32)
+#define PAGE_DESC_HI_MASK2     GENMASK_ULL(39, 36)
 
 static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
 {
index 3759dc36cc8f73f664e10e8ed6c8fd2ceca1754f..2543ef65825b903438f16331de4c92f864fe125e 100644 (file)
@@ -707,7 +707,7 @@ static const struct irq_domain_ops aic_ipi_domain_ops = {
        .free = aic_ipi_free,
 };
 
-static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
+static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
 {
        struct irq_domain *ipi_domain;
        int base_ipi;
index 80906bfec845f7c2f25b06ce2adabf06164bce22..5b8d571c041dccfe80fbad1756ebfd77cf7fb7ed 100644 (file)
@@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
        int hwirq, i;
 
        mutex_lock(&msi_used_lock);
+       hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+                                       order_base_2(nr_irqs));
+       mutex_unlock(&msi_used_lock);
 
-       hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
-                                          0, nr_irqs, 0);
-       if (hwirq >= PCI_MSI_DOORBELL_NR) {
-               mutex_unlock(&msi_used_lock);
+       if (hwirq < 0)
                return -ENOSPC;
-       }
-
-       bitmap_set(msi_used, hwirq, nr_irqs);
-       mutex_unlock(&msi_used_lock);
 
        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
                                    NULL, NULL);
        }
 
-       return hwirq;
+       return 0;
 }
 
 static void armada_370_xp_msi_free(struct irq_domain *domain,
@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 
        mutex_lock(&msi_used_lock);
-       bitmap_clear(msi_used, d->hwirq, nr_irqs);
+       bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
        mutex_unlock(&msi_used_lock);
 }
 
index f3c6855a4cefba5735ca64c73e9a76ffd82e6ab7..18b77c3e6db4ba939b79152b5df8b79316c60e86 100644 (file)
@@ -76,8 +76,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
                generic_handle_domain_irq(scu_ic->irq_domain,
                                          bit - scu_ic->irq_shift);
 
-               regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
-                                  BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+               regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
+                                 BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
        }
 
        chained_irq_exit(chip, desc);
index d80e67a6aad2a13c21a4c38fe5eceaad44dd967f..bb6609cebdbce9bc907bbf13fb46e472001064c0 100644 (file)
@@ -238,6 +238,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
        }
 
        data->num_parent_irqs = platform_irq_count(pdev);
+       put_device(&pdev->dev);
        if (data->num_parent_irqs <= 0) {
                pr_err("invalid number of parent interrupts\n");
                ret = -ENOMEM;
index eb0882d1536661475086a132bb0db4efee9ae4e6..0cb584d9815b96b3d219a3fc493bc35981fc6413 100644 (file)
@@ -742,7 +742,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return NULL;
+       return desc->its_invall_cmd.col;
 }
 
 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
index d02b05a067d950a0834a17f511af711603e1819b..ff89b36267dd4955e2a1a25184edb0daaf944e73 100644 (file)
@@ -9,6 +9,7 @@
 
 #define pr_fmt(fmt) "irq-mips-gic: " fmt
 
+#include <linux/bitfield.h>
 #include <linux/bitmap.h>
 #include <linux/clocksource.h>
 #include <linux/cpuhotplug.h>
@@ -735,8 +736,7 @@ static int __init gic_of_init(struct device_node *node,
        mips_gic_base = ioremap(gic_base, gic_len);
 
        gicconfig = read_gic_config();
-       gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
-       gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
+       gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
        gic_shared_intrs = (gic_shared_intrs + 1) * 8;
 
        if (cpu_has_veic) {
index 63bac3f78863a71d5e6fce81526ad0715185db24..ba4759b3e26930181873282e55513c163d646f38 100644 (file)
@@ -26,7 +26,7 @@
 
 #define NVIC_ISER              0x000
 #define NVIC_ICER              0x080
-#define NVIC_IPR               0x300
+#define NVIC_IPR               0x400
 
 #define NVIC_MAX_BANKS         16
 /*
index 5111ed966947e2dcc3e2b01f02e6c5fad7259fcd..41d6e2383517bbf940210d15f2110d96fd421e91 100644 (file)
@@ -2189,6 +2189,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 
                if (!num_sectors || num_sectors > max_sectors)
                        num_sectors = max_sectors;
+               rdev->sb_start = sb_start;
        }
        sb = page_address(rdev->sb_page);
        sb->data_size = cpu_to_le64(num_sectors);
@@ -6270,7 +6271,8 @@ static void __md_stop(struct mddev *mddev)
        spin_lock(&mddev->lock);
        mddev->pers = NULL;
        spin_unlock(&mddev->lock);
-       pers->free(mddev, mddev->private);
+       if (mddev->private)
+               pers->free(mddev, mddev->private);
        mddev->private = NULL;
        if (pers->sync_request && mddev->to_remove == NULL)
                mddev->to_remove = &md_redundancy_group;
index 79fa36de8a04a936249d7383751bc853cefd8103..cd9cb354dc2c7f2eb91a41cd9b800a25b71253ba 100644 (file)
@@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
                        if (abort)
                                dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
                        msg->flags = dst->flags;
+                       msg->sequence = dst->sequence;
                        /* Remove it from the wait_queue */
                        list_del_init(&data->list);
 
index 1094575abf956c17662179f20e5c043c3da5c9a4..90acafd9a290b8ad819a85d8bb35228e648a58fd 100644 (file)
@@ -241,6 +241,7 @@ static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        buf->dma_sgt = &buf->sg_table;
+       buf->vb = vb;
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec))
                goto userptr_fail_pfnvec;
@@ -642,6 +643,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->size = size;
        buf->db_attach = dba;
+       buf->vb = vb;
 
        return buf;
 }
index 822ce3021fde4dc5e44cba4760c7f7c8cfb964d9..48909faeced4663cd169276abd33eb5df1ac88cd 100644 (file)
@@ -7,9 +7,9 @@
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
-#include <linux/of_graph.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
@@ -2176,7 +2176,7 @@ static struct i2c_driver hi846_i2c_driver = {
        .driver = {
                .name = "hi846",
                .pm = &hi846_pm_ops,
-               .of_match_table = of_match_ptr(hi846_of_match),
+               .of_match_table = hi846_of_match,
        },
        .probe_new = hi846_probe,
        .remove = hi846_remove,
index 8176769a89fa44b9cb7cd57446e3f4701a88979e..0f3d6b5667b07e6a08a332cafa18333295e365fd 100644 (file)
@@ -751,10 +751,6 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
 /*
  * x86 is the only compat architecture with different struct alignment
  * between 32-bit and 64-bit tasks.
- *
- * On all other architectures, v4l2_event32 and v4l2_event32_time32 are
- * the same as v4l2_event and v4l2_event_time32, so we can use the native
- * handlers, converting v4l2_event to v4l2_event_time32 if necessary.
  */
 struct v4l2_event32 {
        __u32                           type;
@@ -772,21 +768,6 @@ struct v4l2_event32 {
        __u32                           reserved[8];
 };
 
-#ifdef CONFIG_COMPAT_32BIT_TIME
-struct v4l2_event32_time32 {
-       __u32                           type;
-       union {
-               compat_s64              value64;
-               __u8                    data[64];
-       } u;
-       __u32                           pending;
-       __u32                           sequence;
-       struct old_timespec32           timestamp;
-       __u32                           id;
-       __u32                           reserved[8];
-};
-#endif
-
 static int put_v4l2_event32(struct v4l2_event *p64,
                            struct v4l2_event32 __user *p32)
 {
@@ -802,7 +783,22 @@ static int put_v4l2_event32(struct v4l2_event *p64,
        return 0;
 }
 
+#endif
+
 #ifdef CONFIG_COMPAT_32BIT_TIME
+struct v4l2_event32_time32 {
+       __u32                           type;
+       union {
+               compat_s64              value64;
+               __u8                    data[64];
+       } u;
+       __u32                           pending;
+       __u32                           sequence;
+       struct old_timespec32           timestamp;
+       __u32                           id;
+       __u32                           reserved[8];
+};
+
 static int put_v4l2_event32_time32(struct v4l2_event *p64,
                                   struct v4l2_event32_time32 __user *p32)
 {
@@ -818,7 +814,6 @@ static int put_v4l2_event32_time32(struct v4l2_event *p64,
        return 0;
 }
 #endif
-#endif
 
 struct v4l2_edid32 {
        __u32 pad;
@@ -880,9 +875,7 @@ static int put_v4l2_edid32(struct v4l2_edid *p64,
 #define VIDIOC_QUERYBUF32_TIME32       _IOWR('V',  9, struct v4l2_buffer32_time32)
 #define VIDIOC_QBUF32_TIME32           _IOWR('V', 15, struct v4l2_buffer32_time32)
 #define VIDIOC_DQBUF32_TIME32          _IOWR('V', 17, struct v4l2_buffer32_time32)
-#ifdef CONFIG_X86_64
 #define        VIDIOC_DQEVENT32_TIME32         _IOR ('V', 89, struct v4l2_event32_time32)
-#endif
 #define VIDIOC_PREPARE_BUF32_TIME32    _IOWR('V', 93, struct v4l2_buffer32_time32)
 #endif
 
@@ -936,10 +929,10 @@ unsigned int v4l2_compat_translate_cmd(unsigned int cmd)
 #ifdef CONFIG_X86_64
        case VIDIOC_DQEVENT32:
                return VIDIOC_DQEVENT;
+#endif
 #ifdef CONFIG_COMPAT_32BIT_TIME
        case VIDIOC_DQEVENT32_TIME32:
                return VIDIOC_DQEVENT;
-#endif
 #endif
        }
        return cmd;
@@ -1032,10 +1025,10 @@ int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd)
 #ifdef CONFIG_X86_64
        case VIDIOC_DQEVENT32:
                return put_v4l2_event32(parg, arg);
+#endif
 #ifdef CONFIG_COMPAT_32BIT_TIME
        case VIDIOC_DQEVENT32_TIME32:
                return put_v4l2_event32_time32(parg, arg);
-#endif
 #endif
        }
        return 0;
index b883dcc0bbfa4611ec75fc5d0743621f46845a62..e201e5976f34f92cffb950691e2bf925213ff90f 100644 (file)
@@ -241,7 +241,7 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
 {
        struct mtk_smi_larb *larb = dev_get_drvdata(dev);
        u32 reg, flags_general = larb->larb_gen->flags_general;
-       const u8 *larbostd = larb->larb_gen->ostd[larb->larbid];
+       const u8 *larbostd = larb->larb_gen->ostd ? larb->larb_gen->ostd[larb->larbid] : NULL;
        int i;
 
        if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
index 8c72eb590f79dc8bd6bc92cdc7329c462d40f339..6ac509c1821c91b1bc3462beeff057d1f5c38111 100644 (file)
@@ -1803,8 +1803,6 @@ static int rtsx_pci_runtime_suspend(struct device *device)
        mutex_lock(&pcr->pcr_mutex);
        rtsx_pci_power_off(pcr, HOST_ENTER_S3);
 
-       free_irq(pcr->irq, (void *)pcr);
-
        mutex_unlock(&pcr->pcr_mutex);
 
        pcr->is_runtime_suspended = true;
@@ -1825,8 +1823,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
        mutex_lock(&pcr->pcr_mutex);
 
        rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
-       rtsx_pci_acquire_irq(pcr);
-       synchronize_irq(pcr->irq);
 
        if (pcr->ops->fetch_vendor_settings)
                pcr->ops->fetch_vendor_settings(pcr);
index 632325474233a11f0916047ed42338a9eb342d76..b38978a3b3ffa4f9ab5ae8b86d366de9880fe21d 100644 (file)
@@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(spi, at25_spi_ids);
 static int at25_probe(struct spi_device *spi)
 {
        struct at25_data        *at25 = NULL;
-       struct spi_eeprom       chip;
        int                     err;
        int                     sr;
        u8 id[FM25_ID_LEN];
@@ -389,15 +388,18 @@ static int at25_probe(struct spi_device *spi)
        if (match && !strcmp(match->compatible, "cypress,fm25"))
                is_fram = 1;
 
+       at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
+       if (!at25)
+               return -ENOMEM;
+
        /* Chip description */
-       if (!spi->dev.platform_data) {
-               if (!is_fram) {
-                       err = at25_fw_to_chip(&spi->dev, &chip);
-                       if (err)
-                               return err;
-               }
-       } else
-               chip = *(struct spi_eeprom *)spi->dev.platform_data;
+       if (spi->dev.platform_data) {
+               memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
+       } else if (!is_fram) {
+               err = at25_fw_to_chip(&spi->dev, &at25->chip);
+               if (err)
+                       return err;
+       }
 
        /* Ping the chip ... the status register is pretty portable,
         * unlike probing manufacturer IDs.  We do expect that system
@@ -409,12 +411,7 @@ static int at25_probe(struct spi_device *spi)
                return -ENXIO;
        }
 
-       at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
-       if (!at25)
-               return -ENOMEM;
-
        mutex_init(&at25->lock);
-       at25->chip = chip;
        at25->spi = spi;
        spi_set_drvdata(spi, at25);
 
@@ -431,7 +428,7 @@ static int at25_probe(struct spi_device *spi)
                        dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
                        return -ENODEV;
                }
-               chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
+               at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
 
                if (at25->chip.byte_len > 64 * 1024)
                        at25->chip.flags |= EE_ADDR3;
@@ -464,7 +461,7 @@ static int at25_probe(struct spi_device *spi)
        at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
        at25->nvmem_config.name = dev_name(&spi->dev);
        at25->nvmem_config.dev = &spi->dev;
-       at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+       at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY;
        at25->nvmem_config.root_only = true;
        at25->nvmem_config.owner = THIS_MODULE;
        at25->nvmem_config.compat = true;
@@ -474,17 +471,18 @@ static int at25_probe(struct spi_device *spi)
        at25->nvmem_config.priv = at25;
        at25->nvmem_config.stride = 1;
        at25->nvmem_config.word_size = 1;
-       at25->nvmem_config.size = chip.byte_len;
+       at25->nvmem_config.size = at25->chip.byte_len;
 
        at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
        if (IS_ERR(at25->nvmem))
                return PTR_ERR(at25->nvmem);
 
        dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
-                (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
-                (chip.byte_len < 1024) ? "Byte" : "KByte",
+                (at25->chip.byte_len < 1024) ?
+                       at25->chip.byte_len : (at25->chip.byte_len / 1024),
+                (at25->chip.byte_len < 1024) ? "Byte" : "KByte",
                 at25->chip.name, is_fram ? "fram" : "eeprom",
-                (chip.flags & EE_READONLY) ? " (readonly)" : "",
+                (at25->chip.flags & EE_READONLY) ? " (readonly)" : "",
                 at25->chip.page_size);
        return 0;
 }
index 39aca775371993f079410d62fd19620b86736007..4ccbf43e6bfa942bc40075760841dc1ddef7a447 100644 (file)
@@ -719,16 +719,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
 static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
 {
        u64 size = 0;
-       int i;
+       int oix;
 
        size = ALIGN(metalen, FASTRPC_ALIGN);
-       for (i = 0; i < ctx->nscalars; i++) {
+       for (oix = 0; oix < ctx->nbufs; oix++) {
+               int i = ctx->olaps[oix].raix;
+
                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
 
-                       if (ctx->olaps[i].offset == 0)
+                       if (ctx->olaps[oix].offset == 0)
                                size = ALIGN(size, FASTRPC_ALIGN);
 
-                       size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
+                       size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
                }
        }
 
index f4c8e1a61f5373c628c32a6a6eecf305527da2d7..b431cdd27353b7ff4af84a7d9c72419945004de6 100644 (file)
@@ -1514,6 +1514,12 @@ static int mmc_spi_remove(struct spi_device *spi)
        return 0;
 }
 
+static const struct spi_device_id mmc_spi_dev_ids[] = {
+       { "mmc-spi-slot"},
+       { },
+};
+MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);
+
 static const struct of_device_id mmc_spi_of_match_table[] = {
        { .compatible = "mmc-spi-slot", },
        {},
@@ -1525,6 +1531,7 @@ static struct spi_driver mmc_spi_driver = {
                .name =         "mmc_spi",
                .of_match_table = mmc_spi_of_match_table,
        },
+       .id_table =     mmc_spi_dev_ids,
        .probe =        mmc_spi_probe,
        .remove =       mmc_spi_remove,
 };
index 943940b44e835e13635ea9318220d776f60a2add..632775217d35c5bd1242be1703d28d78248f4bfb 100644 (file)
@@ -2291,8 +2291,10 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
                        sdr_set_field(host->base + PAD_DS_TUNE,
                                      PAD_DS_TUNE_DLY1, i);
                ret = mmc_get_ext_csd(card, &ext_csd);
-               if (!ret)
+               if (!ret) {
                        result_dly1 |= (1 << i);
+                       kfree(ext_csd);
+               }
        }
        host->hs400_tuning = false;
 
index a4407f391f66a6f2f05bebad1334748895c9fe77..f5b2684ad8058b5f40ec6393f1c487de9a6f57f7 100644 (file)
@@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        /* Issue CMD19 twice for each tap */
        for (i = 0; i < 2 * priv->tap_num; i++) {
-               int cmd_error;
+               int cmd_error = 0;
 
                /* Set sampling clock position */
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
index afaf33707d46a926717cec6e7044134442a21c89..764ee1b761d9bd37e9e73890df2b74560de5ad98 100644 (file)
@@ -310,7 +310,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
        .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
                        | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
                        | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
-                       | ESDHC_FLAG_CQHCI
                        | ESDHC_FLAG_STATE_LOST_IN_LPMODE
                        | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
 };
@@ -319,7 +318,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = {
        .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
                        | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
                        | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
-                       | ESDHC_FLAG_CQHCI
                        | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
 };
 
index 269c865694024c8f2164574349352a299eacf5d2..07c6da1f2f0fe7b38414ab0afa273c3f912ec20e 100644 (file)
@@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
                        len -= offset;
                }
 
-               BUG_ON(len > 65536);
+               /*
+                * The block layer forces a minimum segment size of PAGE_SIZE,
+                * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
+                * multiple descriptors, noting that the ADMA table is sized
+                * for 4KiB chunks anyway, so it will be big enough.
+                */
+               while (len > host->max_adma) {
+                       int n = 32 * 1024; /* 32KiB*/
+
+                       __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
+                       addr += n;
+                       len -= n;
+               }
 
                /* tran, valid */
                if (len)
@@ -3968,6 +3980,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
         * descriptor for each segment, plus 1 for a nop end descriptor.
         */
        host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
+       host->max_adma = 65536;
 
        host->max_timeout_count = 0xE;
 
@@ -4633,10 +4646,12 @@ int sdhci_setup_host(struct sdhci_host *host)
         * be larger than 64 KiB though.
         */
        if (host->flags & SDHCI_USE_ADMA) {
-               if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+               if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
+                       host->max_adma = 65532; /* 32-bit alignment */
                        mmc->max_seg_size = 65535;
-               else
+               } else {
                        mmc->max_seg_size = 65536;
+               }
        } else {
                mmc->max_seg_size = mmc->max_req_size;
        }
index bb883553d3b461982ffca66d7a7b23aaa7c70729..d7929d72573006bd388511f66fdde315af449fd2 100644 (file)
@@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc {
 
 /*
  * Maximum segments assuming a 512KiB maximum requisition size and a minimum
- * 4KiB page size.
+ * 4KiB page size. Note this also allows enough for multiple descriptors in
+ * case of PAGE_SIZE >= 64KiB.
  */
 #define SDHCI_MAX_SEGS         128
 
@@ -543,6 +544,7 @@ struct sdhci_host {
        unsigned int blocks;    /* remaining PIO blocks */
 
        int sg_count;           /* Mapped sg entries */
+       int max_adma;           /* Max. length in ADMA descriptor */
 
        void *adma_table;       /* ADMA descriptor table */
        void *align_buffer;     /* Bounce buffer */
index 9802e265fca80a56cd7be99488713dc22dc55246..2b317ed6c103f16f823d2fdbbfc90dc5f5fd1834 100644 (file)
@@ -96,6 +96,13 @@ struct dataflash {
        struct mtd_info         mtd;
 };
 
+static const struct spi_device_id dataflash_dev_ids[] = {
+       { "at45" },
+       { "dataflash" },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, dataflash_dev_ids);
+
 #ifdef CONFIG_OF
 static const struct of_device_id dataflash_dt_ids[] = {
        { .compatible = "atmel,at45", },
@@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = {
                .name           = "mtd_dataflash",
                .of_match_table = of_match_ptr(dataflash_dt_ids),
        },
+       .id_table = dataflash_dev_ids,
 
        .probe          = dataflash_probe,
        .remove         = dataflash_remove,
index 67b7cb67c0307b5cbe4288860b1f6a705d5530aa..0a45d3c6c15ba1fca6b042fa0249c0d21375c166 100644 (file)
@@ -26,7 +26,7 @@ config MTD_NAND_DENALI_PCI
 config MTD_NAND_DENALI_DT
        tristate "Denali NAND controller as a DT device"
        select MTD_NAND_DENALI
-       depends on HAS_DMA && HAVE_CLK && OF
+       depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM
        help
          Enable the driver for NAND flash on platforms using a Denali NAND
          controller as a DT device.
index 658f0cbe7ce8ca3b545c28cba577258ed54f4e52..6b2bda815b880c969c239d9dc6336d2392dcbb42 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
 
 #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
 
+/*
+ * According to SPEAr300 Reference Manual (RM0082)
+ *  TOUDEL = 7ns (Output delay from the flip-flops to the board)
+ *  TINDEL = 5ns (Input delay from the board to the flipflop)
+ */
+#define TOUTDEL        7000
+#define TINDEL 5000
+
 struct fsmc_nand_timings {
        u8 tclr;
        u8 tar;
@@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
 {
        unsigned long hclk = clk_get_rate(host->clk);
        unsigned long hclkn = NSEC_PER_SEC / hclk;
-       u32 thiz, thold, twait, tset;
+       u32 thiz, thold, twait, tset, twait_min;
 
        if (sdrt->tRC_min < 30000)
                return -EOPNOTSUPP;
@@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
        else if (tims->thold > FSMC_THOLD_MASK)
                tims->thold = FSMC_THOLD_MASK;
 
-       twait = max(sdrt->tRP_min, sdrt->tWP_min);
-       tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
-       if (tims->twait == 0)
-               tims->twait = 1;
-       else if (tims->twait > FSMC_TWAIT_MASK)
-               tims->twait = FSMC_TWAIT_MASK;
-
        tset = max(sdrt->tCS_min - sdrt->tWP_min,
                   sdrt->tCEA_max - sdrt->tREA_max);
        tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
@@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
        else if (tims->tset > FSMC_TSET_MASK)
                tims->tset = FSMC_TSET_MASK;
 
+       /*
+        * According to SPEAr300 Reference Manual (RM0082) which gives more
+        * information related to FSMSC timings than the SPEAr600 one (RM0305),
+        *   twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+        */
+       twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+                   + TOUTDEL + TINDEL;
+       twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+       tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+       if (tims->twait == 0)
+               tims->twait = 1;
+       else if (tims->twait > FSMC_TWAIT_MASK)
+               tims->twait = FSMC_TWAIT_MASK;
+
        return 0;
 }
 
@@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
                                                instr->ctx.waitrdy.timeout_ms);
                        break;
                }
+
+               if (instr->delay_ns)
+                       ndelay(instr->delay_ns);
        }
 
        return ret;
index 3d6c6e88052072751886f8391ae1561f960e8b72..a130320de4128e79d3aa15bcaf23b5a318993a16 100644 (file)
@@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
                                 struct nand_sdr_timings *spec_timings)
 {
        const struct nand_controller_ops *ops = chip->controller->ops;
-       int best_mode = 0, mode, ret;
+       int best_mode = 0, mode, ret = -EOPNOTSUPP;
 
        iface->type = NAND_SDR_IFACE;
 
@@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip,
                                   struct nand_nvddr_timings *spec_timings)
 {
        const struct nand_controller_ops *ops = chip->controller->ops;
-       int best_mode = 0, mode, ret;
+       int best_mode = 0, mode, ret = -EOPNOTSUPP;
 
        iface->type = NAND_NVDDR_IFACE;
 
@@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
                        NAND_OP_CMD(NAND_CMD_ERASE1, 0),
                        NAND_OP_ADDR(2, addrs, 0),
                        NAND_OP_CMD(NAND_CMD_ERASE2,
-                                   NAND_COMMON_TIMING_MS(conf, tWB_max)),
+                                   NAND_COMMON_TIMING_NS(conf, tWB_max)),
                        NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
                                         0),
                };
index 10506a4b66ef85b9d5202e668729e1a9e5ba1bee..6cccc3dc00bcf3b30a728aa06d5242646bd9b6f2 100644 (file)
@@ -567,9 +567,7 @@ config XEN_NETDEV_BACKEND
 config VMXNET3
        tristate "VMware VMXNET3 ethernet driver"
        depends on PCI && INET
-       depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
-                    IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \
-                    PPC_64K_PAGES)
+       depends on PAGE_SIZE_LESS_THAN_64KB
        help
          This driver supports VMware's vmxnet3 virtual ethernet NIC.
          To compile this driver as a module, choose M here: the
index 47a04c330885ef33a06e117b3f10e8393f4fa5ff..b732ee9a50ef948bd4fe30a32f9870ceb2ba0517 100644 (file)
@@ -3286,7 +3286,7 @@ static void __exit amt_fini(void)
 {
        rtnl_link_unregister(&amt_link_ops);
        unregister_netdevice_notifier(&amt_notifier_block);
-       cancel_delayed_work(&source_gc_wq);
+       cancel_delayed_work_sync(&source_gc_wq);
        __amt_source_gc_work();
        destroy_workqueue(amt_wq);
 }
index 2ec8e015c7b3364ae8ab07d0750c4ded29d5815a..533e476988f2492bd56dd97e1e16f6cb80737086 100644 (file)
@@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work)
        struct slave *slave;
 
        if (!bond_has_slaves(bond)) {
-               bond_info->tx_rebalance_counter = 0;
+               atomic_set(&bond_info->tx_rebalance_counter, 0);
                bond_info->lp_counter = 0;
                goto re_arm;
        }
 
        rcu_read_lock();
 
-       bond_info->tx_rebalance_counter++;
+       atomic_inc(&bond_info->tx_rebalance_counter);
        bond_info->lp_counter++;
 
        /* send learning packets */
@@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work)
        }
 
        /* rebalance tx traffic */
-       if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
+       if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
                bond_for_each_slave_rcu(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
                        if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work)
                                bond_info->unbalanced_load = 0;
                        }
                }
-               bond_info->tx_rebalance_counter = 0;
+               atomic_set(&bond_info->tx_rebalance_counter, 0);
        }
 
        if (bond_info->rlb_enabled) {
@@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
        tlb_init_slave(slave);
 
        /* order a rebalance ASAP */
-       bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+       atomic_set(&bond->alb_info.tx_rebalance_counter,
+                  BOND_TLB_REBALANCE_TICKS);
 
        if (bond->alb_info.rlb_enabled)
                bond->alb_info.rlb_rebalance = 1;
@@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
                        rlb_clear_slave(bond, slave);
        } else if (link == BOND_LINK_UP) {
                /* order a rebalance ASAP */
-               bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+               atomic_set(&bond_info->tx_rebalance_counter,
+                          BOND_TLB_REBALANCE_TICKS);
                if (bond->alb_info.rlb_enabled) {
                        bond->alb_info.rlb_rebalance = 1;
                        /* If the updelay module parameter is smaller than the
index 74d9899fc904c099d51f23c591ec71b2acb6c37e..eb74cdf26b88c55e7df9fee2cc1d212c5b2a1e6b 100644 (file)
@@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_SPACK_EWLR BIT(23)
 #define KVASER_PCIEFD_SPACK_EPLR BIT(24)
 
+/* Kvaser KCAN_EPACK second word */
+#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
+
 struct kvaser_pciefd;
 
 struct kvaser_pciefd_can {
@@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
 
        can->err_rep_cnt++;
        can->can.can_stats.bus_error++;
-       stats->rx_errors++;
+       if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
+               stats->tx_errors++;
+       else
+               stats->rx_errors++;
 
        can->bec.txerr = bec.txerr;
        can->bec.rxerr = bec.rxerr;
index 2470c47b2e315b46cc55f94dd55520096f88b23e..c2a8421e7845c49ffdf16727dd168cdbf4df8c55 100644 (file)
@@ -204,16 +204,16 @@ enum m_can_reg {
 
 /* Interrupts for version 3.0.x */
 #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
-                        IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
-                        IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
+                        IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+                        IR_RF0L)
 #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
 
 /* Interrupts for version >= 3.1.x */
 #define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X      (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
-                        IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
-                        IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_31X      (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+                        IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+                        IR_RF0L)
 #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
 
 /* Interrupt Line Select (ILS) */
@@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
                err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
                                      cf->data, DIV_ROUND_UP(cf->len, 4));
                if (err)
-                       goto out_fail;
+                       goto out_free_skb;
        }
 
        /* acknowledge rx fifo 0 */
@@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
 
        return 0;
 
+out_free_skb:
+       kfree_skb(skb);
 out_fail:
        netdev_err(dev, "FIFO read returned %d\n", err);
        return err;
@@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
 {
        if (irqstatus & IR_WDI)
                netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
-       if (irqstatus & IR_ELO)
-               netdev_err(dev, "Error Logging Overflow\n");
        if (irqstatus & IR_BEU)
                netdev_err(dev, "Bit Error Uncorrected\n");
        if (irqstatus & IR_BEC)
@@ -1494,20 +1494,32 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
        case 30:
                /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
                can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
-               cdev->can.bittiming_const = &m_can_bittiming_const_30X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_30X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_30X;
                break;
        case 31:
                /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
                can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
-               cdev->can.bittiming_const = &m_can_bittiming_const_31X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_31X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_31X;
                break;
        case 32:
        case 33:
                /* Support both MCAN version v3.2.x and v3.3.0 */
-               cdev->can.bittiming_const = &m_can_bittiming_const_31X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_31X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_31X;
 
                cdev->can.ctrlmode_supported |=
                        (m_can_niso_supported(cdev) ?
index d18b515e6ccc76c33660ce9fd0bed098ebf18559..2c5d40997168616ca1ab85d1235560c654f2dfe7 100644 (file)
@@ -85,6 +85,9 @@ struct m_can_classdev {
        struct sk_buff *tx_skb;
        struct phy *transceiver;
 
+       const struct can_bittiming_const *bit_timing;
+       const struct can_bittiming_const *data_timing;
+
        struct m_can_ops *ops;
 
        int version;
index 89cc3d41e952bb9b49c85b77dc979f24a8979ffe..b56a54d6c5a9c4d274ecc76b637fbaa02ff5cf84 100644 (file)
 
 #define M_CAN_PCI_MMIO_BAR             0
 
-#define M_CAN_CLOCK_FREQ_EHL           100000000
 #define CTL_CSR_INT_CTL_OFFSET         0x508
 
+struct m_can_pci_config {
+       const struct can_bittiming_const *bit_timing;
+       const struct can_bittiming_const *data_timing;
+       unsigned int clock_freq;
+};
+
 struct m_can_pci_priv {
        struct m_can_classdev cdev;
 
@@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
 static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
 {
        struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+       void __iomem *src = priv->base + offset;
 
-       ioread32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               *(unsigned int *)val = ioread32(src);
+               val += 4;
+               src += 4;
+       }
 
        return 0;
 }
@@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
                            const void *val, size_t val_count)
 {
        struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+       void __iomem *dst = priv->base + offset;
 
-       iowrite32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               iowrite32(*(unsigned int *)val, dst);
+               val += 4;
+               dst += 4;
+       }
 
        return 0;
 }
@@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = {
        .read_fifo = iomap_read_fifo,
 };
 
+static const struct can_bittiming_const m_can_bittiming_const_ehl = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 64,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 128,
+       .sjw_max = 128,
+       .brp_min = 1,
+       .brp_max = 512,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 16,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 32,
+       .brp_inc = 1,
+};
+
+static const struct m_can_pci_config m_can_pci_ehl = {
+       .bit_timing = &m_can_bittiming_const_ehl,
+       .data_timing = &m_can_data_bittiming_const_ehl,
+       .clock_freq = 200000000,
+};
+
 static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
        struct device *dev = &pci->dev;
+       const struct m_can_pci_config *cfg;
        struct m_can_classdev *mcan_class;
        struct m_can_pci_priv *priv;
        void __iomem *base;
@@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
        if (!mcan_class)
                return -ENOMEM;
 
+       cfg = (const struct m_can_pci_config *)id->driver_data;
+
        priv = cdev_to_priv(mcan_class);
 
        priv->base = base;
@@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
        mcan_class->dev = &pci->dev;
        mcan_class->net->irq = pci_irq_vector(pci, 0);
        mcan_class->pm_clock_support = 1;
-       mcan_class->can.clock.freq = id->driver_data;
+       mcan_class->bit_timing = cfg->bit_timing;
+       mcan_class->data_timing = cfg->data_timing;
+       mcan_class->can.clock.freq = cfg->clock_freq;
        mcan_class->ops = &m_can_pci_ops;
 
        pci_set_drvdata(pci, mcan_class);
@@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
                         m_can_pci_suspend, m_can_pci_resume);
 
 static const struct pci_device_id m_can_pci_id_table[] = {
-       { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
-       { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
+       { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
+       { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
        {  }    /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
index 92a54a5fd4c502c3238f500e9bf39b44b44582a6..964c8a09226a9fc1af3de261948d62638ff9f48e 100644 (file)
@@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
                        cf->data[i + 1] = data_reg >> 8;
                }
 
-               netif_receive_skb(skb);
                rcv_pkts++;
                stats->rx_packets++;
                quota--;
                stats->rx_bytes += cf->len;
+               netif_receive_skb(skb);
 
                pch_fifo_thresh(priv, obj_num);
                obj_num++;
index e21b169c14c0122d9038fdd967dfc15cedde7da3..4642b6d4aaf7bb1f3055eb80ee563b9cd55fd1d5 100644 (file)
@@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
                        free_sja1000dev(dev);
        }
 
-       err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+       if (!card->channels) {
+               err = -ENODEV;
+               goto failure_cleanup;
+       }
+
+       err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
                          DRV_NAME, card);
        if (!err)
                return 0;
index 59ba7c7beec00f4e10c19bcc03995941017c158b..f7af1bf5ab46d92bc7c117c91e1a61de4017838d 100644 (file)
 
 #include "kvaser_usb.h"
 
-/* Forward declaration */
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
-
-#define CAN_USB_CLOCK                  8000000
 #define MAX_USBCAN_NET_DEVICES         2
 
 /* Command header size */
@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
 
 #define CMD_LEAF_LOG_MESSAGE           106
 
+/* Leaf frequency options */
+#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
+#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
+#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
 /* error factors */
 #define M16C_EF_ACKE                   BIT(0)
 #define M16C_EF_CRCE                   BIT(1)
@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
        };
 };
 
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+       .name = "kvaser_usb",
+       .tseg1_min = KVASER_USB_TSEG1_MIN,
+       .tseg1_max = KVASER_USB_TSEG1_MAX,
+       .tseg2_min = KVASER_USB_TSEG2_MIN,
+       .tseg2_max = KVASER_USB_TSEG2_MAX,
+       .sjw_max = KVASER_USB_SJW_MAX,
+       .brp_min = KVASER_USB_BRP_MIN,
+       .brp_max = KVASER_USB_BRP_MAX,
+       .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+       .clock = {
+               .freq = 8000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+       .clock = {
+               .freq = 16000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+       .clock = {
+               .freq = 24000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+       .clock = {
+               .freq = 32000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
 static void *
 kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
                             const struct sk_buff *skb, int *frame_len,
@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
        return rc;
 }
 
+static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+                                                  const struct leaf_cmd_softinfo *softinfo)
+{
+       u32 sw_options = le32_to_cpu(softinfo->sw_options);
+
+       dev->fw_version = le32_to_cpu(softinfo->fw_version);
+       dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
+       switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+       case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
+               break;
+       case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
+               break;
+       case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
+               break;
+       }
+}
+
 static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
 {
        struct kvaser_cmd cmd;
@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
 
        switch (dev->card_data.leaf.family) {
        case KVASER_LEAF:
-               dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
-               dev->max_tx_urbs =
-                       le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+               kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
                break;
        case KVASER_USBCAN:
                dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
                dev->max_tx_urbs =
                        le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
                break;
        }
 
@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
 {
        struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
 
-       dev->cfg = &kvaser_usb_leaf_dev_cfg;
        card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 
        return 0;
 }
 
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
-       .name = "kvaser_usb",
-       .tseg1_min = KVASER_USB_TSEG1_MIN,
-       .tseg1_max = KVASER_USB_TSEG1_MAX,
-       .tseg2_min = KVASER_USB_TSEG2_MIN,
-       .tseg2_max = KVASER_USB_TSEG2_MAX,
-       .sjw_max = KVASER_USB_SJW_MAX,
-       .brp_min = KVASER_USB_BRP_MIN,
-       .brp_max = KVASER_USB_BRP_MAX,
-       .brp_inc = KVASER_USB_BRP_INC,
-};
-
 static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
 {
        struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
        .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
        .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
 };
-
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
-       .clock = {
-               .freq = CAN_USB_CLOCK,
-       },
-       .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
-};
index 01e37b75471e111430fcd30c1a159afc7d5540bc..2b88f03e5252182251a47e056221f60802311543 100644 (file)
@@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, b53_spi_of_match);
 
+static const struct spi_device_id b53_spi_ids[] = {
+       { .name = "bcm5325" },
+       { .name = "bcm5365" },
+       { .name = "bcm5395" },
+       { .name = "bcm5397" },
+       { .name = "bcm5398" },
+       { .name = "bcm53115" },
+       { .name = "bcm53125" },
+       { .name = "bcm53128" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
 static struct spi_driver b53_spi_driver = {
        .driver = {
                .name   = "b53-switch",
@@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = {
        .probe  = b53_spi_probe,
        .remove = b53_spi_remove,
        .shutdown = b53_spi_shutdown,
+       .id_table = b53_spi_ids,
 };
 
 module_spi_driver(b53_spi_driver);
index 43fc3087aeb3ea5c266b8dfaa4f568662dbe4a80..013e9c02be71ab366c0e54a61fb3af61aa8f67ef 100644 (file)
@@ -1002,57 +1002,32 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
        data &= ~PORT_VLAN_MEMBERSHIP;
        data |= (member & dev->port_mask);
        ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
-       dev->ports[port].member = member;
 }
 
 static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 {
        struct ksz_device *dev = ds->priv;
-       int forward = dev->member;
        struct ksz_port *p;
-       int member = -1;
        u8 data;
 
-       p = &dev->ports[port];
-
        ksz_pread8(dev, port, P_STP_CTRL, &data);
        data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
 
        switch (state) {
        case BR_STATE_DISABLED:
                data |= PORT_LEARN_DISABLE;
-               if (port < dev->phy_port_cnt)
-                       member = 0;
                break;
        case BR_STATE_LISTENING:
                data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-               if (port < dev->phy_port_cnt &&
-                   p->stp_state == BR_STATE_DISABLED)
-                       member = dev->host_mask | p->vid_member;
                break;
        case BR_STATE_LEARNING:
                data |= PORT_RX_ENABLE;
                break;
        case BR_STATE_FORWARDING:
                data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
-               /* This function is also used internally. */
-               if (port == dev->cpu_port)
-                       break;
-
-               /* Port is a member of a bridge. */
-               if (dev->br_member & BIT(port)) {
-                       dev->member |= BIT(port);
-                       member = dev->member;
-               } else {
-                       member = dev->host_mask | p->vid_member;
-               }
                break;
        case BR_STATE_BLOCKING:
                data |= PORT_LEARN_DISABLE;
-               if (port < dev->phy_port_cnt &&
-                   p->stp_state == BR_STATE_DISABLED)
-                       member = dev->host_mask | p->vid_member;
                break;
        default:
                dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -1060,22 +1035,11 @@ static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
        }
 
        ksz_pwrite8(dev, port, P_STP_CTRL, data);
+
+       p = &dev->ports[port];
        p->stp_state = state;
-       /* Port membership may share register with STP state. */
-       if (member >= 0 && member != p->member)
-               ksz8_cfg_port_member(dev, port, (u8)member);
-
-       /* Check if forwarding needs to be updated. */
-       if (state != BR_STATE_FORWARDING) {
-               if (dev->br_member & BIT(port))
-                       dev->member &= ~BIT(port);
-       }
 
-       /* When topology has changed the function ksz_update_port_member
-        * should be called to modify port forwarding behavior.
-        */
-       if (forward != dev->member)
-               ksz_update_port_member(dev, port);
+       ksz_update_port_member(dev, port);
 }
 
 static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1341,7 +1305,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
 
 static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
 {
-       struct ksz_port *p = &dev->ports[port];
+       struct dsa_switch *ds = dev->ds;
        struct ksz8 *ksz8 = dev->priv;
        const u32 *masks;
        u8 member;
@@ -1368,10 +1332,11 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
                if (!ksz_is_ksz88x3(dev))
                        ksz8795_cpu_interface_select(dev, port);
 
-               member = dev->port_mask;
+               member = dsa_user_ports(ds);
        } else {
-               member = dev->host_mask | p->vid_member;
+               member = BIT(dsa_upstream_port(ds, port));
        }
+
        ksz8_cfg_port_member(dev, port, member);
 }
 
@@ -1392,20 +1357,13 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
        ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
 
        p = &dev->ports[dev->cpu_port];
-       p->vid_member = dev->port_mask;
        p->on = 1;
 
        ksz8_port_setup(dev, dev->cpu_port, true);
-       dev->member = dev->host_mask;
 
        for (i = 0; i < dev->phy_port_cnt; i++) {
                p = &dev->ports[i];
 
-               /* Initialize to non-zero so that ksz_cfg_port_member() will
-                * be called.
-                */
-               p->vid_member = BIT(i);
-               p->member = dev->port_mask;
                ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
 
                /* Last port may be disabled. */
index 854e25f43fa708157cf4785ca1c170763b96c0ee..353b5f98174075399159f86dd1327883dffbc4e0 100644 (file)
@@ -391,7 +391,6 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
                                    u8 member)
 {
        ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
-       dev->ports[port].member = member;
 }
 
 static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -400,8 +399,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
        struct ksz_device *dev = ds->priv;
        struct ksz_port *p = &dev->ports[port];
        u8 data;
-       int member = -1;
-       int forward = dev->member;
 
        ksz_pread8(dev, port, P_STP_CTRL, &data);
        data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
@@ -409,40 +406,18 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
        switch (state) {
        case BR_STATE_DISABLED:
                data |= PORT_LEARN_DISABLE;
-               if (port != dev->cpu_port)
-                       member = 0;
                break;
        case BR_STATE_LISTENING:
                data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-               if (port != dev->cpu_port &&
-                   p->stp_state == BR_STATE_DISABLED)
-                       member = dev->host_mask | p->vid_member;
                break;
        case BR_STATE_LEARNING:
                data |= PORT_RX_ENABLE;
                break;
        case BR_STATE_FORWARDING:
                data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
-               /* This function is also used internally. */
-               if (port == dev->cpu_port)
-                       break;
-
-               member = dev->host_mask | p->vid_member;
-               mutex_lock(&dev->dev_mutex);
-
-               /* Port is a member of a bridge. */
-               if (dev->br_member & (1 << port)) {
-                       dev->member |= (1 << port);
-                       member = dev->member;
-               }
-               mutex_unlock(&dev->dev_mutex);
                break;
        case BR_STATE_BLOCKING:
                data |= PORT_LEARN_DISABLE;
-               if (port != dev->cpu_port &&
-                   p->stp_state == BR_STATE_DISABLED)
-                       member = dev->host_mask | p->vid_member;
                break;
        default:
                dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -451,23 +426,8 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
 
        ksz_pwrite8(dev, port, P_STP_CTRL, data);
        p->stp_state = state;
-       mutex_lock(&dev->dev_mutex);
-       /* Port membership may share register with STP state. */
-       if (member >= 0 && member != p->member)
-               ksz9477_cfg_port_member(dev, port, (u8)member);
-
-       /* Check if forwarding needs to be updated. */
-       if (state != BR_STATE_FORWARDING) {
-               if (dev->br_member & (1 << port))
-                       dev->member &= ~(1 << port);
-       }
 
-       /* When topology has changed the function ksz_update_port_member
-        * should be called to modify port forwarding behavior.
-        */
-       if (forward != dev->member)
-               ksz_update_port_member(dev, port);
-       mutex_unlock(&dev->dev_mutex);
+       ksz_update_port_member(dev, port);
 }
 
 static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1168,10 +1128,10 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
 
 static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
 {
-       u8 data8;
-       u8 member;
-       u16 data16;
        struct ksz_port *p = &dev->ports[port];
+       struct dsa_switch *ds = dev->ds;
+       u8 data8, member;
+       u16 data16;
 
        /* enable tag tail for host port */
        if (cpu_port)
@@ -1250,12 +1210,12 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
                ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
                p->phydev.duplex = 1;
        }
-       mutex_lock(&dev->dev_mutex);
+
        if (cpu_port)
-               member = dev->port_mask;
+               member = dsa_user_ports(ds);
        else
-               member = dev->host_mask | p->vid_member;
-       mutex_unlock(&dev->dev_mutex);
+               member = BIT(dsa_upstream_port(ds, port));
+
        ksz9477_cfg_port_member(dev, port, member);
 
        /* clear pending interrupts */
@@ -1276,8 +1236,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
                        const char *prev_mode;
 
                        dev->cpu_port = i;
-                       dev->host_mask = (1 << dev->cpu_port);
-                       dev->port_mask |= dev->host_mask;
                        p = &dev->ports[i];
 
                        /* Read from XMII register to determine host port
@@ -1312,23 +1270,15 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
 
                        /* enable cpu port */
                        ksz9477_port_setup(dev, i, true);
-                       p->vid_member = dev->port_mask;
                        p->on = 1;
                }
        }
 
-       dev->member = dev->host_mask;
-
        for (i = 0; i < dev->port_cnt; i++) {
                if (i == dev->cpu_port)
                        continue;
                p = &dev->ports[i];
 
-               /* Initialize to non-zero so that ksz_cfg_port_member() will
-                * be called.
-                */
-               p->vid_member = (1 << i);
-               p->member = dev->port_mask;
                ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
                p->on = 1;
                if (i < dev->phy_port_cnt)
index 7c2968a639eba552e552bb418fc3cbeabe8a9dea..8a04302018dce681c9db545a5990fbc88ed72ea7 100644 (file)
 
 void ksz_update_port_member(struct ksz_device *dev, int port)
 {
-       struct ksz_port *p;
+       struct ksz_port *p = &dev->ports[port];
+       struct dsa_switch *ds = dev->ds;
+       u8 port_member = 0, cpu_port;
+       const struct dsa_port *dp;
        int i;
 
-       for (i = 0; i < dev->port_cnt; i++) {
-               if (i == port || i == dev->cpu_port)
+       if (!dsa_is_user_port(ds, port))
+               return;
+
+       dp = dsa_to_port(ds, port);
+       cpu_port = BIT(dsa_upstream_port(ds, port));
+
+       for (i = 0; i < ds->num_ports; i++) {
+               const struct dsa_port *other_dp = dsa_to_port(ds, i);
+               struct ksz_port *other_p = &dev->ports[i];
+               u8 val = 0;
+
+               if (!dsa_is_user_port(ds, i))
                        continue;
-               p = &dev->ports[i];
-               if (!(dev->member & (1 << i)))
+               if (port == i)
+                       continue;
+               if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev)
                        continue;
 
-               /* Port is a member of the bridge and is forwarding. */
-               if (p->stp_state == BR_STATE_FORWARDING &&
-                   p->member != dev->member)
-                       dev->dev_ops->cfg_port_member(dev, i, dev->member);
+               if (other_p->stp_state == BR_STATE_FORWARDING &&
+                   p->stp_state == BR_STATE_FORWARDING) {
+                       val |= BIT(port);
+                       port_member |= BIT(i);
+               }
+
+               dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
        }
+
+       dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
 }
 EXPORT_SYMBOL_GPL(ksz_update_port_member);
 
@@ -175,12 +194,6 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
 int ksz_port_bridge_join(struct dsa_switch *ds, int port,
                         struct net_device *br)
 {
-       struct ksz_device *dev = ds->priv;
-
-       mutex_lock(&dev->dev_mutex);
-       dev->br_member |= (1 << port);
-       mutex_unlock(&dev->dev_mutex);
-
        /* port_stp_state_set() will be called after to put the port in
         * appropriate state so there is no need to do anything.
         */
@@ -192,13 +205,6 @@ EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
 void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
                           struct net_device *br)
 {
-       struct ksz_device *dev = ds->priv;
-
-       mutex_lock(&dev->dev_mutex);
-       dev->br_member &= ~(1 << port);
-       dev->member &= ~(1 << port);
-       mutex_unlock(&dev->dev_mutex);
-
        /* port_stp_state_set() will be called after to put the port in
         * forwarding state so there is no need to do anything.
         */
index 1597c63988b4e430bcfbb1002fd3644c9328730c..54b456bc89728081fd5ec931821b096d87a55ac3 100644 (file)
@@ -25,8 +25,6 @@ struct ksz_port_mib {
 };
 
 struct ksz_port {
-       u16 member;
-       u16 vid_member;
        bool remove_tag;                /* Remove Tag flag set, for ksz8795 only */
        int stp_state;
        struct phy_device phydev;
@@ -83,8 +81,6 @@ struct ksz_device {
        struct ksz_port *ports;
        struct delayed_work mib_read;
        unsigned long mib_read_interval;
-       u16 br_member;
-       u16 member;
        u16 mirror_rx;
        u16 mirror_tx;
        u32 features;                   /* chip specific features */
index f00cbf5753b914040be2dc3b31f2a15cb4274976..14f87f6ac479a9922cb9474f78174be182bded83 100644 (file)
@@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
        u16 reg;
        int err;
 
+       /* The 88e6250 family does not have the PHY detect bit. Instead,
+        * report whether the port is internal.
+        */
+       if (chip->info->family == MV88E6XXX_FAMILY_6250)
+               return port < chip->info->num_internal_phys;
+
        err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
        if (err) {
                dev_err(chip->dev,
@@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
 {
        struct mv88e6xxx_chip *chip = ds->priv;
        struct mv88e6xxx_port *p;
-       int err;
+       int err = 0;
 
        p = &chip->ports[port];
 
-       /* FIXME: is this the correct test? If we're in fixed mode on an
-        * internal port, why should we process this any different from
-        * PHY mode? On the other hand, the port may be automedia between
-        * an internal PHY and the serdes...
-        */
-       if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
-               return;
-
        mv88e6xxx_reg_lock(chip);
-       /* In inband mode, the link may come up at any time while the link
-        * is not forced down. Force the link down while we reconfigure the
-        * interface mode.
-        */
-       if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-           chip->info->ops->port_set_link)
-               chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
-
-       err = mv88e6xxx_port_config_interface(chip, port, state->interface);
-       if (err && err != -EOPNOTSUPP)
-               goto err_unlock;
 
-       err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
-                                         state->advertising);
-       /* FIXME: we should restart negotiation if something changed - which
-        * is something we get if we convert to using phylinks PCS operations.
-        */
-       if (err > 0)
-               err = 0;
+       if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+               /* In inband mode, the link may come up at any time while the
+                * link is not forced down. Force the link down while we
+                * reconfigure the interface mode.
+                */
+               if (mode == MLO_AN_INBAND &&
+                   p->interface != state->interface &&
+                   chip->info->ops->port_set_link)
+                       chip->info->ops->port_set_link(chip, port,
+                                                      LINK_FORCED_DOWN);
+
+               err = mv88e6xxx_port_config_interface(chip, port,
+                                                     state->interface);
+               if (err && err != -EOPNOTSUPP)
+                       goto err_unlock;
+
+               err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
+                                                 state->interface,
+                                                 state->advertising);
+               /* FIXME: we should restart negotiation if something changed -
+                * which is something we get if we convert to using phylinks
+                * PCS operations.
+                */
+               if (err > 0)
+                       err = 0;
+       }
 
        /* Undo the forced down state above after completing configuration
-        * irrespective of its state on entry, which allows the link to come up.
+        * irrespective of its state on entry, which allows the link to come
+        * up in the in-band case where there is no separate SERDES. Also
+        * ensure that the link can come up if the PPU is in use and we are
+        * in PHY mode (we treat the PPU as an effective in-band mechanism.)
         */
-       if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-           chip->info->ops->port_set_link)
+       if (chip->info->ops->port_set_link &&
+           ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+            (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
                chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
 
        p->interface = state->interface;
@@ -752,11 +762,10 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       /* Internal PHYs propagate their configuration directly to the MAC.
-        * External PHYs depend on whether the PPU is enabled for this port.
+       /* Force the link down if we know the port may not be automatically
+        * updated by the switch or if we are using fixed-link mode.
         */
-       if (((!mv88e6xxx_phy_is_internal(ds, port) &&
-             !mv88e6xxx_port_ppu_updates(chip, port)) ||
+       if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
             mode == MLO_AN_FIXED) && ops->port_sync_link)
                err = ops->port_sync_link(chip, port, mode, false);
        mv88e6xxx_reg_unlock(chip);
@@ -779,11 +788,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       /* Internal PHYs propagate their configuration directly to the MAC.
-        * External PHYs depend on whether the PPU is enabled for this port.
+       /* Configure and force the link up if we know that the port may not
+        * be automatically updated by the switch or if we are using fixed-link
+        * mode.
         */
-       if ((!mv88e6xxx_phy_is_internal(ds, port) &&
-            !mv88e6xxx_port_ppu_updates(chip, port)) ||
+       if (!mv88e6xxx_port_ppu_updates(chip, port) ||
            mode == MLO_AN_FIXED) {
                /* FIXME: for an automedia port, should we force the link
                 * down here - what if the link comes up due to "other" media
index 6ea003678798651f70df5f8235863f7c79973c85..2b05ead515cdcbe756ec4d34d67e823912174e49 100644 (file)
@@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
 }
 
 static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
-                                         u16 status, u16 lpa,
+                                         u16 ctrl, u16 status, u16 lpa,
                                          struct phylink_link_state *state)
 {
+       state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+
        if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
-               state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+               /* The Speed and Duplex Resolved register is 1 if AN is enabled
+                * and complete, or if AN is disabled. So with disabled AN we
+                * still get here on link up. But we want to set an_complete
+                * only if AN was enabled, thus we look at BMCR_ANENABLE.
+                * (According to 802.3-2008 section 22.2.4.2.10, we should be
+                *  able to get this same value from BMSR_ANEGCAPABLE, but tests
+                *  show that these Marvell PHYs don't conform to this part of
+                *  the specification - BMSR_ANEGCAPABLE is simply always 1.)
+                */
+               state->an_complete = !!(ctrl & BMCR_ANENABLE);
                state->duplex = status &
                                MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
                                                 DUPLEX_FULL : DUPLEX_HALF;
@@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
                        dev_err(chip->dev, "invalid PHY speed\n");
                        return -EINVAL;
                }
+       } else if (state->link &&
+                  state->interface != PHY_INTERFACE_MODE_SGMII) {
+               /* If Speed and Duplex Resolved register is 0 and link is up, it
+                * means that AN was enabled, but link partner had it disabled
+                * and the PHY invoked the Auto-Negotiation Bypass feature and
+                * linked anyway.
+                */
+               state->duplex = DUPLEX_FULL;
+               if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+                       state->speed = SPEED_2500;
+               else
+                       state->speed = SPEED_1000;
        } else {
                state->link = false;
        }
@@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                                   int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status;
+       u16 lpa, status, ctrl;
        int err;
 
+       err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+       if (err) {
+               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               return err;
+       }
+
        err = mv88e6352_serdes_read(chip, 0x11, &status);
        if (err) {
                dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
 }
 
 int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -801,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
                           bool up)
 {
        u8 cmode = chip->ports[port].cmode;
-       int err = 0;
+       int err;
 
        switch (cmode) {
        case MV88E6XXX_PORT_STS_CMODE_SGMII:
@@ -813,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
        case MV88E6XXX_PORT_STS_CMODE_RXAUI:
                err = mv88e6390_serdes_power_10g(chip, lane, up);
                break;
+       default:
+               err = -EINVAL;
+               break;
        }
 
        if (!err && up)
@@ -883,9 +915,16 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
        int port, int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status;
+       u16 lpa, status, ctrl;
        int err;
 
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6390_SGMII_BMCR, &ctrl);
+       if (err) {
+               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               return err;
+       }
+
        err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
                                    MV88E6390_SGMII_PHY_STATUS, &status);
        if (err) {
@@ -900,7 +939,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
 }
 
 static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@@ -1271,9 +1310,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
        }
 }
 
-static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
+static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
+                                       bool on)
 {
-       u16 reg, pcs;
+       u16 reg;
+       int err;
+
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_CTRL1, &reg);
+       if (err)
+               return err;
+
+       if (on)
+               reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+                        MV88E6393X_SERDES_CTRL1_RX_PDOWN);
+       else
+               reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+                      MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+       return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                     MV88E6393X_SERDES_CTRL1, reg);
+}
+
+static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
+{
+       u16 reg;
        int err;
 
        /* mv88e6393x family errata 4.6:
@@ -1284,26 +1345,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
         * It seems that after this workaround the SERDES is automatically
         * powered up (the bit is cleared), so power it down.
         */
-       if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
-           lane == MV88E6393X_PORT10_LANE) {
-               err = mv88e6390_serdes_read(chip, lane,
-                                           MDIO_MMD_PHYXS,
-                                           MV88E6393X_SERDES_POC, &reg);
-               if (err)
-                       return err;
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_POC, &reg);
+       if (err)
+               return err;
 
-               reg &= ~MV88E6393X_SERDES_POC_PDOWN;
-               reg |= MV88E6393X_SERDES_POC_RESET;
+       reg &= ~MV88E6393X_SERDES_POC_PDOWN;
+       reg |= MV88E6393X_SERDES_POC_RESET;
 
-               err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
-                                            MV88E6393X_SERDES_POC, reg);
-               if (err)
-                       return err;
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                    MV88E6393X_SERDES_POC, reg);
+       if (err)
+               return err;
 
-               err = mv88e6390_serdes_power_sgmii(chip, lane, false);
-               if (err)
-                       return err;
-       }
+       err = mv88e6390_serdes_power_sgmii(chip, lane, false);
+       if (err)
+               return err;
+
+       return mv88e6393x_serdes_power_lane(chip, lane, false);
+}
+
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+{
+       int err;
+
+       err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
+       if (err)
+               return err;
+
+       err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
+       if (err)
+               return err;
+
+       return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
+}
+
+static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
+{
+       u16 reg, pcs;
+       int err;
 
        /* mv88e6393x family errata 4.8:
         * When a SERDES port is operating in 1000BASE-X or SGMII mode link may
@@ -1334,38 +1414,152 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
                                      MV88E6393X_ERRATA_4_8_REG, reg);
 }
 
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
+                                        u8 cmode)
+{
+       static const struct {
+               u16 dev, reg, val, mask;
+       } fixes[] = {
+               { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+               { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+               { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+               { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+               { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+               { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+               { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+                 MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+       };
+       int err, i;
+       u16 reg;
+
+       /* mv88e6393x family errata 5.2:
+        * For optimal signal integrity the following sequence should be applied
+        * to SERDES operating in 10G mode. These registers only apply to 10G
+        * operation and have no effect on other speeds.
+        */
+       if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+               err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
+                                           fixes[i].reg, &reg);
+               if (err)
+                       return err;
+
+               reg &= ~fixes[i].mask;
+               reg |= fixes[i].val;
+
+               err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
+                                            fixes[i].reg, reg);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
+                                             int lane, u8 cmode, bool on)
 {
+       u16 reg;
        int err;
 
-       err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
+       if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+               return 0;
+
+       /* Inband AN is broken on Amethyst in 2500base-x mode when set by
+        * standard mechanism (via cmode).
+        * We can get around this by configuring the PCS mode to 1000base-x
+        * and then writing value 0x58 to register 1e.8000. (This must be done
+        * while SerDes receiver and transmitter are disabled, which is, when
+        * this function is called.)
+        * It seems that when we do this configuration to 2500base-x mode (by
+        * changing PCS mode to 1000base-x and frequency to 3.125 GHz from
+        * 1.25 GHz) and then configure to sgmii or 1000base-x, the device
+        * thinks that it already has SerDes at 1.25 GHz and does not change
+        * the 1e.8000 register, leaving SerDes at 3.125 GHz.
+        * To avoid this, change PCS mode back to 2500base-x when disabling
+        * SerDes from 2500base-x mode.
+        */
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_POC, &reg);
+       if (err)
+               return err;
+
+       reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
+       if (on)
+               reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
+                      MV88E6393X_SERDES_POC_AN;
+       else
+               reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
+       reg |= MV88E6393X_SERDES_POC_RESET;
+
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                    MV88E6393X_SERDES_POC, reg);
        if (err)
                return err;
 
-       err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
        if (err)
                return err;
 
-       return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
+       return 0;
 }
 
 int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
                            bool on)
 {
        u8 cmode = chip->ports[port].cmode;
+       int err;
 
        if (port != 0 && port != 9 && port != 10)
                return -EOPNOTSUPP;
 
+       if (on) {
+               err = mv88e6393x_serdes_erratum_4_8(chip, lane);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+                                                        true);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_power_lane(chip, lane, true);
+               if (err)
+                       return err;
+       }
+
        switch (cmode) {
        case MV88E6XXX_PORT_STS_CMODE_SGMII:
        case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
        case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
-               return mv88e6390_serdes_power_sgmii(chip, lane, on);
+               err = mv88e6390_serdes_power_sgmii(chip, lane, on);
+               break;
        case MV88E6393X_PORT_STS_CMODE_5GBASER:
        case MV88E6393X_PORT_STS_CMODE_10GBASER:
-               return mv88e6390_serdes_power_10g(chip, lane, on);
+               err = mv88e6390_serdes_power_10g(chip, lane, on);
+               break;
+       default:
+               err = -EINVAL;
+               break;
        }
 
-       return 0;
+       if (err)
+               return err;
+
+       if (!on) {
+               err = mv88e6393x_serdes_power_lane(chip, lane, false);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+                                                        false);
+       }
+
+       return err;
 }
index cbb3ba30caea9db2e5f9f3fa3820946b50d1c4fe..8dd8ed225b4594f5421d91fb3755dfe242cb0bfb 100644 (file)
 #define MV88E6393X_SERDES_POC_PCS_MASK         0x0007
 #define MV88E6393X_SERDES_POC_RESET            BIT(15)
 #define MV88E6393X_SERDES_POC_PDOWN            BIT(5)
+#define MV88E6393X_SERDES_POC_AN               BIT(3)
+#define MV88E6393X_SERDES_CTRL1                        0xf003
+#define MV88E6393X_SERDES_CTRL1_TX_PDOWN       BIT(9)
+#define MV88E6393X_SERDES_CTRL1_RX_PDOWN       BIT(8)
 
 #define MV88E6393X_ERRATA_4_8_REG              0xF074
 #define MV88E6393X_ERRATA_4_8_BIT              BIT(14)
index 327cc46548065461995cbaf3b23f2abfaba129a5..f1a05e7dc81811ea36a5e9825d553badccbefbbb 100644 (file)
@@ -290,8 +290,11 @@ static int felix_setup_mmio_filtering(struct felix *felix)
                }
        }
 
-       if (cpu < 0)
+       if (cpu < 0) {
+               kfree(tagging_rule);
+               kfree(redirect_rule);
                return -EINVAL;
+       }
 
        tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
        *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
index a429c9750add1bddcd5f1afaa1174d6f7ca7a9e4..147ca39531a3bbaaa8f29f1a39071397bdc23fa3 100644 (file)
@@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds)
                /* Set initial MTU for every port.
                 * We have only have a general MTU setting. So track
                 * every port and set the max across all port.
+                * Set per port MTU to 1500 as the MTU change function
+                * will add the overhead and if its set to 1518 then it
+                * will apply the overhead again and we will end up with
+                * MTU of 1536 instead of 1518
                 */
-               priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
+               priv->port_mtu[i] = ETH_DATA_LEN;
        }
 
        /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -1433,6 +1437,12 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
 
                qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
 
+               /* From original code is reported port instability as SGMII also
+                * require delay set. Apply advised values here or take them from DT.
+                */
+               if (state->interface == PHY_INTERFACE_MODE_SGMII)
+                       qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
                /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
                 * falling edge is set writing in the PORT0 PAD reg
                 */
@@ -1455,12 +1465,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
                                        QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
                                        val);
 
-               /* From original code is reported port instability as SGMII also
-                * require delay set. Apply advised values here or take them from DT.
-                */
-               if (state->interface == PHY_INTERFACE_MODE_SGMII)
-                       qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
                break;
        default:
                dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
index baaae97283c5e259ef3cbd7e293c3db3f257e459..078ca4cd716057ee3674be7516de58185c03a533 100644 (file)
 #define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC    2112
 
 /* Family-specific data and limits */
+#define RTL8365MB_PHYADDRMAX   7
 #define RTL8365MB_NUM_PHYREGS  32
 #define RTL8365MB_PHYREGMAX    (RTL8365MB_NUM_PHYREGS - 1)
 #define RTL8365MB_MAX_NUM_PORTS        (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
 #define RTL8365MB_INDIRECT_ACCESS_STATUS_REG                   0x1F01
 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG                  0x1F02
 #define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK    GENMASK(4, 0)
-#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK                GENMASK(6, 5)
+#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK                GENMASK(7, 5)
 #define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK    GENMASK(11, 8)
 #define   RTL8365MB_PHY_BASE                                   0x2000
 #define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG               0x1F03
@@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
        u16 val;
        int ret;
 
+       if (phy > RTL8365MB_PHYADDRMAX)
+               return -EINVAL;
+
        if (regnum > RTL8365MB_PHYREGMAX)
                return -EINVAL;
 
@@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
        u32 ocp_addr;
        int ret;
 
+       if (phy > RTL8365MB_PHYADDRMAX)
+               return -EINVAL;
+
        if (regnum > RTL8365MB_PHYREGMAX)
                return -EINVAL;
 
index d75d95a97dd93207df9604b210dfb82e5cd09aa4..993b2fb429612c403821e060d0fb63cb83a19906 100644 (file)
@@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev)
                priv->rxdescmem_busaddr = dma_res->start;
 
        } else {
+               ret = -ENODEV;
                goto err_free_netdev;
        }
 
-       if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+       if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
                dma_set_coherent_mask(priv->device,
                                      DMA_BIT_MASK(priv->dmaops->dmamask));
-       else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+       } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
                dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
-       else
+       } else {
+               ret = -EIO;
                goto err_free_netdev;
+       }
 
        /* MAC address space */
        ret = request_and_map(pdev, "control_port", &control_port,
index 23b2d390fcdda8af72fe7e474fc5fef7194a9274..ace691d7cd759f52d38d116ce8cfe979089d7f25 100644 (file)
 
 #define AQ_DEVICE_ID_AQC113DEV 0x00C0
 #define AQ_DEVICE_ID_AQC113CS  0x94C0
+#define AQ_DEVICE_ID_AQC113CA  0x34C0
 #define AQ_DEVICE_ID_AQC114CS  0x93C0
 #define AQ_DEVICE_ID_AQC113    0x04C0
 #define AQ_DEVICE_ID_AQC113C   0x14C0
 #define AQ_DEVICE_ID_AQC115C   0x12C0
+#define AQ_DEVICE_ID_AQC116C   0x11C0
 
 #define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
 
 
 #define AQ_NIC_RATE_10G                BIT(0)
 #define AQ_NIC_RATE_5G         BIT(1)
-#define AQ_NIC_RATE_5GSR       BIT(2)
-#define AQ_NIC_RATE_2G5                BIT(3)
-#define AQ_NIC_RATE_1G         BIT(4)
-#define AQ_NIC_RATE_100M       BIT(5)
-#define AQ_NIC_RATE_10M                BIT(6)
-#define AQ_NIC_RATE_1G_HALF    BIT(7)
-#define AQ_NIC_RATE_100M_HALF  BIT(8)
-#define AQ_NIC_RATE_10M_HALF   BIT(9)
+#define AQ_NIC_RATE_2G5                BIT(2)
+#define AQ_NIC_RATE_1G         BIT(3)
+#define AQ_NIC_RATE_100M       BIT(4)
+#define AQ_NIC_RATE_10M                BIT(5)
+#define AQ_NIC_RATE_1G_HALF    BIT(6)
+#define AQ_NIC_RATE_100M_HALF  BIT(7)
+#define AQ_NIC_RATE_10M_HALF   BIT(8)
 
-#define AQ_NIC_RATE_EEE_10G    BIT(10)
-#define AQ_NIC_RATE_EEE_5G     BIT(11)
-#define AQ_NIC_RATE_EEE_2G5    BIT(12)
-#define AQ_NIC_RATE_EEE_1G     BIT(13)
-#define AQ_NIC_RATE_EEE_100M   BIT(14)
+#define AQ_NIC_RATE_EEE_10G    BIT(9)
+#define AQ_NIC_RATE_EEE_5G     BIT(10)
+#define AQ_NIC_RATE_EEE_2G5    BIT(11)
+#define AQ_NIC_RATE_EEE_1G     BIT(12)
+#define AQ_NIC_RATE_EEE_100M   BIT(13)
 #define AQ_NIC_RATE_EEE_MSK     (AQ_NIC_RATE_EEE_10G |\
                                 AQ_NIC_RATE_EEE_5G |\
                                 AQ_NIC_RATE_EEE_2G5 |\
index 062a300a566a55c505cb556530a3ca0aec83c703..dbd28466013580aca8cab6b99bbc168dd9c40e99 100644 (file)
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
 };
 
 struct aq_stats_s {
+       u64 brc;
+       u64 btc;
        u64 uprc;
        u64 mprc;
        u64 bprc;
index 1acf544afeb4449b55ad4ce93781c13ccfa98a51..33f1a1377588bda47db7a76c94421a3f84d3ac4f 100644 (file)
@@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
        aq_macsec_init(self);
 #endif
 
-       mutex_lock(&self->fwreq_mutex);
-       err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
-       mutex_unlock(&self->fwreq_mutex);
-       if (err)
-               goto err_exit;
+       if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+               // If DT has none or an invalid one, ask device for MAC address
+               mutex_lock(&self->fwreq_mutex);
+               err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+               mutex_unlock(&self->fwreq_mutex);
 
-       eth_hw_addr_set(self->ndev, addr);
+               if (err)
+                       goto err_exit;
 
-       if (!is_valid_ether_addr(self->ndev->dev_addr) ||
-           !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
-               netdev_warn(self->ndev, "MAC is invalid, will use random.");
-               eth_hw_addr_random(self->ndev);
+               if (is_valid_ether_addr(addr) &&
+                   aq_nic_is_valid_ether_addr(addr)) {
+                       eth_hw_addr_set(self->ndev, addr);
+               } else {
+                       netdev_warn(self->ndev, "MAC is invalid, will use random.");
+                       eth_hw_addr_random(self->ndev);
+               }
        }
 
 #if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
        data[++i] = stats->mbtc;
        data[++i] = stats->bbrc;
        data[++i] = stats->bbtc;
-       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
-       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+       if (stats->brc)
+               data[++i] = stats->brc;
+       else
+               data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+       if (stats->btc)
+               data[++i] = stats->btc;
+       else
+               data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
        data[++i] = stats->dma_pkt_rc;
        data[++i] = stats->dma_pkt_tc;
        data[++i] = stats->dma_oct_rc;
index d4b1976ee69b934d4a5dffac2d13da5697d78066..797a95142d1f44dbc0ed454e2daf955ec8fc90ea 100644 (file)
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+       { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+       { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
 
        {}
 };
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_AQC113CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC114CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
-       { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+       { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+       { AQ_DEVICE_ID_AQC113CA,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+       { AQ_DEVICE_ID_AQC116C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
 };
 
 MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
index 24122ccda614cc46fab36f59b208977fbc8b62b7..81b3756417ec2cf464f3097e672bcd12797267a5 100644 (file)
@@ -298,13 +298,14 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
                        }
                }
 
-               if (unlikely(buff->is_eop)) {
+               if (unlikely(buff->is_eop && buff->skb)) {
                        u64_stats_update_begin(&self->stats.tx.syncp);
                        ++self->stats.tx.packets;
                        self->stats.tx.bytes += buff->skb->len;
                        u64_stats_update_end(&self->stats.tx.syncp);
 
                        dev_kfree_skb_any(buff->skb);
+                       buff->skb = NULL;
                }
                buff->pa = 0U;
                buff->eop_index = 0xffffU;
index d281322d7dd29074185e2645aa0a17bb344ef404..f4774cf051c9780cfc434450673bb82d175e2736 100644 (file)
@@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
 {
        unsigned int count;
 
-       WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
-                 "Invalid tc %u (#rx=%u, #tx=%u)\n",
-                 tc, self->rx_rings, self->tx_rings);
        if (!aq_vec_is_valid_tc(self, tc))
                return 0;
 
index fc0e6600664464d1a05acb5ae7c2c10791dbac82..7e88d7234b14588c1816b2808ab1421ecf23b4a8 100644 (file)
@@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
                        goto err_exit;
 
                if (fw.len == 0xFFFFU) {
+                       if (sw.len > sizeof(self->rpc)) {
+                               printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+                               err = -EINVAL;
+                               goto err_exit;
+                       }
                        err = hw_atl_utils_fw_rpc_call(self, sw.len);
                        if (err < 0)
                                goto err_exit;
@@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 
        if (rpc) {
                if (fw.len) {
+                       if (fw.len > sizeof(self->rpc)) {
+                               printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+                               err = -EINVAL;
+                               goto err_exit;
+                       }
                        err =
                        hw_atl_utils_fw_downld_dwords(self,
                                                      self->rpc_addr,
@@ -857,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
 int hw_atl_utils_update_stats(struct aq_hw_s *self)
 {
        struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
        struct hw_atl_utils_mbox mbox;
+       bool corrupted_stats = false;
 
        hw_atl_utils_mpi_read_stats(self, &mbox);
 
-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
-                       mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+               curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+       else \
+               corrupted_stats = true; \
+} while (0)
 
        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc);
@@ -882,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
                AQ_SDELTA(bbrc);
                AQ_SDELTA(bbtc);
                AQ_SDELTA(dpc);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
        }
 #undef AQ_SDELTA
 
index eac631c45c565a4c17802123e38c4120bfef848d..4d4cfbc91e19cf658fccd181d037c876a864e8f2 100644 (file)
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
        if (speed & AQ_NIC_RATE_5G)
                rate |= FW2X_RATE_5G;
 
-       if (speed & AQ_NIC_RATE_5GSR)
-               rate |= FW2X_RATE_5G;
-
        if (speed & AQ_NIC_RATE_2G5)
                rate |= FW2X_RATE_2G5;
 
index c98708bb044cad11597fba703882b761ec46f132..5dfc751572edc5d4817d3a0bab21ebddd376bded 100644 (file)
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
                          AQ_NIC_RATE_5G  |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G  |
-                         AQ_NIC_RATE_1G_HALF   |
                          AQ_NIC_RATE_100M      |
-                         AQ_NIC_RATE_100M_HALF |
-                         AQ_NIC_RATE_10M       |
-                         AQ_NIC_RATE_10M_HALF,
+                         AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+       DEFAULT_BOARD_BASIC_CAPABILITIES,
+       .media_type = AQ_HW_MEDIA_TYPE_TP,
+       .link_speed_msk = AQ_NIC_RATE_2G5 |
+                         AQ_NIC_RATE_1G  |
+                         AQ_NIC_RATE_100M      |
+                         AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+       DEFAULT_BOARD_BASIC_CAPABILITIES,
+       .media_type = AQ_HW_MEDIA_TYPE_TP,
+       .link_speed_msk = AQ_NIC_RATE_1G  |
+                         AQ_NIC_RATE_100M      |
+                         AQ_NIC_RATE_10M,
 };
 
 static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
index de8723f1c28a13f0e98214f9011ac771028f2eff..346f0dc9912e500014d253135ab431f67be8eb3c 100644 (file)
@@ -9,6 +9,8 @@
 #include "aq_common.h"
 
 extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
 extern const struct aq_hw_ops hw_atl2_ops;
 
 #endif /* HW_ATL2_H */
index b66fa346581ce30b94627f810cf013d02f63323c..6bad64c77b87c94a258b4bee5391ffa66a52c031 100644 (file)
@@ -239,7 +239,8 @@ struct version_s {
                u8 minor;
                u16 build;
        } phy;
-       u32 rsvd;
+       u32 drv_iface_ver:4;
+       u32 rsvd:28;
 };
 
 struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
        u16 rsvd2;
 };
 
-struct statistics_s {
+struct statistics_a0_s {
        struct {
                u32 link_up;
                u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
        u32 reserve_fw_gap;
 };
 
+struct __packed statistics_b0_s {
+       u64 rx_good_octets;
+       u64 rx_pause_frames;
+       u64 rx_good_frames;
+       u64 rx_errors;
+       u64 rx_unicast_frames;
+       u64 rx_multicast_frames;
+       u64 rx_broadcast_frames;
+
+       u64 tx_good_octets;
+       u64 tx_pause_frames;
+       u64 tx_good_frames;
+       u64 tx_errors;
+       u64 tx_unicast_frames;
+       u64 tx_multicast_frames;
+       u64 tx_broadcast_frames;
+
+       u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+       union __packed {
+               struct statistics_a0_s a0;
+               struct statistics_b0_s b0;
+       };
+};
+
 struct filter_caps_s {
        u8 l2_filters_base_index:6;
        u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
        u32 rsvd5;
 };
 
-struct fw_interface_out {
+struct __packed fw_interface_out {
        struct transaction_counter_s transaction_id;
        struct version_s version;
        struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
        struct core_dump_s core_dump;
        u32 rsvd11;
        struct statistics_s stats;
-       u32 rsvd12;
        struct filter_caps_s filter_caps;
        struct device_caps_s device_caps;
        u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
 #define  AQ_HOST_MODE_LOW_POWER    3U
 #define  AQ_HOST_MODE_SHUTDOWN     4U
 
+#define  AQ_A2_FW_INTERFACE_A0     0
+#define  AQ_A2_FW_INTERFACE_B0     1
+
 int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
 
 int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
index dd259c8f2f4f397adbed0b5ea284bd8881a48735..58d426dda3edbf13b4e852b3fab14e720d55d3b2 100644 (file)
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
                        if (cnt > AQ_A2_FW_READ_TRY_MAX)
                                return -ETIME;
                        if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
-                               udelay(1);
+                               mdelay(1);
                } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
 
                hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
 {
        link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
        link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
-       link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+       link_options->rate_N5G = link_options->rate_5G;
        link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
        link_options->rate_N2P5G = link_options->rate_2P5G;
        link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
                rate |= AQ_NIC_RATE_10G;
        if (lkp_link_caps->rate_5G)
                rate |= AQ_NIC_RATE_5G;
-       if (lkp_link_caps->rate_N5G)
-               rate |= AQ_NIC_RATE_5GSR;
        if (lkp_link_caps->rate_2P5G)
                rate |= AQ_NIC_RATE_2G5;
        if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
        return 0;
 }
 
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+                               struct statistics_s *stats)
 {
        struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
-       struct statistics_s stats;
-
-       hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
-                       stats.msm._F_ - priv->last_stats.msm._F_)
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
+       bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+               curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+       else \
+               corrupted_stats = true; \
+} while (0)
 
        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
                AQ_SDELTA(mbtc, tx_multicast_octets);
                AQ_SDELTA(bbrc, rx_broadcast_octets);
                AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
        }
 #undef AQ_SDELTA
-       self->curr_stats.dma_pkt_rc =
-               hw_atl_stats_rx_dma_good_pkt_counter_get(self);
-       self->curr_stats.dma_pkt_tc =
-               hw_atl_stats_tx_dma_good_pkt_counter_get(self);
-       self->curr_stats.dma_oct_rc =
-               hw_atl_stats_rx_dma_good_octet_counter_get(self);
-       self->curr_stats.dma_oct_tc =
-               hw_atl_stats_tx_dma_good_octet_counter_get(self);
-       self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+                               struct statistics_s *stats)
+{
+       struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
+       bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+               curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+       else \
+               corrupted_stats = true; \
+} while (0)
+
+       if (self->aq_link_status.mbps) {
+               AQ_SDELTA(uprc, rx_unicast_frames);
+               AQ_SDELTA(mprc, rx_multicast_frames);
+               AQ_SDELTA(bprc, rx_broadcast_frames);
+               AQ_SDELTA(erpr, rx_errors);
+               AQ_SDELTA(brc, rx_good_octets);
+
+               AQ_SDELTA(uptc, tx_unicast_frames);
+               AQ_SDELTA(mptc, tx_multicast_frames);
+               AQ_SDELTA(bptc, tx_broadcast_frames);
+               AQ_SDELTA(erpt, tx_errors);
+               AQ_SDELTA(btc, tx_good_octets);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
+       }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+       struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct statistics_s stats;
+       struct version_s version;
+       int err;
+
+       err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+       if (err)
+               return err;
+
+       err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+       if (err)
+               return err;
+
+       if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+               aq_a2_fill_a0_stats(self, &stats);
+       else
+               aq_a2_fill_b0_stats(self, &stats);
+
+       cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+       cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+       cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+       cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+       cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
 
        memcpy(&priv->last_stats, &stats, sizeof(stats));
 
@@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
        hw_atl2_shared_buffer_read_safe(self, version, &version);
 
        /* A2 FW version is stored in reverse order */
-       return version.mac.major << 24 |
-              version.mac.minor << 16 |
-              version.mac.build;
+       return version.bundle.major << 24 |
+              version.bundle.minor << 16 |
+              version.bundle.build;
 }
 
 int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
index 80263c3cef7575f650187432c13f358e1cc6198c..4a83c991dcbedc3e7a65660b2e1e456a2f4c4519 100644 (file)
@@ -127,9 +127,9 @@ struct ax88796c_device {
                #define AX_PRIV_FLAGS_MASK      (AX_CAP_COMP)
 
        unsigned long           flags;
-               #define EVENT_INTR              BIT(0)
-               #define EVENT_TX                BIT(1)
-               #define EVENT_SET_MULTI         BIT(2)
+               #define EVENT_INTR              0
+               #define EVENT_TX                1
+               #define EVENT_SET_MULTI         2
 
 };
 
index 94df4f96d2be210407a9fa72081dd04360be7510..0710e716d6825fdc5b245f8dacda762e0848e826 100644 (file)
@@ -34,7 +34,7 @@ int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
 
        /* OP */
        ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
-       ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)&status, 3);
+       ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
        if (ret)
                dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
        else
index 7cc5213c575a1d40cc012d16a0feaf44c611c3cf..b07cb9bc5f2d0bbd5a02364a6100f6e1be806295 100644 (file)
@@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
 
        enet->irq_tx = platform_get_irq_byname(pdev, "tx");
 
-       dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        err = bcm4908_enet_dma_alloc(enet);
        if (err)
index 1835d2e451c0139e272e400774b0d6d19487df98..fc7fce642666ceab237461a33bf1ee051eef50b2 100644 (file)
@@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
 {
        int i, rc;
        struct bnx2x_ilt *ilt = BP_ILT(bp);
-       struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+       struct ilt_client_info *ilt_cli;
 
        if (!ilt || !ilt->lines)
                return -1;
 
+       ilt_cli = &ilt->clients[cli_num];
+
        if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
                return 0;
 
index d0d5da9b78f8b2f8fa8d9d5bab274775ac33560f..4c9507d82fd0d29cbbdce912b15235512dcf0598 100644 (file)
@@ -2258,6 +2258,16 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
        }
 }
 
+/* Must hold rtnl_lock */
+static inline bool bnxt_sriov_cfg(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg);
+#else
+       return false;
+#endif
+}
+
 extern const u16 bnxt_lhint_arr[];
 
 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
index 5c464ea73576f40bb0e8d418aa6e4a3be6140416..951c4c569a9b3ac846fc5c5d04df70bdb64c3673 100644 (file)
@@ -360,7 +360,7 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
                NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
                break;
        default:
-               netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err);
+               netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
                NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
                break;
        }
@@ -441,12 +441,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
 
        switch (action) {
        case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
-               if (BNXT_PF(bp) && bp->pf.active_vfs) {
+               rtnl_lock();
+               if (bnxt_sriov_cfg(bp)) {
                        NL_SET_ERR_MSG_MOD(extack,
-                                          "reload is unsupported when VFs are allocated");
+                                          "reload is unsupported while VFs are allocated or being configured");
+                       rtnl_unlock();
                        return -EOPNOTSUPP;
                }
-               rtnl_lock();
                if (bp->dev->reg_state == NETREG_UNREGISTERED) {
                        rtnl_unlock();
                        return -ENODEV;
index e6a4a768b10b29a791db83567e9b34385b53e7dc..1471b6130a2b9015ee787afc5f51dd1f968eef88 100644 (file)
@@ -1868,7 +1868,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
        struct flow_cls_offload *flower = type_data;
        struct bnxt *bp = priv->bp;
 
-       if (flower->common.chain_index)
+       if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
                return -EOPNOTSUPP;
 
        switch (type) {
index 64479c464b4ec5a1f6ab82f789b2834f031e8ee6..ae9cca768d748a7b005d641131c60a23ae297446 100644 (file)
@@ -3196,6 +3196,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
        }
        if (adapter->registered_device_map == 0) {
                dev_err(&pdev->dev, "could not register any net devices\n");
+               err = -EINVAL;
                goto err_disable_interrupts;
        }
 
index 13121c4dcfe6f4fc881498452a301c7ce45d0e3e..71730ef4cd57097c5c1ae432365f74391e5780df 100644 (file)
@@ -4709,6 +4709,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
         lp->ibn = 3;
         lp->active = *p++;
        if (MOTO_SROM_BUG) lp->active = 0;
+       /* if (MOTO_SROM_BUG) statement indicates lp->active could
+        * be 8 (i.e. the size of array lp->phy) */
+       if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
+               return -EINVAL;
        lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
        lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
        lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
@@ -5000,19 +5004,23 @@ mii_get_phy(struct net_device *dev)
        }
        if ((j == limit) && (i < DE4X5_MAX_MII)) {
            for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
-           lp->phy[k].addr = i;
-           lp->phy[k].id = id;
-           lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register         */
-           lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies   */
-           lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex    */
-           lp->mii_cnt++;
-           lp->active++;
-           printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
-           j = de4x5_debug;
-           de4x5_debug |= DEBUG_MII;
-           de4x5_dbg_mii(dev, k);
-           de4x5_debug = j;
-           printk("\n");
+           if (k < DE4X5_MAX_PHY) {
+               lp->phy[k].addr = i;
+               lp->phy[k].id = id;
+               lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register         */
+               lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies   */
+               lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex    */
+               lp->mii_cnt++;
+               lp->active++;
+               printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+               j = de4x5_debug;
+               de4x5_debug |= DEBUG_MII;
+               de4x5_dbg_mii(dev, k);
+               de4x5_debug = j;
+               printk("\n");
+           } else {
+               goto purgatory;
+           }
        }
     }
   purgatory:
index 714e961e7a77a28ae861a80810cd8fc320cc9733..8e643567abce2a0673eb32eb3e55d3cc6c15b1bd 100644 (file)
@@ -4550,10 +4550,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 
        fsl_mc_portal_free(priv->mc_io);
 
-       free_netdev(net_dev);
+       destroy_workqueue(priv->dpaa2_ptp_wq);
 
        dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
 
+       free_netdev(net_dev);
+
        return 0;
 }
 
index 7b4961daa25402003df2d190ccbff610b78c50b6..ed7301b6916941268d1f6ad54adab7c1d3615ca5 100644 (file)
@@ -377,6 +377,9 @@ struct bufdesc_ex {
 #define FEC_ENET_WAKEUP        ((uint)0x00020000)      /* Wakeup request */
 #define FEC_ENET_TXF   (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
 #define FEC_ENET_RXF   (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X)    (((X) == 0) ? FEC_ENET_RXF_0 :  \
+                               (((X) == 1) ? FEC_ENET_RXF_1 :  \
+                               FEC_ENET_RXF_2))
 #define FEC_ENET_TS_AVAIL       ((uint)0x00010000)
 #define FEC_ENET_TS_TIMER       ((uint)0x00008000)
 
index bc418b910999fc5821cf6917097435c9316745ed..1b1f7f2a61306e78d4dcfc07f1502dfda5b5de43 100644 (file)
@@ -1480,7 +1480,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                        break;
                pkt_received++;
 
-               writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+               writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
 
                /* Check for errors. */
                status ^= BD_ENET_RX_LAST;
index 88ca49cbc1e2987031ab54a96ab343786fca42ea..d57508bc4307fe3901146983710c6f53734eb4e5 100644 (file)
@@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
                set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
        } else {
                skb = napi_alloc_skb(napi, len);
+
+               if (unlikely(!skb))
+                       return NULL;
                set_protocol = true;
        }
        __skb_put(skb, len);
index 23d9cbf262c3201916f84448a3a2c99ad30901be..740850b64aff50b1c2465178edf7b525af38ac96 100644 (file)
@@ -400,6 +400,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
                return;
 
        if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+               /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
+                  We need check to prevent array overflow */
+               if (port >= DSAF_MAX_PORT_NUM)
+                       return;
                reg_val_1  = 0x1 << port;
                port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
                /* there is difference between V1 and V2 in register.*/
index 67364ab63a1ff98c6607bad28a6f5b19c4cee0a0..081295bff7654930d243c3eee10379cf65ae4b46 100644 (file)
@@ -1081,7 +1081,8 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
        u32 j = 0;
 
        sprintf(result[j++], "%u", index);
-       sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+       sprintf(result[j++], "%u",
+               READ_ONCE(ring->page_pool->pages_state_hold_cnt));
        sprintf(result[j++], "%u",
                atomic_read(&ring->page_pool->pages_state_release_cnt));
        sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
@@ -1106,6 +1107,11 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
                return -EFAULT;
        }
 
+       if (!priv->ring[h->kinfo.num_tqps].page_pool) {
+               dev_err(&h->pdev->dev, "page pool is not initialized\n");
+               return -EFAULT;
+       }
+
        for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
                result[i] = &data_str[i][0];
 
index c8442b86df9417cdcfad68512c882acc119b952d..c9b4568d7a8d775556335e641e48c3f5aa5c40e0 100644 (file)
@@ -987,6 +987,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
        const struct hnae3_ae_ops *ops = h->ae_algo->ops;
        const struct hns3_reset_type_map *rst_type_map;
+       enum ethtool_reset_flags rst_flags;
        u32 i, size;
 
        if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
@@ -1006,6 +1007,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
        for (i = 0; i < size; i++) {
                if (rst_type_map[i].rst_flags == *flags) {
                        rst_type = rst_type_map[i].rst_type;
+                       rst_flags = rst_type_map[i].rst_flags;
                        break;
                }
        }
@@ -1021,6 +1023,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
 
        ops->reset_event(h->pdev, h);
 
+       *flags &= ~rst_flags;
+
        return 0;
 }
 
index 25c419d40066dc2cbeacd6f4caff5124aa96d40a..41afaeea881bc010270006985b7383209775ea56 100644 (file)
@@ -703,9 +703,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev,  u16 rss_size)
        roundup_size = ilog2(roundup_size);
 
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
-               tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+               tc_valid[i] = 1;
                tc_size[i] = roundup_size;
-               tc_offset[i] = rss_size * i;
+               tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
        }
 
        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
index a78c398bf5b25688759be25f5fda981befbae336..01e7d3c0b68ed3020afeee4bb7df8da6794f8b0a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/module.h>
 
 #include "hinic_hw_dev.h"
 #include "hinic_dev.h"
index 3cca51735421a7435f4c7f32fa3f5af9003f2d37..0bb3911dd014d08c3611ea209961fe4e6f7125d4 100644 (file)
@@ -628,17 +628,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
        old_buff_size = adapter->prev_rx_buf_sz;
        new_buff_size = adapter->cur_rx_buf_sz;
 
-       /* Require buff size to be exactly same for now */
-       if (old_buff_size != new_buff_size)
-               return false;
-
-       if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
-               return true;
-
-       if (old_num_pools < adapter->min_rx_queues ||
-           old_num_pools > adapter->max_rx_queues ||
-           old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
-           old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+       if (old_buff_size != new_buff_size ||
+           old_num_pools != new_num_pools ||
+           old_pool_size != new_pool_size)
                return false;
 
        return true;
@@ -874,17 +866,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
        old_mtu = adapter->prev_mtu;
        new_mtu = adapter->req_mtu;
 
-       /* Require MTU to be exactly same to reuse pools for now */
-       if (old_mtu != new_mtu)
-               return false;
-
-       if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
-               return true;
-
-       if (old_num_pools < adapter->min_tx_queues ||
-           old_num_pools > adapter->max_tx_queues ||
-           old_pool_size < adapter->min_tx_entries_per_subcrq ||
-           old_pool_size > adapter->max_tx_entries_per_subcrq)
+       if (old_mtu != new_mtu ||
+           old_num_pools != new_num_pools ||
+           old_pool_size != new_pool_size)
                return false;
 
        return true;
index 5039a25369517b03344e0f6add5af324759ea985..0bf3d47bb90dc9a8919d45c47cc498ba70d4826a 100644 (file)
@@ -3003,9 +3003,10 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nic *nic = netdev_priv(netdev);
 
+       netif_device_detach(netdev);
+
        if (netif_running(netdev))
                e100_down(nic);
-       netif_device_detach(netdev);
 
        if ((nic->flags & wol_magic) | e100_asf(nic)) {
                /* enable reverse auto-negotiation */
@@ -3022,7 +3023,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
                *enable_wake = false;
        }
 
-       pci_clear_master(pdev);
+       pci_disable_device(pdev);
 }
 
 static int __e100_power_off(struct pci_dev *pdev, bool wake)
@@ -3042,8 +3043,6 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
 
        __e100_shutdown(to_pci_dev(dev_d), &wake);
 
-       device_wakeup_disable(dev_d);
-
        return 0;
 }
 
@@ -3051,6 +3050,14 @@ static int __maybe_unused e100_resume(struct device *dev_d)
 {
        struct net_device *netdev = dev_get_drvdata(dev_d);
        struct nic *nic = netdev_priv(netdev);
+       int err;
+
+       err = pci_enable_device(to_pci_dev(dev_d));
+       if (err) {
+               netdev_err(netdev, "Resume cannot enable PCI device, aborting\n");
+               return err;
+       }
+       pci_set_master(to_pci_dev(dev_d));
 
        /* disable reverse auto-negotiation */
        if (nic->phy == phy_82552_v) {
@@ -3062,10 +3069,11 @@ static int __maybe_unused e100_resume(struct device *dev_d)
                           smartspeed & ~(E100_82552_REV_ANEG));
        }
 
-       netif_device_attach(netdev);
        if (netif_running(netdev))
                e100_up(nic);
 
+       netif_device_attach(netdev);
+
        return 0;
 }
 
index 3d528fba754b4cce4553bcab13f08add22e42b5c..4d939af0a626c79705f74caca02f4df0f2c62cc2 100644 (file)
@@ -161,6 +161,7 @@ enum i40e_vsi_state_t {
        __I40E_VSI_OVERFLOW_PROMISC,
        __I40E_VSI_REINIT_REQUESTED,
        __I40E_VSI_DOWN_REQUESTED,
+       __I40E_VSI_RELEASING,
        /* This must be last as it determines the size of the BITMAP */
        __I40E_VSI_STATE_SIZE__,
 };
@@ -1247,6 +1248,7 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
 void i40e_ptp_init(struct i40e_pf *pf);
 void i40e_ptp_stop(struct i40e_pf *pf);
 int i40e_ptp_alloc_pins(struct i40e_pf *pf);
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
index 291e61ac3e4489eb0fc1e2420723861c914ed18b..2c1b1da1220eca3bb0b5e8a8cf4fe54ce1a70ed2 100644 (file)
@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
                return;
        }
+       if (vsi->type != I40E_VSI_MAIN &&
+           vsi->type != I40E_VSI_FDIR &&
+           vsi->type != I40E_VSI_VMDQ2) {
+               dev_info(&pf->pdev->dev,
+                        "vsi %d type %d descriptor rings not available\n",
+                        vsi_seid, vsi->type);
+               return;
+       }
        if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
                dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
                return;
index ba862131b9bdf2c51bc664703177eda072dd0a47..e118cf9265c79e3a9ae343381f3ac1a07f3d7410 100644 (file)
@@ -1790,6 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     bool is_add)
 {
        struct i40e_pf *pf = vsi->back;
+       u16 num_tc_qps = 0;
        u16 sections = 0;
        u8 netdev_tc = 0;
        u16 numtc = 1;
@@ -1797,13 +1798,33 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        u8 offset;
        u16 qmap;
        int i;
-       u16 num_tc_qps = 0;
 
        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        offset = 0;
+       /* zero out queue mapping, it will get updated on the end of the function */
+       memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
+
+       if (vsi->type == I40E_VSI_MAIN) {
+               /* This code helps add more queue to the VSI if we have
+                * more cores than RSS can support, the higher cores will
+                * be served by ATR or other filters. Furthermore, the
+                * non-zero req_queue_pairs says that user requested a new
+                * queue count via ethtool's set_channels, so use this
+                * value for queues distribution across traffic classes
+                */
+               if (vsi->req_queue_pairs > 0)
+                       vsi->num_queue_pairs = vsi->req_queue_pairs;
+               else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+                       vsi->num_queue_pairs = pf->num_lan_msix;
+       }
 
        /* Number of queues per enabled TC */
-       num_tc_qps = vsi->alloc_queue_pairs;
+       if (vsi->type == I40E_VSI_MAIN ||
+           (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
+               num_tc_qps = vsi->num_queue_pairs;
+       else
+               num_tc_qps = vsi->alloc_queue_pairs;
+
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1881,15 +1902,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                }
                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
        }
-
-       /* Set actual Tx/Rx queue pairs */
-       vsi->num_queue_pairs = offset;
-       if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
-               if (vsi->req_queue_pairs > 0)
-                       vsi->num_queue_pairs = vsi->req_queue_pairs;
-               else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-                       vsi->num_queue_pairs = pf->num_lan_msix;
-       }
+       /* Do not change previously set num_queue_pairs for PFs and VFs */
+       if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
+           (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
+           (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
+               vsi->num_queue_pairs = offset;
 
        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
@@ -2623,7 +2640,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] &&
-                   (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+                   (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+                   !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
                        int ret = i40e_sync_vsi_filters(pf->vsi[v]);
 
                        if (ret) {
@@ -5426,6 +5444,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
               sizeof(vsi->info.tc_mapping));
 }
 
+/**
+ * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
+ * @vsi: the VSI being reconfigured
+ * @vsi_offset: offset from main VF VSI
+ */
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+{
+       struct i40e_vsi_context ctxt = {};
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       int ret;
+
+       if (!vsi)
+               return I40E_ERR_PARAM;
+       pf = vsi->back;
+       hw = &pf->hw;
+
+       ctxt.seid = vsi->seid;
+       ctxt.pf_num = hw->pf_id;
+       ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
+       ctxt.uplink_seid = vsi->uplink_seid;
+       ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+       ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+       ctxt.info = vsi->info;
+
+       i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
+                                false);
+       if (vsi->reconfig_rss) {
+               vsi->rss_size = min_t(int, pf->alloc_rss_size,
+                                     vsi->num_queue_pairs);
+               ret = i40e_vsi_config_rss(vsi);
+               if (ret) {
+                       dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
+                       return ret;
+               }
+               vsi->reconfig_rss = false;
+       }
+
+       ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
+                        i40e_stat_str(hw, ret),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+               return ret;
+       }
+       /* update the local VSI info with updated queue map */
+       i40e_vsi_update_queue_map(vsi, &ctxt);
+       vsi->info.valid_sections = 0;
+
+       return ret;
+}
+
 /**
  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
  * @vsi: VSI to be configured
@@ -5716,24 +5786,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
        INIT_LIST_HEAD(&vsi->ch_list);
 }
 
-/**
- * i40e_is_any_channel - channel exist or not
- * @vsi: ptr to VSI to which channels are associated with
- *
- * Returns true or false if channel(s) exist for associated VSI or not
- **/
-static bool i40e_is_any_channel(struct i40e_vsi *vsi)
-{
-       struct i40e_channel *ch, *ch_tmp;
-
-       list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
-               if (ch->initialized)
-                       return true;
-       }
-
-       return false;
-}
-
 /**
  * i40e_get_max_queues_for_channel
  * @vsi: ptr to VSI to which channels are associated with
@@ -6240,26 +6292,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
        /* By default we are in VEPA mode, if this is the first VF/VMDq
         * VSI to be added switch to VEB mode.
         */
-       if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
-           (!i40e_is_any_channel(vsi))) {
-               if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
-                       dev_dbg(&pf->pdev->dev,
-                               "Failed to create channel. Override queues (%u) not power of 2\n",
-                               vsi->tc_config.tc_info[0].qcount);
-                       return -EINVAL;
-               }
 
-               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
-                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+       if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+               pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 
-                       if (vsi->type == I40E_VSI_MAIN) {
-                               if (pf->flags & I40E_FLAG_TC_MQPRIO)
-                                       i40e_do_reset(pf, I40E_PF_RESET_FLAG,
-                                                     true);
-                               else
-                                       i40e_do_reset_safe(pf,
-                                                          I40E_PF_RESET_FLAG);
-                       }
+               if (vsi->type == I40E_VSI_MAIN) {
+                       if (pf->flags & I40E_FLAG_TC_MQPRIO)
+                               i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+                       else
+                               i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
                }
                /* now onwards for main VSI, number of queues will be value
                 * of TC0's queue count
@@ -7912,12 +7953,20 @@ config_tc:
                            vsi->seid);
                need_reset = true;
                goto exit;
-       } else {
-               dev_info(&vsi->back->pdev->dev,
-                        "Setup channel (id:%u) utilizing num_queues %d\n",
-                        vsi->seid, vsi->tc_config.tc_info[0].qcount);
+       } else if (enabled_tc &&
+                  (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
+               netdev_info(netdev,
+                           "Failed to create channel. Override queues (%u) not power of 2\n",
+                           vsi->tc_config.tc_info[0].qcount);
+               ret = -EINVAL;
+               need_reset = true;
+               goto exit;
        }
 
+       dev_info(&vsi->back->pdev->dev,
+                "Setup channel (id:%u) utilizing num_queues %d\n",
+                vsi->seid, vsi->tc_config.tc_info[0].qcount);
+
        if (pf->flags & I40E_FLAG_TC_MQPRIO) {
                if (vsi->mqprio_qopt.max_rate[0]) {
                        u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
@@ -8482,9 +8531,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
                err = i40e_add_del_cloud_filter(vsi, filter, true);
 
        if (err) {
-               dev_err(&pf->pdev->dev,
-                       "Failed to add cloud filter, err %s\n",
-                       i40e_stat_str(&pf->hw, err));
+               dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
+                       err);
                goto err;
        }
 
@@ -13771,7 +13819,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
                return -ENODEV;
        }
-
+       set_bit(__I40E_VSI_RELEASING, vsi->state);
        uplink_seid = vsi->uplink_seid;
        if (vsi->type != I40E_VSI_SRIOV) {
                if (vsi->netdev_registered) {
index 472f56b360b8c6f84a51adb68ee8e476386f13a5..2ea4deb8fc44c60c45f8b74eab9596e8edee9469 100644 (file)
@@ -183,17 +183,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 /***********************misc routines*****************************/
 
 /**
- * i40e_vc_disable_vf
+ * i40e_vc_reset_vf
  * @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset.
+ * @notify_vf: notify vf about reset or not
+ * Reset VF handler.
  **/
-static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
+static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
 {
        struct i40e_pf *pf = vf->pf;
        int i;
 
-       i40e_vc_notify_vf_reset(vf);
+       if (notify_vf)
+               i40e_vc_notify_vf_reset(vf);
 
        /* We want to ensure that an actual reset occurs initiated after this
         * function was called. However, we do not want to wait forever, so
@@ -211,9 +212,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
                usleep_range(10000, 20000);
        }
 
-       dev_warn(&vf->pf->pdev->dev,
-                "Failed to initiate reset for VF %d after 200 milliseconds\n",
-                vf->vf_id);
+       if (notify_vf)
+               dev_warn(&vf->pf->pdev->dev,
+                        "Failed to initiate reset for VF %d after 200 milliseconds\n",
+                        vf->vf_id);
+       else
+               dev_dbg(&vf->pf->pdev->dev,
+                       "Failed to initiate reset for VF %d after 200 milliseconds\n",
+                       vf->vf_id);
 }
 
 /**
@@ -674,14 +680,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct virtchnl_rxq_info *info)
 {
+       u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_rxq rx_ctx;
-       u16 pf_queue_id;
        int ret = 0;
 
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
-
        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 
@@ -719,6 +724,10 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
        }
        rx_ctx.rxmax = info->max_pkt_size;
 
+       /* if port VLAN is configured increase the max packet size */
+       if (vsi->info.pvid)
+               rx_ctx.rxmax += VLAN_HLEN;
+
        /* enable 32bytes desc always */
        rx_ctx.dsize = 1;
 
@@ -1939,6 +1948,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
 }
 
+/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called from a VF message to synchronize the service with a potential
+ * VF reset state
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+       int i;
+
+       /* When handling some messages, it needs VF state to be set.
+        * It is possible that this flag is cleared during VF reset,
+        * so there is a need to wait until the end of the reset to
+        * handle the request message correctly.
+        */
+       for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+               if (test_bit(state, &vf->vf_states))
+                       return true;
+               usleep_range(10000, 20000);
+       }
+
+       return test_bit(state, &vf->vf_states);
+}
+
 /**
  * i40e_vc_get_version_msg
  * @vf: pointer to the VF info
@@ -1999,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        size_t len = 0;
        int ret;
 
-       if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -2105,20 +2140,6 @@ err:
        return ret;
 }
 
-/**
- * i40e_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself,
- * unlike other virtchnl messages, PF driver
- * doesn't send the response back to the VF
- **/
-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
-{
-       if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
-               i40e_reset_vf(vf, false);
-}
-
 /**
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
@@ -2136,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
        bool allmulti = false;
        bool alluni = false;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err_out;
        }
@@ -2217,13 +2238,14 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
        struct virtchnl_vsi_queue_config_info *qci =
            (struct virtchnl_vsi_queue_config_info *)msg;
        struct virtchnl_queue_pair_info *qpi;
-       struct i40e_pf *pf = vf->pf;
        u16 vsi_id, vsi_queue_id = 0;
-       u16 num_qps_all = 0;
+       struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
        int i, j = 0, idx = 0;
+       struct i40e_vsi *vsi;
+       u16 num_qps_all = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2310,9 +2332,15 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
                pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
                        qci->num_queue_pairs;
        } else {
-               for (i = 0; i < vf->num_tc; i++)
-                       pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
-                              vf->ch[i].num_qps;
+               for (i = 0; i < vf->num_tc; i++) {
+                       vsi = pf->vsi[vf->ch[i].vsi_idx];
+                       vsi->num_queue_pairs = vf->ch[i].num_qps;
+
+                       if (i40e_update_adq_vsi_queues(vsi, i)) {
+                               aq_ret = I40E_ERR_CONFIG;
+                               goto error_param;
+                       }
+               }
        }
 
 error_param:
@@ -2366,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2538,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2588,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
        u8 cur_pairs = vf->num_queue_pairs;
        struct i40e_pf *pf = vf->pf;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
                return -EINVAL;
 
        if (req_pairs > I40E_MAX_VF_QUEUES) {
@@ -2607,8 +2635,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
        } else {
                /* successful request */
                vf->num_req_queues = req_pairs;
-               i40e_vc_notify_vf_reset(vf);
-               i40e_reset_vf(vf, false);
+               i40e_vc_reset_vf(vf, true);
                return 0;
        }
 
@@ -2634,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
 
        memset(&stats, 0, sizeof(struct i40e_eth_stats));
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2751,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -2823,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -2967,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -3087,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
        struct i40e_vsi *vsi = NULL;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
-           (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+           vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3118,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        u16 i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
-           (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+           vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3153,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int len = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3189,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
        struct i40e_hw *hw = &pf->hw;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3214,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3240,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3467,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i, ret;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3598,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i, ret;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err_out;
        }
@@ -3707,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        u64 speed = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3796,15 +3823,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 
        /* set this flag only after making sure all inputs are sane */
        vf->adq_enabled = true;
-       /* num_req_queues is set when user changes number of queues via ethtool
-        * and this causes issue for default VSI(which depends on this variable)
-        * when ADq is enabled, hence reset it.
-        */
-       vf->num_req_queues = 0;
 
        /* reset the VF in order to allocate resources */
-       i40e_vc_notify_vf_reset(vf);
-       i40e_reset_vf(vf, false);
+       i40e_vc_reset_vf(vf, true);
 
        return I40E_SUCCESS;
 
@@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3844,8 +3865,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
        }
 
        /* reset the VF in order to allocate resources */
-       i40e_vc_notify_vf_reset(vf);
-       i40e_reset_vf(vf, false);
+       i40e_vc_reset_vf(vf, true);
 
        return I40E_SUCCESS;
 
@@ -3907,7 +3927,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
                i40e_vc_notify_vf_link_state(vf);
                break;
        case VIRTCHNL_OP_RESET_VF:
-               i40e_vc_reset_vf_msg(vf);
+               i40e_vc_reset_vf(vf, false);
                ret = 0;
                break;
        case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
@@ -4161,7 +4181,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        /* Force the VF interface down so it has to bring up with new MAC
         * address
         */
-       i40e_vc_disable_vf(vf);
+       i40e_vc_reset_vf(vf, true);
        dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
 
 error_param:
@@ -4169,34 +4189,6 @@ error_param:
        return ret;
 }
 
-/**
- * i40e_vsi_has_vlans - True if VSI has configured VLANs
- * @vsi: pointer to the vsi
- *
- * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
- * we have no configured VLANs. Do not call while holding the
- * mac_filter_hash_lock.
- */
-static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
-{
-       bool have_vlans;
-
-       /* If we have a port VLAN, then the VSI cannot have any VLANs
-        * configured, as all MAC/VLAN filters will be assigned to the PVID.
-        */
-       if (vsi->info.pvid)
-               return false;
-
-       /* Since we don't have a PVID, we know that if the device is in VLAN
-        * mode it must be because of a VLAN filter configured on this VSI.
-        */
-       spin_lock_bh(&vsi->mac_filter_hash_lock);
-       have_vlans = i40e_is_vsi_in_vlan(vsi);
-       spin_unlock_bh(&vsi->mac_filter_hash_lock);
-
-       return have_vlans;
-}
-
 /**
  * i40e_ndo_set_vf_port_vlan
  * @netdev: network interface device structure
@@ -4253,19 +4245,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
                /* duplicate request, so just return success */
                goto error_pvid;
 
-       if (i40e_vsi_has_vlans(vsi)) {
-               dev_err(&pf->pdev->dev,
-                       "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
-                       vf_id);
-               /* Administrator Error - knock the VF offline until he does
-                * the right thing by reconfiguring his network correctly
-                * and then reloading the VF driver.
-                */
-               i40e_vc_disable_vf(vf);
-               /* During reset the VF got a new VSI, so refresh the pointer. */
-               vsi = pf->vsi[vf->lan_vsi_idx];
-       }
-
+       i40e_vc_reset_vf(vf, true);
+       /* During reset the VF got a new VSI, so refresh a pointer. */
+       vsi = pf->vsi[vf->lan_vsi_idx];
        /* Locked once because multiple functions below iterate list */
        spin_lock_bh(&vsi->mac_filter_hash_lock);
 
@@ -4641,7 +4623,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
                goto out;
 
        vf->trusted = setting;
-       i40e_vc_disable_vf(vf);
+       i40e_vc_reset_vf(vf, true);
        dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
                 vf_id, setting ? "" : "un");
 
index 091e32c1bb46fa12dc4a91afa10293ccac535cfe..49575a640a84c5e6c3abeaaae05427818b06fd87 100644 (file)
@@ -18,6 +18,8 @@
 
 #define I40E_MAX_VF_PROMISC_FLAGS      3
 
+#define I40E_VF_STATE_WAIT_COUNT       20
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
        I40E_QUEUE_CTRL_UNKNOWN = 0,
index e6e7c1da47fbe5e9746afe0253810e82f2ae1439..3789269ce741d40219dbf5202e5eab2dfd0793f5 100644 (file)
@@ -39,6 +39,7 @@
 #include "iavf_txrx.h"
 #include "iavf_fdir.h"
 #include "iavf_adv_rss.h"
+#include <linux/bitmap.h>
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 #define PFX "iavf: "
@@ -304,6 +305,7 @@ struct iavf_adapter {
 #define IAVF_FLAG_AQ_DEL_FDIR_FILTER           BIT(26)
 #define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG           BIT(27)
 #define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG           BIT(28)
+#define IAVF_FLAG_AQ_REQUEST_STATS             BIT(29)
 
        /* OS defined structs */
        struct net_device *netdev;
@@ -443,6 +445,7 @@ int iavf_up(struct iavf_adapter *adapter);
 void iavf_down(struct iavf_adapter *adapter);
 int iavf_process_config(struct iavf_adapter *adapter);
 void iavf_schedule_reset(struct iavf_adapter *adapter);
+void iavf_schedule_request_stats(struct iavf_adapter *adapter);
 void iavf_reset(struct iavf_adapter *adapter);
 void iavf_set_ethtool_ops(struct net_device *netdev);
 void iavf_update_stats(struct iavf_adapter *adapter);
@@ -500,4 +503,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                        const u8 *macaddr);
+int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
 #endif /* _IAVF_H_ */
index 5a359a0a20ecc2f4a0f6a75b07e45b19c48bfadf..461f5237a2f889f203be69379caf43b541036f56 100644 (file)
@@ -354,6 +354,9 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
        struct iavf_adapter *adapter = netdev_priv(netdev);
        unsigned int i;
 
+       /* Explicitly request stats refresh */
+       iavf_schedule_request_stats(adapter);
+
        iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
 
        rcu_read_lock();
@@ -612,23 +615,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
-       new_tx_count = clamp_t(u32, ring->tx_pending,
-                              IAVF_MIN_TXD,
-                              IAVF_MAX_TXD);
-       new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (ring->tx_pending > IAVF_MAX_TXD ||
+           ring->tx_pending < IAVF_MIN_TXD ||
+           ring->rx_pending > IAVF_MAX_RXD ||
+           ring->rx_pending < IAVF_MIN_RXD) {
+               netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+                          ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+                          IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+               return -EINVAL;
+       }
+
+       new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (new_tx_count != ring->tx_pending)
+               netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+                           new_tx_count);
 
-       new_rx_count = clamp_t(u32, ring->rx_pending,
-                              IAVF_MIN_RXD,
-                              IAVF_MAX_RXD);
-       new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (new_rx_count != ring->rx_pending)
+               netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+                           new_rx_count);
 
        /* if nothing to do return success */
        if ((new_tx_count == adapter->tx_desc_count) &&
-           (new_rx_count == adapter->rx_desc_count))
+           (new_rx_count == adapter->rx_desc_count)) {
+               netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
                return 0;
+       }
+
+       if (new_tx_count != adapter->tx_desc_count) {
+               netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+                          adapter->tx_desc_count, new_tx_count);
+               adapter->tx_desc_count = new_tx_count;
+       }
 
-       adapter->tx_desc_count = new_tx_count;
-       adapter->rx_desc_count = new_rx_count;
+       if (new_rx_count != adapter->rx_desc_count) {
+               netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+                          adapter->rx_desc_count, new_rx_count);
+               adapter->rx_desc_count = new_rx_count;
+       }
 
        if (netif_running(netdev)) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
@@ -723,12 +747,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
  *
  * Change the ITR settings for a specific queue.
  **/
-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
-                                  struct ethtool_coalesce *ec, int queue)
+static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
+                                 struct ethtool_coalesce *ec, int queue)
 {
        struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
        struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
        struct iavf_q_vector *q_vector;
+       u16 itr_setting;
+
+       itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+       if (ec->rx_coalesce_usecs != itr_setting &&
+           ec->use_adaptive_rx_coalesce) {
+               netif_info(adapter, drv, adapter->netdev,
+                          "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
+               return -EINVAL;
+       }
+
+       itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+       if (ec->tx_coalesce_usecs != itr_setting &&
+           ec->use_adaptive_tx_coalesce) {
+               netif_info(adapter, drv, adapter->netdev,
+                          "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+               return -EINVAL;
+       }
 
        rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
        tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
@@ -751,6 +794,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
         * the Tx and Rx ITR values based on the values we have entered
         * into the q_vector, no need to write the values now.
         */
+       return 0;
 }
 
 /**
@@ -792,9 +836,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
         */
        if (queue < 0) {
                for (i = 0; i < adapter->num_active_queues; i++)
-                       iavf_set_itr_per_queue(adapter, ec, i);
+                       if (iavf_set_itr_per_queue(adapter, ec, i))
+                               return -EINVAL;
        } else if (queue < adapter->num_active_queues) {
-               iavf_set_itr_per_queue(adapter, ec, queue);
+               if (iavf_set_itr_per_queue(adapter, ec, queue))
+                       return -EINVAL;
        } else {
                netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
                           adapter->num_active_queues - 1);
@@ -1776,6 +1822,7 @@ static int iavf_set_channels(struct net_device *netdev,
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
        u32 num_req = ch->combined_count;
+       int i;
 
        if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
            adapter->num_tc) {
@@ -1786,7 +1833,7 @@ static int iavf_set_channels(struct net_device *netdev,
        /* All of these should have already been checked by ethtool before this
         * even gets to us, but just to be sure.
         */
-       if (num_req > adapter->vsi_res->num_queue_pairs)
+       if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
                return -EINVAL;
 
        if (num_req == adapter->num_active_queues)
@@ -1798,6 +1845,20 @@ static int iavf_set_channels(struct net_device *netdev,
        adapter->num_req_queues = num_req;
        adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
        iavf_schedule_reset(adapter);
+
+       /* wait for the reset is done */
+       for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
+               msleep(IAVF_RESET_WAIT_MS);
+               if (adapter->flags & IAVF_FLAG_RESET_PENDING)
+                       continue;
+               break;
+       }
+       if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
+               adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+               adapter->num_active_queues = num_req;
+               return -EOPNOTSUPP;
+       }
+
        return 0;
 }
 
@@ -1844,14 +1905,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;
-       if (!indir)
-               return 0;
-
-       memcpy(key, adapter->rss_key, adapter->rss_key_size);
+       if (key)
+               memcpy(key, adapter->rss_key, adapter->rss_key_size);
 
-       /* Each 32 bits pointed by 'indir' is stored with a lut entry */
-       for (i = 0; i < adapter->rss_lut_size; i++)
-               indir[i] = (u32)adapter->rss_lut[i];
+       if (indir)
+               /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+               for (i = 0; i < adapter->rss_lut_size; i++)
+                       indir[i] = (u32)adapter->rss_lut[i];
 
        return 0;
 }
index 847d67e32a5401669b88346e29a038100414c730..cfdbf8c08d18bcd82050dc089705850c640481e4 100644 (file)
@@ -147,7 +147,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
  *
  * Returns 0 on success, negative on failure
  **/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
 {
        unsigned int wait, delay = 10;
 
@@ -174,6 +174,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
        }
 }
 
+/**
+ * iavf_schedule_request_stats - Set the flags and schedule statistics request
+ * @adapter: board private structure
+ *
+ * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
+ * request and refresh ethtool stats
+ **/
+void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+{
+       adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+}
+
 /**
  * iavf_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
@@ -696,6 +709,21 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
 }
 
+/**
+ * iavf_restore_filters
+ * @adapter: board private structure
+ *
+ * Restore existing non MAC filters when VF netdev comes back up
+ **/
+static void iavf_restore_filters(struct iavf_adapter *adapter)
+{
+       u16 vid;
+
+       /* re-add all VLAN filters */
+       for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
+               iavf_add_vlan(adapter, vid);
+}
+
 /**
  * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
  * @netdev: network device struct
@@ -709,8 +737,11 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
 
        if (!VLAN_ALLOWED(adapter))
                return -EIO;
+
        if (iavf_add_vlan(adapter, vid) == NULL)
                return -ENOMEM;
+
+       set_bit(vid, adapter->vsi.active_vlans);
        return 0;
 }
 
@@ -725,11 +756,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
 
-       if (VLAN_ALLOWED(adapter)) {
-               iavf_del_vlan(adapter, vid);
-               return 0;
-       }
-       return -EIO;
+       iavf_del_vlan(adapter, vid);
+       clear_bit(vid, adapter->vsi.active_vlans);
+
+       return 0;
 }
 
 /**
@@ -1639,8 +1669,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
                iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
                return 0;
        }
-
-       if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
+       if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
            (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
                iavf_set_promiscuous(adapter, 0);
                return 0;
@@ -1688,6 +1717,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
                iavf_del_adv_rss_cfg(adapter);
                return 0;
        }
+       if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
+               iavf_request_stats(adapter);
+               return 0;
+       }
+
        return -EAGAIN;
 }
 
@@ -2123,8 +2157,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
 
        iavf_free_misc_irq(adapter);
        iavf_reset_interrupt_capability(adapter);
-       iavf_free_queues(adapter);
        iavf_free_q_vectors(adapter);
+       iavf_free_queues(adapter);
        memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
        iavf_shutdown_adminq(&adapter->hw);
        adapter->netdev->flags &= ~IFF_UP;
@@ -2152,7 +2186,6 @@ static void iavf_reset_task(struct work_struct *work)
        struct net_device *netdev = adapter->netdev;
        struct iavf_hw *hw = &adapter->hw;
        struct iavf_mac_filter *f, *ftmp;
-       struct iavf_vlan_filter *vlf;
        struct iavf_cloud_filter *cf;
        u32 reg_val;
        int i = 0, err;
@@ -2215,6 +2248,7 @@ static void iavf_reset_task(struct work_struct *work)
        }
 
        pci_set_master(adapter->pdev);
+       pci_restore_msi_state(adapter->pdev);
 
        if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
@@ -2233,6 +2267,7 @@ continue_reset:
                   (adapter->state == __IAVF_RESETTING));
 
        if (running) {
+               netdev->flags &= ~IFF_UP;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                adapter->link_up = false;
@@ -2292,11 +2327,6 @@ continue_reset:
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->add = true;
        }
-       /* re-add all VLAN filters */
-       list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
-               vlf->add = true;
-       }
-
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
        /* check if TCs are running and re-add all cloud filters */
@@ -2310,7 +2340,6 @@ continue_reset:
        spin_unlock_bh(&adapter->cloud_filter_list_lock);
 
        adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
-       adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
        iavf_misc_irq_enable(adapter);
 
@@ -2344,7 +2373,7 @@ continue_reset:
                 * to __IAVF_RUNNING
                 */
                iavf_up_complete(adapter);
-
+               netdev->flags |= IFF_UP;
                iavf_irq_enable(adapter, true);
        } else {
                iavf_change_state(adapter, __IAVF_DOWN);
@@ -2357,8 +2386,10 @@ continue_reset:
 reset_err:
        mutex_unlock(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
-       if (running)
+       if (running) {
                iavf_change_state(adapter, __IAVF_RUNNING);
+               netdev->flags |= IFF_UP;
+       }
        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
        iavf_close(netdev);
 }
@@ -2410,7 +2441,7 @@ static void iavf_adminq_task(struct work_struct *work)
 
        /* check for error indications */
        val = rd32(hw, hw->aq.arq.len);
-       if (val == 0xdeadbeef) /* indicates device in reset */
+       if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
                goto freedom;
        oldval = val;
        if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
@@ -3095,8 +3126,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
                return -ENOMEM;
 
        while (!mutex_trylock(&adapter->crit_lock)) {
-               if (--count == 0)
-                       goto err;
+               if (--count == 0) {
+                       kfree(filter);
+                       return err;
+               }
                udelay(1);
        }
 
@@ -3107,11 +3140,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
        /* start out with flow type and eth type IPv4 to begin with */
        filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
        err = iavf_parse_cls_flower(adapter, cls_flower, filter);
-       if (err < 0)
+       if (err)
                goto err;
 
        err = iavf_handle_tclass(adapter, tc, filter);
-       if (err < 0)
+       if (err)
                goto err;
 
        /* add filter to the list */
@@ -3308,6 +3341,9 @@ static int iavf_open(struct net_device *netdev)
 
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
+       /* Restore VLAN filters that were removed with IFF_DOWN */
+       iavf_restore_filters(adapter);
+
        iavf_configure(adapter);
 
        iavf_up_complete(adapter);
@@ -3415,11 +3451,16 @@ static int iavf_set_features(struct net_device *netdev,
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
 
-       /* Don't allow changing VLAN_RX flag when adapter is not capable
-        * of VLAN offload
+       /* Don't allow enabling VLAN features when adapter is not capable
+        * of VLAN offload/filtering
         */
        if (!VLAN_ALLOWED(adapter)) {
-               if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
+               netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+                                        NETIF_F_HW_VLAN_CTAG_TX |
+                                        NETIF_F_HW_VLAN_CTAG_FILTER);
+               if (features & (NETIF_F_HW_VLAN_CTAG_RX |
+                               NETIF_F_HW_VLAN_CTAG_TX |
+                               NETIF_F_HW_VLAN_CTAG_FILTER))
                        return -EINVAL;
        } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
                if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3503,7 +3544,8 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
 
-       if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+       if (adapter->vf_res &&
+           !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
                features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
                              NETIF_F_HW_VLAN_CTAG_RX |
                              NETIF_F_HW_VLAN_CTAG_FILTER);
index 8c3f0f77cb5742679ba2205c5ae68c8942dd525e..d60bf7c2120063eb31093fd7403a242ecf68eb12 100644 (file)
@@ -607,7 +607,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
                if (f->add)
                        count++;
        }
-       if (!count) {
+       if (!count || !VLAN_ALLOWED(adapter)) {
                adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                return;
@@ -673,9 +673,19 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
 
        spin_lock_bh(&adapter->mac_vlan_list_lock);
 
-       list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-               if (f->remove)
+       list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+               /* since VLAN capabilities are not allowed, we dont want to send
+                * a VLAN delete request because it will most likely fail and
+                * create unnecessary errors/noise, so just free the VLAN
+                * filters marked for removal to enable bailing out before
+                * sending a virtchnl message
+                */
+               if (f->remove && !VLAN_ALLOWED(adapter)) {
+                       list_del(&f->list);
+                       kfree(f);
+               } else if (f->remove) {
                        count++;
+               }
        }
        if (!count) {
                adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
@@ -784,6 +794,8 @@ void iavf_request_stats(struct iavf_adapter *adapter)
                /* no error message, this isn't crucial */
                return;
        }
+
+       adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
        adapter->current_op = VIRTCHNL_OP_GET_STATS;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        /* queue maps are ignored for this message - only the vsi is used */
@@ -1722,8 +1734,37 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                }
                spin_lock_bh(&adapter->mac_vlan_list_lock);
                iavf_add_filter(adapter, adapter->hw.mac.addr);
+
+               if (VLAN_ALLOWED(adapter)) {
+                       if (!list_empty(&adapter->vlan_filter_list)) {
+                               struct iavf_vlan_filter *vlf;
+
+                               /* re-add all VLAN filters over virtchnl */
+                               list_for_each_entry(vlf,
+                                                   &adapter->vlan_filter_list,
+                                                   list)
+                                       vlf->add = true;
+
+                               adapter->aq_required |=
+                                       IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+                       }
+               }
+
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                iavf_process_config(adapter);
+
+               /* unlock crit_lock before acquiring rtnl_lock as other
+                * processes holding rtnl_lock could be waiting for the same
+                * crit_lock
+                */
+               mutex_unlock(&adapter->crit_lock);
+               rtnl_lock();
+               netdev_update_features(adapter->netdev);
+               rtnl_unlock();
+               if (iavf_lock_timeout(&adapter->crit_lock, 10000))
+                       dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
+                                __FUNCTION__);
+
                }
                break;
        case VIRTCHNL_OP_ENABLE_QUEUES:
index 7fdeb411b6df44063b9bf74db5840d9d0a4c51ed..3eb01731e496b34d2ac3d681b6a5c25abfbe6a54 100644 (file)
@@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
 
        new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
 
+       if (!bwcfg)
+               new_cfg->etscfg.tcbwtable[0] = 100;
+
        if (!bwrec)
                new_cfg->etsrec.tcbwtable[0] = 100;
 
@@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
        if (mode == pf->dcbx_cap)
                return ICE_DCB_NO_HW_CHG;
 
-       pf->dcbx_cap = mode;
        qos_cfg = &pf->hw.port_info->qos_cfg;
-       if (mode & DCB_CAP_DCBX_VER_CEE) {
-               if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
-                       return ICE_DCB_NO_HW_CHG;
+
+       /* DSCP configuration is not DCBx negotiated */
+       if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+               return ICE_DCB_NO_HW_CHG;
+
+       pf->dcbx_cap = mode;
+
+       if (mode & DCB_CAP_DCBX_VER_CEE)
                qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
-       } else {
+       else
                qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
-       }
 
        dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
        return ICE_DCB_HW_CHG_RST;
index 38960bcc384c03f94439977feefb27cdb96aff7a..b6e7f47c8c78fd68ddf7c4f863a8383d1e41e6ca 100644 (file)
@@ -1268,7 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
                bool is_tun = tun == ICE_FD_HW_SEG_TUN;
                int err;
 
-               if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
+               if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
                        continue;
                err = ice_fdir_write_fltr(pf, input, add, is_tun);
                if (err)
@@ -1652,7 +1652,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
        }
 
        /* return error if not an update and no available filters */
-       fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
+       fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
        if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
            ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
                dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
index cbd8424631e32a0a854bd468da9a70b41fb88ea6..4dca009bdd50f2fb7771ab5ac25d0b7863119d01 100644 (file)
@@ -924,7 +924,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
                memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
                loc = pkt;
        } else {
-               if (!ice_get_open_tunnel_port(hw, &tnl_port))
+               if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
                        return ICE_ERR_DOES_NOT_EXIST;
                if (!ice_fdir_pkt[idx].tun_pkt)
                        return ICE_ERR_PARAM;
index 23cfcceb1536dcac3cebf75cad36e58dd622c60f..6ad1c255972439a0dc60e256428f062e5138bf13 100644 (file)
@@ -1899,9 +1899,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
  * ice_get_open_tunnel_port - retrieve an open tunnel port
  * @hw: pointer to the HW structure
  * @port: returns open port
+ * @type: type of tunnel, can be TNL_LAST if it doesn't matter
  */
 bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+                        enum ice_tunnel_type type)
 {
        bool res = false;
        u16 i;
@@ -1909,7 +1911,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
        mutex_lock(&hw->tnl_lock);
 
        for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
-               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
+                   (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
                        *port = hw->tnl.tbl[i].port;
                        res = true;
                        break;
index 344c2637facda3c821f1fed4f81ae13214334c92..a2863f38fd1fd5f6ef32d8375a5acbb31e4c2be9 100644 (file)
@@ -33,7 +33,8 @@ enum ice_status
 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
                   unsigned long *bm, struct list_head *fv_list);
 bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+                        enum ice_tunnel_type type);
 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
                            unsigned int idx, struct udp_tunnel_info *ti);
 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
index 40562600a8cf2b38c1f43590297cddb2407fd4bf..09a3297cd63cd384a1ce01e2cf72c62744ee499b 100644 (file)
@@ -89,8 +89,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
        if (!vsi->rx_rings)
                goto err_rings;
 
-       /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
-       vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
+       /* txq_map needs to have enough space to track both Tx (stack) rings
+        * and XDP rings; at this point vsi->num_xdp_txq might not be set,
+        * so use num_possible_cpus() as we want to always provide XDP ring
+        * per CPU, regardless of queue count settings from user that might
+        * have come from ethtool's set_channels() callback;
+        */
+       vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
                                    sizeof(*vsi->txq_map), GFP_KERNEL);
 
        if (!vsi->txq_map)
index f099797f35e375a254d564b41f3df915df1f404d..73c61cdb036f9a86302aaaf76b9f367fe1099129 100644 (file)
@@ -2609,7 +2609,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
                        ice_stat_str(status));
                goto clear_xdp_rings;
        }
-       ice_vsi_assign_bpf_prog(vsi, prog);
+
+       /* assign the prog only when it's not already present on VSI;
+        * this flow is a subject of both ethtool -L and ndo_bpf flows;
+        * VSI rebuild that happens under ethtool -L can expose us to
+        * the bpf_prog refcount issues as we would be swapping same
+        * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
+        * on it as it would be treated as an 'old_prog'; for ndo_bpf
+        * this is not harmful as dev_xdp_install bumps the refcount
+        * before calling the op exposed by the driver;
+        */
+       if (!ice_is_xdp_ena_vsi(vsi))
+               ice_vsi_assign_bpf_prog(vsi, prog);
 
        return 0;
 clear_xdp_rings:
@@ -2785,6 +2796,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                if (xdp_ring_err)
                        NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
        } else {
+               /* safe to call even when prog == vsi->xdp_prog as
+                * dev_xdp_install in net/core/dev.c incremented prog's
+                * refcount so corresponding bpf_prog_put won't cause
+                * underflow
+                */
                ice_vsi_assign_bpf_prog(vsi, prog);
        }
 
@@ -5865,6 +5881,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
                netif_carrier_on(vsi->netdev);
        }
 
+       /* clear this now, and the first stats read will be used as baseline */
+       vsi->stat_offsets_loaded = false;
+
        ice_service_task_schedule(pf);
 
        return 0;
@@ -5911,14 +5930,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st
 /**
  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
  * @vsi: the VSI to be updated
+ * @vsi_stats: the stats struct to be updated
  * @rings: rings to work on
  * @count: number of rings
  */
 static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
-                            u16 count)
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+                            struct rtnl_link_stats64 *vsi_stats,
+                            struct ice_tx_ring **rings, u16 count)
 {
-       struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
        u16 i;
 
        for (i = 0; i < count; i++) {
@@ -5942,15 +5962,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
  */
 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 {
-       struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+       struct rtnl_link_stats64 *vsi_stats;
        u64 pkts, bytes;
        int i;
 
-       /* reset netdev stats */
-       vsi_stats->tx_packets = 0;
-       vsi_stats->tx_bytes = 0;
-       vsi_stats->rx_packets = 0;
-       vsi_stats->rx_bytes = 0;
+       vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
+       if (!vsi_stats)
+               return;
 
        /* reset non-netdev (extended) stats */
        vsi->tx_restart = 0;
@@ -5962,7 +5980,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
        rcu_read_lock();
 
        /* update Tx rings counters */
-       ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
+       ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+                                    vsi->num_txq);
 
        /* update Rx rings counters */
        ice_for_each_rxq(vsi, i) {
@@ -5977,10 +5996,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 
        /* update XDP Tx rings counters */
        if (ice_is_xdp_ena_vsi(vsi))
-               ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+               ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
                                             vsi->num_xdp_txq);
 
        rcu_read_unlock();
+
+       vsi->net_stats.tx_packets = vsi_stats->tx_packets;
+       vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
+       vsi->net_stats.rx_packets = vsi_stats->rx_packets;
+       vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+
+       kfree(vsi_stats);
 }
 
 /**
index 793f4a9fc2cdb03fc233c4800eb52f546bdb89ac..183d9303389064dcd819eb0ed1f9173d333e6794 100644 (file)
@@ -3796,10 +3796,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
  * ice_find_recp - find a recipe
  * @hw: pointer to the hardware structure
  * @lkup_exts: extension sequence to match
+ * @tun_type: type of recipe tunnel
  *
  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
  */
-static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+static u16
+ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
+             enum ice_sw_tunnel_type tun_type)
 {
        bool refresh_required = true;
        struct ice_sw_recipe *recp;
@@ -3860,8 +3863,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
                        }
                        /* If for "i"th recipe the found was never set to false
                         * then it means we found our match
+                        * Also tun type of recipe needs to be checked
                         */
-                       if (found)
+                       if (found && recp[i].tun_type == tun_type)
                                return i; /* Return the recipe ID */
                }
        }
@@ -4651,11 +4655,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        }
 
        /* Look for a recipe which matches our requested fv / mask list */
-       *rid = ice_find_recp(hw, lkup_exts);
+       *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
        if (*rid < ICE_MAX_NUM_RECIPES)
                /* Success if found a recipe that match the existing criteria */
                goto err_unroll;
 
+       rm->tun_type = rinfo->tun_type;
        /* Recipe we need does not exist, add a recipe */
        status = ice_add_sw_recipe(hw, rm, profiles);
        if (status)
@@ -4958,11 +4963,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
 
        switch (tun_type) {
        case ICE_SW_TUN_VXLAN:
+               if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
+                       return ICE_ERR_CFG;
+               break;
        case ICE_SW_TUN_GENEVE:
-               if (!ice_get_open_tunnel_port(hw, &open_port))
+               if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
                        return ICE_ERR_CFG;
                break;
-
        default:
                /* Nothing needs to be done for this tunnel type */
                return 0;
@@ -5555,7 +5562,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        if (status)
                return status;
 
-       rid = ice_find_recp(hw, &lkup_exts);
+       rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
        /* If did not find a recipe that match the existing criteria */
        if (rid == ICE_MAX_NUM_RECIPES)
                return ICE_ERR_PARAM;
index e5d23feb6701772e921e9e1df835a094359ea6ac..25cca5c4ae575bbc237ddaa63c434231ce074d90 100644 (file)
@@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
        return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
 }
 
-static enum ice_protocol_type
-ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
 {
-       if (inner) {
-               switch (ip_proto) {
-               case IPPROTO_UDP:
-                       return ICE_UDP_ILOS;
-               }
-       } else {
-               switch (ip_proto) {
-               case IPPROTO_TCP:
-                       return ICE_TCP_IL;
-               case IPPROTO_UDP:
-                       return ICE_UDP_OF;
-               }
+       switch (ip_proto) {
+       case IPPROTO_TCP:
+               return ICE_TCP_IL;
+       case IPPROTO_UDP:
+               return ICE_UDP_ILOS;
        }
 
        return 0;
@@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
                i++;
        }
 
-       if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
-               list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+       if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
+           hdr->l3_key.ip_proto == IPPROTO_UDP) {
+               list[i].type = ICE_UDP_OF;
                list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
                list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
                i++;
@@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
                     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
                struct ice_tc_l4_hdr *l4_key, *l4_mask;
 
-               list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+               list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
                l4_key = &headers->l4_key;
                l4_mask = &headers->l4_mask;
 
@@ -802,7 +795,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
                headers->l3_mask.ttl = match.mask->ttl;
        }
 
-       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
+           fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
                struct flow_match_ports match;
 
                flow_rule_match_enc_ports(rule, &match);
index 217ff5e9a6f1434d00c67b8945048411ab315024..6427e7ec93de6a6d11d65c9a44c3382c5610dcdf 100644 (file)
@@ -1617,6 +1617,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
                ice_vc_set_default_allowlist(vf);
 
                ice_vf_fdir_exit(vf);
+               ice_vf_fdir_init(vf);
                /* clean VF control VSI when resetting VFs since it should be
                 * setup only when VF creates its first FDIR rule.
                 */
@@ -1747,6 +1748,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
        }
 
        ice_vf_fdir_exit(vf);
+       ice_vf_fdir_init(vf);
        /* clean VF control VSI when resetting VF since it should be setup
         * only when VF creates its first FDIR rule.
         */
@@ -2021,6 +2023,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
        if (ret)
                goto err_unroll_sriov;
 
+       /* rearm global interrupts */
+       if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+               ice_irq_dynamic_ena(hw, NULL, NULL);
+
        return 0;
 
 err_unroll_sriov:
index ff55cb415b110fb9e63bd8adc6a401e1c78887ad..bb9a8084729888ab764cf9b6834edf2085068b79 100644 (file)
@@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        while (i--) {
                dma = xsk_buff_xdp_get_dma(*xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
+               rx_desc->wb.status_error0 = 0;
 
                rx_desc++;
                xdp++;
index 836be0d3b29105d48530e2ce6b3f8db13c730e71..fd54d3ef890bc191503d5ace25e91ead932a2098 100644 (file)
@@ -8026,7 +8026,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
        if (likely(napi_complete_done(napi, work_done)))
                igb_ring_irq_enable(q_vector);
 
-       return min(work_done, budget - 1);
+       return work_done;
 }
 
 /**
index 6433c909c6b26d3c0c19267312f2c8f95ba5f8a7..072391c494ce4f15693c31af38f1c2a094aee0aa 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/property.h>
 
 #include <asm/checksum.h>
 
@@ -239,6 +240,7 @@ ltq_etop_hw_init(struct net_device *dev)
 {
        struct ltq_etop_priv *priv = netdev_priv(dev);
        int i;
+       int err;
 
        ltq_pmu_enable(PMU_PPE);
 
@@ -273,7 +275,13 @@ ltq_etop_hw_init(struct net_device *dev)
 
                if (IS_TX(i)) {
                        ltq_dma_alloc_tx(&ch->dma);
-                       request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+                       err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+                       if (err) {
+                               netdev_err(dev,
+                                          "Unable to get Tx DMA IRQ %d\n",
+                                          irq);
+                               return err;
+                       }
                } else if (IS_RX(i)) {
                        ltq_dma_alloc_rx(&ch->dma);
                        for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -281,7 +289,13 @@ ltq_etop_hw_init(struct net_device *dev)
                                if (ltq_etop_alloc_skb(ch))
                                        return -ENOMEM;
                        ch->dma.desc = 0;
-                       request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+                       err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+                       if (err) {
+                               netdev_err(dev,
+                                          "Unable to get Rx DMA IRQ %d\n",
+                                          irq);
+                               return err;
+                       }
                }
                ch->dma.irq = irq;
        }
@@ -726,7 +740,7 @@ static struct platform_driver ltq_mii_driver = {
        },
 };
 
-int __init
+static int __init
 init_ltq_etop(void)
 {
        int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
index 62a97c46fba0550a1aa3dd742236c91a63a05811..ef878973b8597d234b29bebc5475f450e37c4771 100644 (file)
@@ -429,12 +429,14 @@ static const struct of_device_id orion_mdio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, orion_mdio_match);
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id orion_mdio_acpi_match[] = {
        { "MRVL0100", BUS_TYPE_SMI },
        { "MRVL0101", BUS_TYPE_XSMI },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
+#endif
 
 static struct platform_driver orion_mdio_driver = {
        .probe = orion_mdio_probe,
index 2b18d89d9756d8e08df8b0a698920f5267810677..6da8a595026bbbb08c8a99e288579455316f31ba 100644 (file)
@@ -2960,11 +2960,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
 
        if (priv->percpu_pools) {
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
                if (err < 0)
                        goto err_free_dma;
 
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
                if (err < 0)
                        goto err_unregister_rxq_short;
 
@@ -5017,11 +5017,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
                mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
        }
 
+       if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
+               netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
+                          mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
+               return -EINVAL;
+       }
+
        if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
-               if (port->xdp_prog) {
-                       netdev_err(dev, "Jumbo frames are not supported with XDP\n");
-                       return -EINVAL;
-               }
                if (priv->percpu_pools) {
                        netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
                        mvpp2_bm_switch_buffers(priv, false);
@@ -5307,8 +5309,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
        bool running = netif_running(port->dev);
        bool reset = !prog != !port->xdp_prog;
 
-       if (port->dev->mtu > ETH_DATA_LEN) {
-               NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
+       if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
+               NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
                return -EOPNOTSUPP;
        }
 
@@ -7456,7 +7458,7 @@ static int mvpp2_probe(struct platform_device *pdev)
 
        shared = num_present_cpus() - priv->nthreads;
        if (shared > 0)
-               bitmap_fill(&priv->lock_map,
+               bitmap_set(&priv->lock_map, 0,
                            min_t(int, shared, MVPP2_MAX_THREADS));
 
        for (i = 0; i < MVPP2_MAX_THREADS; i++) {
index cb56e171ddd4c27d420f8f38604f51c1275aa3db..3ca6b942ebe2539ab9fc2d10f8cbe48934157588 100644 (file)
@@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
                        goto free_regions;
                break;
        default:
-               return err;
+               goto free_regions;
        }
 
        mw->mbox_wq = alloc_workqueue(name,
index c7fd466a0efdc5deb5966e83c33b3cec1240b615..a09a507369ac3338edff46555fcff8242d7d1c0b 100644 (file)
@@ -236,10 +236,11 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
        u64 lmt_addr, val, tbl_base;
        int pf, vf, num_vfs, hw_vfs;
        void __iomem *lmt_map_base;
-       int index = 0, off = 0;
-       int bytes_not_copied;
        int buf_size = 10240;
+       size_t off = 0;
+       int index = 0;
        char *buf;
+       int ret;
 
        /* don't allow partial reads */
        if (*ppos != 0)
@@ -303,15 +304,17 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
        }
        off +=  scnprintf(&buf[off], buf_size - 1 - off, "\n");
 
-       bytes_not_copied = copy_to_user(buffer, buf, off);
+       ret = min(off, count);
+       if (copy_to_user(buffer, buf, ret))
+               ret = -EFAULT;
        kfree(buf);
 
        iounmap(lmt_map_base);
-       if (bytes_not_copied)
-               return -EFAULT;
+       if (ret < 0)
+               return ret;
 
-       *ppos = off;
-       return off;
+       *ppos = ret;
+       return ret;
 }
 
 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
index 0ef68fdd1f26bf4e80f62da41c0dd8e9471da228..61c20907315f4d7cfe86e19f927954c1dbc5bc40 100644 (file)
@@ -5,6 +5,8 @@
  *
  */
 
+#include <linux/module.h>
+
 #include "otx2_common.h"
 #include "otx2_ptp.h"
 
index 3ce6ccd0f53942e62db072a350b46dbbc6c0155d..b4599fe4ca8da61b30fdce2a5e6f298c127464b3 100644 (file)
@@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev,
 
        br_port = prestera_bridge_port_add(bridge, port->dev);
        if (IS_ERR(br_port)) {
-               err = PTR_ERR(br_port);
-               goto err_brport_create;
+               prestera_bridge_put(bridge);
+               return PTR_ERR(br_port);
        }
 
        err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
@@ -519,8 +519,6 @@ err_port_join:
        switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
 err_switchdev_offload:
        prestera_bridge_port_put(br_port);
-err_brport_create:
-       prestera_bridge_put(bridge);
        return err;
 }
 
@@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
                                                     prestera_port_obj_attr_set);
                break;
        default:
-               err = -EOPNOTSUPP;
+               return NOTIFY_DONE;
        }
 
        return notifier_from_errno(err);
index 066d79e4ecfc28d8bfc5fa7ced33f3192a339cb9..10238bedd694fe7752edb5734a2dc1aa4a5183b3 100644 (file)
@@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
                                       ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
-                                      ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+                                      ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
                                       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
                                       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
-                                      ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                      ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
-                                      ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                      ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
                                       ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
                                       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
index 3f6d5c38463723e23be94af802bdf707f9a7c4e2..f1c10f2bda780a1d4a9dabe3c1a126b80809e5e9 100644 (file)
@@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                bool carry_xdp_prog)
 {
        struct bpf_prog *xdp_prog;
-       int i, t;
+       int i, t, ret;
 
-       mlx4_en_copy_priv(tmp, priv, prof);
+       ret = mlx4_en_copy_priv(tmp, priv, prof);
+       if (ret) {
+               en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+                       __func__);
+               return ret;
+       }
 
        if (mlx4_en_alloc_resources(tmp)) {
                en_warn(priv,
index f71ec4d9d68e38af2b8a7d73e0e48ac430ae003d..a46284ca517200dde86231bf3aa741c226774e48 100644 (file)
@@ -339,6 +339,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_PAGE_FAULT_RESUME:
        case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
        case MLX5_CMD_OP_DEALLOC_SF:
+       case MLX5_CMD_OP_DESTROY_UCTX:
+       case MLX5_CMD_OP_DESTROY_UMEM:
+       case MLX5_CMD_OP_MODIFY_RQT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -444,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_CREATE_RQT:
-       case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:
 
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
@@ -464,9 +466,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
        case MLX5_CMD_OP_CREATE_UCTX:
-       case MLX5_CMD_OP_DESTROY_UCTX:
        case MLX5_CMD_OP_CREATE_UMEM:
-       case MLX5_CMD_OP_DESTROY_UMEM:
        case MLX5_CMD_OP_ALLOC_MEMIC:
        case MLX5_CMD_OP_MODIFY_XRQ:
        case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
index 02e77ffe5c3e4f68e3828482f07c2f2a12fdee68..5371ad0a12eb5651e3227edb1ad785b49c25a0d3 100644 (file)
@@ -164,13 +164,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
        MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, in, uid, cq->uid);
        err = mlx5_cmd_exec_in(dev, destroy_cq, in);
+       if (err)
+               return err;
 
        synchronize_irq(cq->irqn);
-
        mlx5_cq_put(cq);
        wait_for_completion(&cq->free);
 
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 
index 07c8d9811bc811249ef42ff583cf9c15827a5829..10d195042ab554029304343e76922967f632b55a 100644 (file)
@@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
        if (!mlx5_debugfs_root)
                return;
 
-       if (cq->dbg)
+       if (cq->dbg) {
                rem_res_tree(cq->dbg);
+               cq->dbg = NULL;
+       }
 }
index 14295384799606cc69cdc7e421ad835d9f32d67c..0015a81eb9a17b6550cb5038baa8b5271b7580de 100644 (file)
@@ -13,6 +13,9 @@ struct mlx5e_rx_res {
        unsigned int max_nch;
        u32 drop_rqn;
 
+       struct mlx5e_packet_merge_param pkt_merge_param;
+       struct rw_semaphore pkt_merge_param_sem;
+
        struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
        bool rss_active;
        u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
        if (err)
                goto out;
 
+       /* Separated from the channels RQs, does not share pkt_merge state with them */
        mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
                                    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
                                    inner_ft_support);
@@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
        res->max_nch = max_nch;
        res->drop_rqn = drop_rqn;
 
+       res->pkt_merge_param = *init_pkt_merge_param;
+       init_rwsem(&res->pkt_merge_param_sem);
+
        err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
        if (err)
                goto err_out;
@@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
        return mlx5e_tir_get_tirn(&res->ptp.tir);
 }
 
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
 {
        return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
 }
@@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
        if (!builder)
                return -ENOMEM;
 
+       down_write(&res->pkt_merge_param_sem);
+       res->pkt_merge_param = *pkt_merge_param;
+
        mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
 
        final_err = 0;
@@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
                }
        }
 
+       up_write(&res->pkt_merge_param_sem);
        mlx5e_tir_builder_free(builder);
        return final_err;
 }
@@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
 {
        return mlx5e_rss_get_hash(res->rss[0]);
 }
+
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+                               struct mlx5e_tir *tir)
+{
+       bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+       struct mlx5e_tir_builder *builder;
+       u32 rqtn;
+       int err;
+
+       builder = mlx5e_tir_builder_alloc(false);
+       if (!builder)
+               return -ENOMEM;
+
+       rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
+
+       mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
+                                   inner_ft_support);
+       mlx5e_tir_builder_build_direct(builder);
+       mlx5e_tir_builder_build_tls(builder);
+       down_read(&res->pkt_merge_param_sem);
+       mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+       err = mlx5e_tir_init(tir, builder, res->mdev, false);
+       up_read(&res->pkt_merge_param_sem);
+
+       mlx5e_tir_builder_free(builder);
+
+       return err;
+}
index d09f7d174a5180a18cb0db8948bf043323325e82..b39b20a720e0fa244b2047d2ee3ad969f18bfdc9 100644 (file)
@@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
 u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
 u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
 
-/* RQTN getters for modules that create their own TIRs */
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-
 /* Activate/deactivate API */
 void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
 void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
 /* Workaround for hairpin */
 struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
 
+/* Accel TIRs */
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+                               struct mlx5e_tir *tir);
 #endif /* __MLX5_EN_RX_RES_H__ */
index c1c6e74c79c4f3b686039f3c78a13a987d77d446..2445e2ae3324ea6be0a44f00fbd4ecac84a2600b 100644 (file)
@@ -1356,9 +1356,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 int
 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
                        struct mlx5_flow_attr *attr,
+                       struct mlx5e_tc_mod_hdr_acts *mod_acts,
                        const struct flow_action_entry *act,
                        struct netlink_ext_ack *extack)
 {
+       bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
+       int err;
+
        if (!priv) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "offload of ct action isn't available");
@@ -1369,6 +1373,17 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
        attr->ct_attr.ct_action = act->ct.action;
        attr->ct_attr.nf_ft = act->ct.flow_table;
 
+       if (!clear_action)
+               goto out;
+
+       err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear");
+               return err;
+       }
+       attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+out:
        return 0;
 }
 
@@ -1898,23 +1913,16 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
 
        memcpy(pre_ct_attr, attr, attr_sz);
 
-       err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
-       if (err) {
-               ct_dbg("Failed to set register for ct clear");
-               goto err_set_registers;
-       }
-
        mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
                                           mod_acts->num_actions,
                                           mod_acts->actions);
        if (IS_ERR(mod_hdr)) {
                err = PTR_ERR(mod_hdr);
                ct_dbg("Failed to add create ct clear mod hdr");
-               goto err_set_registers;
+               goto err_mod_hdr;
        }
 
        pre_ct_attr->modify_hdr = mod_hdr;
-       pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 
        rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
        if (IS_ERR(rule)) {
@@ -1930,7 +1938,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
 
 err_insert:
        mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
-err_set_registers:
+err_mod_hdr:
        netdev_warn(priv->netdev,
                    "Failed to offload ct clear flow, err %d\n", err);
        kfree(pre_ct_attr);
index 363329f4aac610e7c9e0a18ddcc3ba3afc07d94f..99662af1e41a72aaad1fe9ee55516d3cb82adb3a 100644 (file)
@@ -110,6 +110,7 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
 int
 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
                        struct mlx5_flow_attr *attr,
+                       struct mlx5e_tc_mod_hdr_acts *mod_acts,
                        const struct flow_action_entry *act,
                        struct netlink_ext_ack *extack);
 
@@ -172,6 +173,7 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
 static inline int
 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
                        struct mlx5_flow_attr *attr,
+                       struct mlx5e_tc_mod_hdr_acts *mod_acts,
                        const struct flow_action_entry *act,
                        struct netlink_ext_ack *extack)
 {
index 8f64f2c8895a948952bb1404951d5f1ff41c8a8e..b689701ac7d816b1eb98ce85238db1d793c96fc1 100644 (file)
@@ -102,6 +102,7 @@ struct mlx5e_tc_flow {
        refcount_t refcnt;
        struct rcu_head rcu_head;
        struct completion init_done;
+       struct completion del_hw_done;
        int tunnel_id; /* the mapped tunnel id of this flow */
        struct mlx5_flow_attr *attr;
 };
index 660cca73c36c8f54a3b57c6850b2d1ce997dd52c..042b1abe1437fd8cc574a66e0db931eb73acc9fd 100644 (file)
@@ -245,8 +245,14 @@ static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
                                struct list_head *flow_list,
                                int index)
 {
-       if (IS_ERR(mlx5e_flow_get(flow)))
+       if (IS_ERR(mlx5e_flow_get(flow))) {
+               /* Flow is being deleted concurrently. Wait for it to be
+                * unoffloaded from hardware, otherwise deleting encap will
+                * fail.
+                */
+               wait_for_completion(&flow->del_hw_done);
                return;
+       }
        wait_for_completion(&flow->init_done);
 
        flow->tmp_entry_index = index;
index fb5397324aa4f2b597a1f1b0bcd355836e0c382f..2db9573a3fe69d9f175c663334cc6c6c14ad5ec0 100644 (file)
@@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                        eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
                        eseg->swp_inner_l4_offset =
                                (skb->csum_start + skb->head - skb->data) / 2;
-                       if (skb->protocol == htons(ETH_P_IPV6))
+                       if (inner_ip_hdr(skb)->version == 6)
                                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
                        break;
                default:
index 62abce008c7b805dd24054b4ef647feb2323dcbc..15711814d2d28d8498eec641230470706df14115 100644 (file)
@@ -55,6 +55,7 @@ struct mlx5e_ktls_offload_context_rx {
        DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
 
        /* resync */
+       spinlock_t lock; /* protects resync fields */
        struct mlx5e_ktls_rx_resync_ctx resync;
        struct list_head list;
 };
@@ -99,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
        return resp_list;
 }
 
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
-{
-       struct mlx5e_tir_builder *builder;
-       int err;
-
-       builder = mlx5e_tir_builder_alloc(false);
-       if (!builder)
-               return -ENOMEM;
-
-       mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
-       mlx5e_tir_builder_build_direct(builder);
-       mlx5e_tir_builder_build_tls(builder);
-       err = mlx5e_tir_init(tir, builder, mdev, false);
-
-       mlx5e_tir_builder_free(builder);
-
-       return err;
-}
-
 static void accel_rule_handle_work(struct work_struct *work)
 {
        struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -386,14 +368,18 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
        struct mlx5e_icosq *sq;
        bool trigger_poll;
 
-       memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
-
        sq = &c->async_icosq;
        ktls_resync = sq->ktls_resync;
+       trigger_poll = false;
 
        spin_lock_bh(&ktls_resync->lock);
-       list_add_tail(&priv_rx->list, &ktls_resync->list);
-       trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+       spin_lock_bh(&priv_rx->lock);
+       memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+       if (list_empty(&priv_rx->list)) {
+               list_add_tail(&priv_rx->list, &ktls_resync->list);
+               trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+       }
+       spin_unlock_bh(&priv_rx->lock);
        spin_unlock_bh(&ktls_resync->lock);
 
        if (!trigger_poll)
@@ -604,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        struct mlx5_core_dev *mdev;
        struct mlx5e_priv *priv;
        int rxq, err;
-       u32 rqtn;
 
        tls_ctx = tls_get_ctx(sk);
        priv = netdev_priv(netdev);
@@ -617,6 +602,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_create_key;
 
+       INIT_LIST_HEAD(&priv_rx->list);
+       spin_lock_init(&priv_rx->lock);
        priv_rx->crypto_info  =
                *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
 
@@ -628,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        priv_rx->sw_stats = &priv->tls->sw_stats;
        mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
-       rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
-
-       err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
+       err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
        if (err)
                goto err_create_tir;
 
@@ -730,10 +715,14 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
                priv_rx = list_first_entry(&local_list,
                                           struct mlx5e_ktls_offload_context_rx,
                                           list);
+               spin_lock(&priv_rx->lock);
                cseg = post_static_params(sq, priv_rx);
-               if (IS_ERR(cseg))
+               if (IS_ERR(cseg)) {
+                       spin_unlock(&priv_rx->lock);
                        break;
-               list_del(&priv_rx->list);
+               }
+               list_del_init(&priv_rx->list);
+               spin_unlock(&priv_rx->lock);
                db_cseg = cseg;
        }
        if (db_cseg)
index e58a9ec4255322538e6cb847f8dddecc6659754c..48895d79796a82634441668f2a0369a5e7b7e96d 100644 (file)
@@ -1080,6 +1080,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
+#ifdef CONFIG_MLX5_EN_IPSEC
+       &MLX5E_STATS_GRP(ipsec_sw),
+       &MLX5E_STATS_GRP(ipsec_hw),
+#endif
 };
 
 static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
index 96967b0a24418c5aa4018f5396a4a73f731a0c37..793511d5ee4cd969d15c16c7e20af5914161e8f7 100644 (file)
@@ -543,13 +543,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
                                     u16 klm_entries, u16 index)
 {
        struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-       u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+       u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
        u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
        struct page *page = shampo->last_page;
        u64 addr = shampo->last_addr;
        struct mlx5e_dma_info *dma_info;
        struct mlx5e_umr_wqe *umr_wqe;
-       int headroom;
+       int headroom, i;
 
        headroom = rq->buff.headroom;
        new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@@ -601,9 +601,7 @@ update_klm:
 
 err_unmap:
        while (--i >= 0) {
-               if (--index < 0)
-                       index = shampo->hd_per_wq - 1;
-               dma_info = &shampo->info[index];
+               dma_info = &shampo->info[--index];
                if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
                        dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
                        mlx5e_page_release(rq, dma_info, true);
index 835caa1c7b745964f741f883153f64f297d53ec9..3d45f4ae80c0a41ce89240314c94b5aafd8bf121 100644 (file)
@@ -1600,6 +1600,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
                else
                        mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
        }
+       complete_all(&flow->del_hw_done);
 
        if (mlx5_flow_has_geneve_opt(flow))
                mlx5_geneve_tlv_option_del(priv->mdev->geneve);
@@ -3607,7 +3608,9 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
                        attr->dest_chain = act->chain_index;
                        break;
                case FLOW_ACTION_CT:
-                       err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+                       err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
+                                                     &parse_attr->mod_hdr_acts,
+                                                     act, extack);
                        if (err)
                                return err;
 
@@ -4276,7 +4279,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
                                return -EOPNOTSUPP;
                        }
-                       err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+                       err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
+                                                     &parse_attr->mod_hdr_acts,
+                                                     act, extack);
                        if (err)
                                return err;
 
@@ -4465,6 +4470,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
        INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
        refcount_set(&flow->refcnt, 1);
        init_completion(&flow->init_done);
+       init_completion(&flow->del_hw_done);
 
        *__flow = flow;
        *__parse_attr = parse_attr;
index c6cc67cb4f6add88e0b1f24f00c6112fd8c521d1..d377ddc70fc70b072c441d8a512ce98781e17e71 100644 (file)
@@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
        /* If vports min rate divider is 0 but their group has bw_share configured, then
         * need to set bw_share for vports to minimal value.
         */
-       if (!group_level && !max_guarantee && group->bw_share)
+       if (!group_level && !max_guarantee && group && group->bw_share)
                return 1;
        return 0;
 }
@@ -423,7 +423,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
                return err;
 
        /* Recalculate bw share weights of old and new groups */
-       if (vport->qos.bw_share) {
+       if (vport->qos.bw_share || new_group->bw_share) {
                esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
                esw_qos_normalize_vports_min_rate(esw, new_group, extack);
        }
index ec136b4992045025bc0514121a81edbc4a0aa9c7..51a8cecc4a7ce25fe6a77aab50d53b7cc6e2af16 100644 (file)
@@ -1305,12 +1305,17 @@ abort:
  */
 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
 {
+       bool toggle_lag;
        int ret;
 
        if (!mlx5_esw_allowed(esw))
                return 0;
 
-       mlx5_lag_disable_change(esw->dev);
+       toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+
+       if (toggle_lag)
+               mlx5_lag_disable_change(esw->dev);
+
        down_write(&esw->mode_lock);
        if (esw->mode == MLX5_ESWITCH_NONE) {
                ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
@@ -1324,7 +1329,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
                        esw->esw_funcs.num_vfs = num_vfs;
        }
        up_write(&esw->mode_lock);
-       mlx5_lag_enable_change(esw->dev);
+
+       if (toggle_lag)
+               mlx5_lag_enable_change(esw->dev);
+
        return ret;
 }
 
@@ -1572,6 +1580,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        esw->enabled_vports = 0;
        esw->mode = MLX5_ESWITCH_NONE;
        esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+       if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
+           MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+               esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+       else
+               esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 
        dev->priv.eswitch = esw;
        BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
@@ -1934,7 +1947,7 @@ free_out:
        return err;
 }
 
-u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
 {
        struct mlx5_eswitch *esw = dev->priv.eswitch;
 
@@ -1948,7 +1961,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
        struct mlx5_eswitch *esw;
 
        esw = dev->priv.eswitch;
-       return mlx5_esw_allowed(esw) ? esw->offloads.encap :
+       return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS)  ? esw->offloads.encap :
                DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
index f4eaa589388601d1b359b0b7f5ceddf047031897..32bc08a399256c5ce3bcfed8b9de181ec76045b2 100644 (file)
@@ -329,14 +329,25 @@ static bool
 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
 {
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+       bool result = false;
        int i;
 
-       for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+       /* Indirect table is supported only for flows with in_port uplink
+        * and the destination is vport on the same eswitch as the uplink,
+        * return false in case at least one of destinations doesn't meet
+        * this criteria.
+        */
+       for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
                if (esw_attr->dests[i].rep &&
                    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
-                                               esw_attr->dests[i].mdev))
-                       return true;
-       return false;
+                                               esw_attr->dests[i].mdev)) {
+                       result = true;
+               } else {
+                       result = false;
+                       break;
+               }
+       }
+       return result;
 }
 
 static int
@@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
        struct mlx5_eswitch *esw = master->priv.eswitch;
        struct mlx5_flow_table_attr ft_attr = {
                .max_fte = 1, .prio = 0, .level = 0,
+               .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
        };
        struct mlx5_flow_namespace *egress_ns;
        struct mlx5_flow_table *acl;
@@ -3183,12 +3195,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        u64 mapping_id;
        int err;
 
-       if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
-           MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
-               esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
-       else
-               esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
-
        mutex_init(&esw->offloads.termtbl_mutex);
        mlx5_rdma_enable_roce(esw->dev);
 
@@ -3286,7 +3292,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
        esw_offloads_metadata_uninit(esw);
        mlx5_rdma_disable_roce(esw->dev);
        mutex_destroy(&esw->offloads.termtbl_mutex);
-       esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
 
 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -3630,7 +3635,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
        *encap = esw->offloads.encap;
 unlock:
        up_write(&esw->mode_lock);
-       return 0;
+       return err;
 }
 
 static bool
index 31c99d53faf79c3ff0b920b0f9fa3e9d1ae2d55d..7e0e04cf26f86f7e1882af122267ebf51f3f2743 100644 (file)
@@ -40,7 +40,7 @@
 #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
 /* Max number of counters to query in bulk read is 32K */
 #define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
-#define MLX5_SF_NUM_COUNTERS_BULK 6
+#define MLX5_SF_NUM_COUNTERS_BULK 8
 #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
 #define MLX5_FC_POOL_USED_BUFF_RATIO 10
 
index 64f1abc4dc367fe1d05787a571742b9522cdfd93..3ca998874c50d583bbe3713d950bcaf7aa7f950b 100644 (file)
@@ -835,6 +835,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 
        health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
        add_timer(&health->timer);
+
+       if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+               queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 }
 
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -902,8 +905,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
        INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
        INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
        INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
-       if (mlx5_core_is_pf(dev))
-               queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 
        return 0;
 
index 48d2ea690d7ad90d91502d9a47eca7faeaf910a3..4ddf6b330a44254c64b1f33c8be6703035b0dd1f 100644 (file)
@@ -615,6 +615,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
        bool is_bonded, is_in_lag, mode_supported;
        int bond_status = 0;
        int num_slaves = 0;
+       int changed = 0;
        int idx;
 
        if (!netif_is_lag_master(upper))
@@ -653,27 +654,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
         */
        is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
 
-       if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
-               NL_SET_ERR_MSG_MOD(info->info.extack,
-                                  "Can't activate LAG offload, PF is configured with more than 64 VFs");
-               return 0;
-       }
-
        /* Lag mode must be activebackup or hash. */
        mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
                         tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
 
-       if (is_in_lag && !mode_supported)
-               NL_SET_ERR_MSG_MOD(info->info.extack,
-                                  "Can't activate LAG offload, TX type isn't supported");
-
        is_bonded = is_in_lag && mode_supported;
        if (tracker->is_bonded != is_bonded) {
                tracker->is_bonded = is_bonded;
-               return 1;
+               changed = 1;
        }
 
-       return 0;
+       if (!is_in_lag)
+               return changed;
+
+       if (!mlx5_lag_is_ready(ldev))
+               NL_SET_ERR_MSG_MOD(info->info.extack,
+                                  "Can't activate LAG offload, PF is configured with more than 64 VFs");
+       else if (!mode_supported)
+               NL_SET_ERR_MSG_MOD(info->info.extack,
+                                  "Can't activate LAG offload, TX type isn't supported");
+
+       return changed;
 }
 
 static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
@@ -716,9 +717,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 
        ldev    = container_of(this, struct mlx5_lag, nb);
 
-       if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
-               return NOTIFY_DONE;
-
        tracker = ldev->tracker;
 
        switch (event) {
index ad63dd45c8fb9dbf4d401e16c35f5343b41af857..a6592f9c3c05fc8d2c38f9ea491e3360e9527e80 100644 (file)
@@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
        if (port_sel->tunnel)
                mlx5_destroy_ttc_table(port_sel->inner.ttc);
        mlx5_lag_destroy_definers(ldev);
+       memset(port_sel, 0, sizeof(*port_sel));
 }
index 0dd96a6b140dddfd993ca1a57aa5ffd8acc7d2c0..c1df0d3595d87e283994af8d7a798d93da7842b6 100644 (file)
@@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
        dev->timeouts->to[type] = val;
 }
 
-static void tout_set_def_val(struct mlx5_core_dev *dev)
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
 {
        int i;
 
-       for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+       for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
                tout_set(dev, tout_def_sw_val[i], i);
 }
 
@@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
        if (!dev->timeouts)
                return -ENOMEM;
 
-       tout_set_def_val(dev);
        return 0;
 }
 
index 31faa5c17aa91c89d9e428bb82ef2074e4cfc19a..1c42ead782fa7f4470a3f58dc1d9b2cd4c57e990 100644 (file)
@@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
 void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
 void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
 int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
 u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
 
 #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
index a92a92a52346d8c33c2d272fb8a087bb81a07a22..7df9c7f8d9c8ad27fd624bffb98ddfd80ae75303 100644 (file)
@@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
        if (mlx5_core_is_pf(dev))
                pcie_print_link_status(dev->pdev);
 
-       err = mlx5_tout_init(dev);
-       if (err) {
-               mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
-               return err;
-       }
+       mlx5_tout_set_def_val(dev);
 
        /* wait for firmware to accept initialization segments configurations
         */
@@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
        if (err) {
                mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
                              mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
-               goto err_tout_cleanup;
+               return err;
        }
 
        err = mlx5_cmd_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
-               goto err_tout_cleanup;
+               return err;
        }
 
        mlx5_tout_query_iseg(dev);
@@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 
        mlx5_set_driver_version(dev);
 
-       mlx5_start_health_poll(dev);
-
        err = mlx5_query_hca_caps(dev);
        if (err) {
                mlx5_core_err(dev, "query hca failed\n");
-               goto stop_health;
+               goto reclaim_boot_pages;
        }
 
+       mlx5_start_health_poll(dev);
+
        return 0;
 
-stop_health:
-       mlx5_stop_health_poll(dev, boot);
 reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
 err_disable_hca:
@@ -1094,8 +1088,6 @@ err_disable_hca:
 err_cmd_cleanup:
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
-err_tout_cleanup:
-       mlx5_tout_cleanup(dev);
 
        return err;
 }
@@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
        mlx5_core_disable_hca(dev, 0);
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
-       mlx5_tout_cleanup(dev);
 
        return 0;
 }
@@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
                                            mlx5_debugfs_root);
        INIT_LIST_HEAD(&priv->traps);
 
+       err = mlx5_tout_init(dev);
+       if (err) {
+               mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+               goto err_timeout_init;
+       }
+
        err = mlx5_health_init(dev);
        if (err)
                goto err_health_init;
@@ -1501,6 +1498,8 @@ err_adev_init:
 err_pagealloc_init:
        mlx5_health_cleanup(dev);
 err_health_init:
+       mlx5_tout_cleanup(dev);
+err_timeout_init:
        debugfs_remove(dev->priv.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
@@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
        mlx5_adev_cleanup(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
+       mlx5_tout_cleanup(dev);
        debugfs_remove_recursive(dev->priv.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
index 49089cbe897c69c985fa3bb855592198ffa534bf..8cbd36c82b3b82a84f49fa93c3109e5ea65b80ad 100644 (file)
@@ -135,25 +135,14 @@ static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
 
 static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
                                 u16 vport_number,
+                                bool other_vport,
                                 struct mlx5dr_cmd_vport_cap *vport_caps)
 {
-       u16 cmd_vport = vport_number;
-       bool other_vport = true;
        int ret;
 
-       if (vport_number == MLX5_VPORT_UPLINK) {
-               dr_domain_fill_uplink_caps(dmn, vport_caps);
-               return 0;
-       }
-
-       if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
-               other_vport = false;
-               cmd_vport = 0;
-       }
-
        ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
                                                 other_vport,
-                                                cmd_vport,
+                                                vport_number,
                                                 &vport_caps->icm_address_rx,
                                                 &vport_caps->icm_address_tx);
        if (ret)
@@ -161,7 +150,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
 
        ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
                                    other_vport,
-                                   cmd_vport,
+                                   vport_number,
                                    &vport_caps->vport_gvmi);
        if (ret)
                return ret;
@@ -176,9 +165,15 @@ static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
 {
        return dr_domain_query_vport(dmn,
                                     dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
+                                    false,
                                     &dmn->info.caps.vports.esw_manager_caps);
 }
 
+static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+       dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
+}
+
 static struct mlx5dr_cmd_vport_cap *
 dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
 {
@@ -190,7 +185,7 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
        if (!vport_caps)
                return NULL;
 
-       ret = dr_domain_query_vport(dmn, vport, vport_caps);
+       ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
        if (ret) {
                kvfree(vport_caps);
                return NULL;
@@ -207,16 +202,26 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
        return vport_caps;
 }
 
+static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
+{
+       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+       return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+              (!caps->is_ecpf && vport == 0);
+}
+
 struct mlx5dr_cmd_vport_cap *
 mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
 {
        struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
        struct mlx5dr_cmd_vport_cap *vport_caps;
 
-       if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
-           (!caps->is_ecpf && vport == 0))
+       if (dr_domain_is_esw_mgr_vport(dmn, vport))
                return &caps->vports.esw_manager_caps;
 
+       if (vport == MLX5_VPORT_UPLINK)
+               return &caps->vports.uplink_caps;
+
 vport_load:
        vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
        if (vport_caps)
@@ -241,17 +246,6 @@ static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
        }
 }
 
-static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_cmd_vport_cap *vport_caps;
-
-       vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
-       if (!vport_caps)
-               return -EINVAL;
-
-       return 0;
-}
-
 static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
                                    struct mlx5dr_domain *dmn)
 {
@@ -281,11 +275,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
                goto free_vports_caps_xa;
        }
 
-       ret = dr_domain_query_uplink(dmn);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
-               goto free_vports_caps_xa;
-       }
+       dr_domain_query_uplink(dmn);
 
        return 0;
 
index 75c775bee351056305d882f1d851bf107047533f..793365242e852c0c58122d8fe7a3298775f48e7c 100644 (file)
@@ -924,11 +924,12 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
 
        /* Check that all mask data was consumed */
        for (i = 0; i < consumed_mask.match_sz; i++) {
-               if (consumed_mask.match_buf[i]) {
-                       mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
-                       ret = -EOPNOTSUPP;
-                       goto free_consumed_mask;
-               }
+               if (!((u8 *)consumed_mask.match_buf)[i])
+                       continue;
+
+               mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
+               ret = -EOPNOTSUPP;
+               goto free_consumed_mask;
        }
 
        ret =  0;
index 3028b776da00cec586efeae3874268a562ce4fa1..2333c2439c2874c17284b8e5a502237c5759c5dc 100644 (file)
@@ -764,6 +764,7 @@ struct mlx5dr_roce_cap {
 
 struct mlx5dr_vports {
        struct mlx5dr_cmd_vport_cap esw_manager_caps;
+       struct mlx5dr_cmd_vport_cap uplink_caps;
        struct xarray vports_caps_xa;
 };
 
index 5925db386b1ba5bdfae89c23f0d03b88046c2e88..03e5bad4e405fedfc06c1fe9a87a3f06a98f76c8 100644 (file)
@@ -2153,7 +2153,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
        max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
        local_port = mlxsw_reg_pude_local_port_get(pude_pl);
 
-       if (WARN_ON_ONCE(local_port >= max_ports))
+       if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
                return;
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port)
@@ -3290,10 +3290,10 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
        u8 max_rif_mac_profiles;
 
        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
-               return -EIO;
-
-       max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
-                                                 MAX_RIF_MAC_PROFILES);
+               max_rif_mac_profiles = 1;
+       else
+               max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
+                                                         MAX_RIF_MAC_PROFILES);
        devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
                                          max_rif_mac_profiles, 1,
                                          DEVLINK_RESOURCE_UNIT_ENTRY);
index 4fc97823bc84fb00bc3d6857f38c396653a57965..7d7647481f70e99d1ca7b6d35109d3b49923013e 100644 (file)
@@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
 }
 
 static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
-                                          u8 duplex, u16 local_adv,
-                                          u16 remote_adv)
+                                          u16 local_adv, u16 remote_adv)
 {
        struct lan743x_phy *phy = &adapter->phy;
        u8 cap;
@@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
 
        phy_print_status(phydev);
        if (phydev->state == PHY_RUNNING) {
-               struct ethtool_link_ksettings ksettings;
                int remote_advertisement = 0;
                int local_advertisement = 0;
 
@@ -980,18 +978,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
                }
                lan743x_csr_write(adapter, MAC_CR, data);
 
-               memset(&ksettings, 0, sizeof(ksettings));
-               phy_ethtool_get_link_ksettings(netdev, &ksettings);
                local_advertisement =
                        linkmode_adv_to_mii_adv_t(phydev->advertising);
                remote_advertisement =
                        linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
 
-               lan743x_phy_update_flowcontrol(adapter,
-                                              ksettings.base.duplex,
-                                              local_advertisement,
+               lan743x_phy_update_flowcontrol(adapter, local_advertisement,
                                               remote_advertisement);
-               lan743x_ptp_update_latency(adapter, ksettings.base.speed);
+               lan743x_ptp_update_latency(adapter, phydev->speed);
        }
 }
 
index 34b971ff8ef8ba79c99b2b64eb49fba56a5552bf..078d6a5a0768876cda1f3762def4fa0fdc44b3ca 100644 (file)
@@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
        if (err)
                goto out;
 
-       err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
-                                    &hwc_wq->msg_buf);
-       if (err)
-               goto out;
-
        hwc_wq->hwc = hwc;
        hwc_wq->gdma_wq = queue;
        hwc_wq->queue_depth = q_depth;
        hwc_wq->hwc_cq = hwc_cq;
 
+       err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+                                    &hwc_wq->msg_buf);
+       if (err)
+               goto out;
+
        *hwc_wq_ptr = hwc_wq;
        return 0;
 out:
index e6c18b598d5c5e531b6ce5cfd27a5976bc34a534..1e4ad953cffbc5fc066954c75f6b2bf9469142e9 100644 (file)
@@ -1278,6 +1278,225 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_fdb_dump);
 
+static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
+{
+       trap->key_type = OCELOT_VCAP_KEY_ETYPE;
+       *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
+       *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
+}
+
+static void
+ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+       trap->key_type = OCELOT_VCAP_KEY_IPV4;
+       trap->key.ipv4.dport.value = PTP_EV_PORT;
+       trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+       trap->key_type = OCELOT_VCAP_KEY_IPV6;
+       trap->key.ipv6.dport.value = PTP_EV_PORT;
+       trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+       trap->key_type = OCELOT_VCAP_KEY_IPV4;
+       trap->key.ipv4.dport.value = PTP_GEN_PORT;
+       trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+       trap->key_type = OCELOT_VCAP_KEY_IPV6;
+       trap->key.ipv6.dport.value = PTP_GEN_PORT;
+       trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static int ocelot_trap_add(struct ocelot *ocelot, int port,
+                          unsigned long cookie,
+                          void (*populate)(struct ocelot_vcap_filter *f))
+{
+       struct ocelot_vcap_block *block_vcap_is2;
+       struct ocelot_vcap_filter *trap;
+       bool new = false;
+       int err;
+
+       block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+       trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
+                                                  false);
+       if (!trap) {
+               trap = kzalloc(sizeof(*trap), GFP_KERNEL);
+               if (!trap)
+                       return -ENOMEM;
+
+               populate(trap);
+               trap->prio = 1;
+               trap->id.cookie = cookie;
+               trap->id.tc_offload = false;
+               trap->block_id = VCAP_IS2;
+               trap->type = OCELOT_VCAP_FILTER_OFFLOAD;
+               trap->lookup = 0;
+               trap->action.cpu_copy_ena = true;
+               trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+               trap->action.port_mask = 0;
+               new = true;
+       }
+
+       trap->ingress_port_mask |= BIT(port);
+
+       if (new)
+               err = ocelot_vcap_filter_add(ocelot, trap, NULL);
+       else
+               err = ocelot_vcap_filter_replace(ocelot, trap);
+       if (err) {
+               trap->ingress_port_mask &= ~BIT(port);
+               if (!trap->ingress_port_mask)
+                       kfree(trap);
+               return err;
+       }
+
+       return 0;
+}
+
+static int ocelot_trap_del(struct ocelot *ocelot, int port,
+                          unsigned long cookie)
+{
+       struct ocelot_vcap_block *block_vcap_is2;
+       struct ocelot_vcap_filter *trap;
+
+       block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+       trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
+                                                  false);
+       if (!trap)
+               return 0;
+
+       trap->ingress_port_mask &= ~BIT(port);
+       if (!trap->ingress_port_mask)
+               return ocelot_vcap_filter_del(ocelot, trap);
+
+       return ocelot_vcap_filter_replace(ocelot, trap);
+}
+
+static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+       unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+
+       return ocelot_trap_add(ocelot, port, l2_cookie,
+                              ocelot_populate_l2_ptp_trap_key);
+}
+
+static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+       unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+
+       return ocelot_trap_del(ocelot, port, l2_cookie);
+}
+
+static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+       unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
+       unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+       int err;
+
+       err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
+                             ocelot_populate_ipv4_ptp_event_trap_key);
+       if (err)
+               return err;
+
+       err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
+                             ocelot_populate_ipv4_ptp_general_trap_key);
+       if (err)
+               ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+
+       return err;
+}
+
+static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+       unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
+       unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+       int err;
+
+       err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+       err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
+       return err;
+}
+
+static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+       unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
+       unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+       int err;
+
+       err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
+                             ocelot_populate_ipv6_ptp_event_trap_key);
+       if (err)
+               return err;
+
+       err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
+                             ocelot_populate_ipv6_ptp_general_trap_key);
+       if (err)
+               ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+
+       return err;
+}
+
+static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+       unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
+       unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+       int err;
+
+       err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+       err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
+       return err;
+}
+
+static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+                                 bool l2, bool l4)
+{
+       int err;
+
+       if (l2)
+               err = ocelot_l2_ptp_trap_add(ocelot, port);
+       else
+               err = ocelot_l2_ptp_trap_del(ocelot, port);
+       if (err)
+               return err;
+
+       if (l4) {
+               err = ocelot_ipv4_ptp_trap_add(ocelot, port);
+               if (err)
+                       goto err_ipv4;
+
+               err = ocelot_ipv6_ptp_trap_add(ocelot, port);
+               if (err)
+                       goto err_ipv6;
+       } else {
+               err = ocelot_ipv4_ptp_trap_del(ocelot, port);
+
+               err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
+       }
+       if (err)
+               return err;
+
+       return 0;
+
+err_ipv6:
+       ocelot_ipv4_ptp_trap_del(ocelot, port);
+err_ipv4:
+       if (l2)
+               ocelot_l2_ptp_trap_del(ocelot, port);
+       return err;
+}
+
 int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
 {
        return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
@@ -1288,7 +1507,9 @@ EXPORT_SYMBOL(ocelot_hwstamp_get);
 int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       bool l2 = false, l4 = false;
        struct hwtstamp_config cfg;
+       int err;
 
        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
                return -EFAULT;
@@ -1320,28 +1541,42 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
        switch (cfg.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
-       case HWTSTAMP_FILTER_ALL:
-       case HWTSTAMP_FILTER_SOME:
-       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-       case HWTSTAMP_FILTER_NTP_ALL:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               l4 = true;
+               break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+               l2 = true;
+               break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-               cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               l2 = true;
+               l4 = true;
                break;
        default:
                mutex_unlock(&ocelot->ptp_lock);
                return -ERANGE;
        }
 
+       err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+       if (err) {
+               mutex_unlock(&ocelot->ptp_lock);
+               return err;
+       }
+
+       if (l2 && l4)
+               cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+       else if (l2)
+               cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+       else if (l4)
+               cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+       else
+               cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+
        /* Commit back the result & save it */
        memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
        mutex_unlock(&ocelot->ptp_lock);
@@ -1444,7 +1679,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
                                 SOF_TIMESTAMPING_RAW_HARDWARE;
        info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
                         BIT(HWTSTAMP_TX_ONESTEP_SYNC);
-       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
 
        return 0;
 }
index 99d7376a70a748d50d123ace7797a722b5e7f372..337cd08b1a543812b9aa691bed9022d91cd1c2cf 100644 (file)
@@ -1217,6 +1217,22 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
 }
 EXPORT_SYMBOL(ocelot_vcap_filter_del);
 
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+                              struct ocelot_vcap_filter *filter)
+{
+       struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+       int index;
+
+       index = ocelot_vcap_block_get_filter_index(block, filter);
+       if (index < 0)
+               return index;
+
+       vcap_entry_set(ocelot, index, filter);
+
+       return 0;
+}
+EXPORT_SYMBOL(ocelot_vcap_filter_replace);
+
 int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
                                    struct ocelot_vcap_filter *filter)
 {
index ca4686094701c60eff5a9991990575257f1310ca..0a02d8bd0a3e57ed996c798a5e4fa7875f422338 100644 (file)
@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
 };
 
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
 {
        unsigned int silicon_revision;
        struct sonic_local *lp = netdev_priv(dev);
index df203738511bfd68a89880b020968decb05ba3a8..0b1865e9f0b596730c3baca84cccd455f7d04110 100644 (file)
@@ -565,7 +565,6 @@ struct nfp_net_dp {
  * @exn_name:           Name for Exception interrupt
  * @shared_handler:     Handler for shared interrupts
  * @shared_name:        Name for shared interrupt
- * @me_freq_mhz:        ME clock_freq (MHz)
  * @reconfig_lock:     Protects @reconfig_posted, @reconfig_timer_active,
  *                     @reconfig_sync_present and HW reconfiguration request
  *                     regs/machinery from async requests (sync must take
@@ -650,8 +649,6 @@ struct nfp_net {
        irq_handler_t shared_handler;
        char shared_name[IFNAMSIZ + 8];
 
-       u32 me_freq_mhz;
-
        bool link_up;
        spinlock_t link_status_lock;
 
index 1de076f557405e9e48df97eb5edf39acc363efb5..cf7882933993495a6ed5f25a91c060cd28d644ea 100644 (file)
@@ -1344,7 +1344,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
         * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
         * count.
         */
-       factor = nn->me_freq_mhz / 16;
+       factor = nn->tlv_caps.me_freq_mhz / 16;
 
        /* Each pair of (usecs, max_frames) fields specifies that interrupts
         * should be coalesced until
index d7ac0307797fd8f2fc60b6f1b6b4873e80688091..34c0d2ddf9ef6aad21755a7a453a05cdcf2a58d6 100644 (file)
@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
                return -ENOMEM;
 
        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-       if (!cache)
+       if (!cache) {
+               nfp_cpp_area_free(area);
                return -ENOMEM;
+       }
 
        cache->id = 0;
        cache->addr = 0;
index cfeb7620ae20b645e433f6f63665bf90de0e55c0..07a00dd9cfe0a6da6f3b036c576298ab2949fcd4 100644 (file)
@@ -1209,7 +1209,7 @@ static void *nixge_get_nvmem_address(struct device *dev)
 
        cell = nvmem_cell_get(dev, "address");
        if (IS_ERR(cell))
-               return NULL;
+               return cell;
 
        mac = nvmem_cell_read(cell, &cell_size);
        nvmem_cell_put(cell);
@@ -1282,7 +1282,7 @@ static int nixge_probe(struct platform_device *pdev)
        ndev->max_mtu = NIXGE_JUMBO_MTU;
 
        mac_addr = nixge_get_nvmem_address(&pdev->dev);
-       if (mac_addr && is_valid_ether_addr(mac_addr)) {
+       if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) {
                eth_hw_addr_set(ndev, mac_addr);
                kfree(mac_addr);
        } else {
index a97f691839e04f10a14e564cb667bb541904f27c..6958adeca86d0e6c413115be436e1f0720307b1c 100644 (file)
@@ -1045,7 +1045,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                if (!parities)
                        continue;
 
-               for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+               for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
                        struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
 
                        if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
@@ -1083,7 +1083,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                         * to current group, making them responsible for the
                         * previous assertion.
                         */
-                       for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+                       for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
                                long unsigned int bitmask;
                                u8 bit, bit_len;
 
@@ -1382,7 +1382,7 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
        memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
        for (i = 0; i < NUM_ATTN_REGS; i++) {
                /* j is array index, k is bit index */
-               for (j = 0, k = 0; k < 32; j++) {
+               for (j = 0, k = 0; k < 32 && j < 32; j++) {
                        struct aeu_invert_reg_bit *p_aeu;
 
                        p_aeu = &aeu_descs[i].bits[j];
index 065e9004598ee8f37e4669f48b5239a39426d62b..999abcfe3310a81a51554ba01cf7ff7b5aaf7077 100644 (file)
@@ -1643,6 +1643,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        data_split = true;
                }
        } else {
+               if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+                       DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+                       qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+
                val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
                         ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
        }
index 1e6d72adfe43994064aaadddc26e56b2fe20c799..71523d747e93f3ae109cc1b27319c567e6008d3c 100644 (file)
@@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
 
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-       err = ql_wait_for_drvr_lock(qdev);
-       if (err) {
-               err = ql_adapter_initialize(qdev);
-               if (err) {
-                       netdev_err(ndev, "Unable to initialize adapter\n");
-                       goto err_init;
-               }
-               netdev_err(ndev, "Releasing driver lock\n");
-               ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
-       } else {
+       if (!ql_wait_for_drvr_lock(qdev)) {
                netdev_err(ndev, "Could not acquire driver lock\n");
+               err = -ENODEV;
                goto err_lock;
        }
 
+       err = ql_adapter_initialize(qdev);
+       if (err) {
+               netdev_err(ndev, "Unable to initialize adapter\n");
+               goto err_init;
+       }
+       ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
        set_bit(QL_ADAPTER_UP, &qdev->flags);
index d51bac7ba5afadca6df37a8761838432fba08b1f..bd06076803295fb5a6a0946db8be6bf4ad901076 100644 (file)
@@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
        sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
        context_id = recv_ctx->context_id;
        num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
-       ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
-                                   QLCNIC_CMD_ADD_RCV_RINGS);
+       err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+                                       QLCNIC_CMD_ADD_RCV_RINGS);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to alloc mbx args %d\n", err);
+               return err;
+       }
+
        cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
 
        /* set up status rings, mbx 2-81 */
index bbe21db204172d3e92dbf25b5eef4648af4387e5..86c44bc5f73f8bfef4e88062d01be536f837db29 100644 (file)
@@ -5217,8 +5217,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
 
 static void rtl_init_mac_address(struct rtl8169_private *tp)
 {
+       u8 mac_addr[ETH_ALEN] __aligned(2) = {};
        struct net_device *dev = tp->dev;
-       u8 mac_addr[ETH_ALEN];
        int rc;
 
        rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
@@ -5233,7 +5233,8 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
        if (is_valid_ether_addr(mac_addr))
                goto done;
 
-       eth_hw_addr_random(dev);
+       eth_random_addr(mac_addr);
+       dev->addr_assign_type = NET_ADDR_RANDOM;
        dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
 done:
        eth_hw_addr_set(dev, mac_addr);
index cc2d907c4c4bc90fe5095492adb7a86adc7b11e3..23a336c5096ed7c2f553a93bb7ac4874f4a34b89 100644 (file)
@@ -392,7 +392,7 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
                        /* get MAC address from EEPROM */
                        for (i = 0; i < 3; i++)
                                addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
-                        eth_hw_addr_set(net_dev, (u8 *)addr);
+                       eth_hw_addr_set(net_dev, (u8 *)addr);
 
                        rc = 1;
                        break;
index 85208128f135ca48fb504a812483d69bd095a386..b7c2579c963b68a72190a9b43f60dd852614858b 100644 (file)
@@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
-                                              socfpga_dwmac_resume);
+static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+
+       stmmac_bus_clks_config(priv, false);
+
+       return 0;
+}
+
+static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+
+       return stmmac_bus_clks_config(priv, true);
+}
+
+static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
+       SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
+};
 
 static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
        .set_phy_mode = socfpga_gen5_set_phy_mode,
index 43eead726886a0a13a1c749a300a353f9e7e9ebd..5f129733aabd2e914a84a89b72e90bd3a41caaa2 100644 (file)
@@ -314,6 +314,7 @@ int stmmac_mdio_reset(struct mii_bus *mii);
 int stmmac_xpcs_setup(struct mii_bus *mii);
 void stmmac_set_ethtool_ops(struct net_device *netdev);
 
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
 void stmmac_ptp_register(struct stmmac_priv *priv);
 void stmmac_ptp_unregister(struct stmmac_priv *priv);
 int stmmac_open(struct net_device *dev);
index d3f350c25b9b64a5bbc8837abac18db7d04548f8..da8306f6073027ac99d57423d1cc50bf426ae233 100644 (file)
 #include "dwxgmac2.h"
 #include "hwif.h"
 
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoid non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE     (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+                                PTP_TCR_TSCTRLSSR)
+
 #define        STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define        TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
 
@@ -511,6 +518,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
        return true;
 }
 
+static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
+{
+       /* Correct the clk domain crossing(CDC) error */
+       if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+               return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
+       return 0;
+}
+
 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
  * @priv: driver private structure
  * @p : descriptor pointer
@@ -524,7 +539,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 {
        struct skb_shared_hwtstamps shhwtstamp;
        bool found = false;
-       s64 adjust = 0;
        u64 ns = 0;
 
        if (!priv->hwts_tx_en)
@@ -543,12 +557,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
        }
 
        if (found) {
-               /* Correct the clk domain crossing(CDC) error */
-               if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
-                       adjust += -(2 * (NSEC_PER_SEC /
-                                        priv->plat->clk_ptp_rate));
-                       ns += adjust;
-               }
+               ns -= stmmac_cdc_adjust(priv);
 
                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -573,7 +582,6 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 {
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        struct dma_desc *desc = p;
-       u64 adjust = 0;
        u64 ns = 0;
 
        if (!priv->hwts_rx_en)
@@ -586,11 +594,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
        if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
                stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
 
-               /* Correct the clk domain crossing(CDC) error */
-               if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
-                       adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
-                       ns -= adjust;
-               }
+               ns -= stmmac_cdc_adjust(priv);
 
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
@@ -616,8 +620,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
-       struct timespec64 now;
-       u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
@@ -626,11 +628,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
-       u32 sec_inc = 0;
-       u32 value = 0;
-       bool xmac;
-
-       xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 
        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -792,42 +789,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 
-       if (!priv->hwts_tx_en && !priv->hwts_rx_en)
-               stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
-       else {
-               value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
-                        tstamp_all | ptp_v2 | ptp_over_ethernet |
-                        ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
-                        ts_master_en | snap_type_sel);
-               stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
-
-               /* program Sub Second Increment reg */
-               stmmac_config_sub_second_increment(priv,
-                               priv->ptpaddr, priv->plat->clk_ptp_rate,
-                               xmac, &sec_inc);
-               temp = div_u64(1000000000ULL, sec_inc);
-
-               /* Store sub second increment and flags for later use */
-               priv->sub_second_inc = sec_inc;
-               priv->systime_flags = value;
-
-               /* calculate default added value:
-                * formula is :
-                * addend = (2^32)/freq_div_ratio;
-                * where, freq_div_ratio = 1e9ns/sec_inc
-                */
-               temp = (u64)(temp << 32);
-               priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
-               stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
-
-               /* initialize system time */
-               ktime_get_real_ts64(&now);
+       priv->systime_flags = STMMAC_HWTS_ACTIVE;
 
-               /* lower 32 bits of tv_sec are safe until y2106 */
-               stmmac_init_systime(priv, priv->ptpaddr,
-                               (u32)now.tv_sec, now.tv_nsec);
+       if (priv->hwts_tx_en || priv->hwts_rx_en) {
+               priv->systime_flags |= tstamp_all | ptp_v2 |
+                                      ptp_over_ethernet | ptp_over_ipv6_udp |
+                                      ptp_over_ipv4_udp | ts_event_en |
+                                      ts_master_en | snap_type_sel;
        }
 
+       stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
        memcpy(&priv->tstamp_config, &config, sizeof(config));
 
        return copy_to_user(ifr->ifr_data, &config,
@@ -855,6 +827,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
                            sizeof(*config)) ? -EFAULT : 0;
 }
 
+/**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * Will be rerun after resuming from suspend, case in which the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+       bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+       struct timespec64 now;
+       u32 sec_inc = 0;
+       u64 temp = 0;
+       int ret;
+
+       if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+               return -EOPNOTSUPP;
+
+       ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+       if (ret < 0) {
+               netdev_warn(priv->dev,
+                           "failed to enable PTP reference clock: %pe\n",
+                           ERR_PTR(ret));
+               return ret;
+       }
+
+       stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+       priv->systime_flags = systime_flags;
+
+       /* program Sub Second Increment reg */
+       stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+                                          priv->plat->clk_ptp_rate,
+                                          xmac, &sec_inc);
+       temp = div_u64(1000000000ULL, sec_inc);
+
+       /* Store sub second increment for later use */
+       priv->sub_second_inc = sec_inc;
+
+       /* calculate default added value:
+        * formula is :
+        * addend = (2^32)/freq_div_ratio;
+        * where, freq_div_ratio = 1e9ns/sec_inc
+        */
+       temp = (u64)(temp << 32);
+       priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+       stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+       /* initialize system time */
+       ktime_get_real_ts64(&now);
+
+       /* lower 32 bits of tv_sec are safe until y2106 */
+       stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
+
 /**
  * stmmac_init_ptp - init PTP
  * @priv: driver private structure
@@ -865,9 +897,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 static int stmmac_init_ptp(struct stmmac_priv *priv)
 {
        bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+       int ret;
 
-       if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
-               return -EOPNOTSUPP;
+       ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+       if (ret)
+               return ret;
 
        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -3275,10 +3309,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        stmmac_mmc_setup(priv);
 
        if (init_ptp) {
-               ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
-               if (ret < 0)
-                       netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
-
                ret = stmmac_init_ptp(priv);
                if (ret == -EOPNOTSUPP)
                        netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -3772,6 +3802,8 @@ int stmmac_release(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 chan;
 
+       netif_tx_disable(dev);
+
        if (device_may_wakeup(priv->device))
                phylink_speed_down(priv->phylink, false);
        /* Stop and disconnect the PHY */
@@ -5164,12 +5196,13 @@ read_again:
                if (likely(!(status & rx_not_ls)) &&
                    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
                     unlikely(status != llc_snap))) {
-                       if (buf2_len)
+                       if (buf2_len) {
                                buf2_len -= ETH_FCS_LEN;
-                       else
+                               len -= ETH_FCS_LEN;
+                       } else if (buf1_len) {
                                buf1_len -= ETH_FCS_LEN;
-
-                       len -= ETH_FCS_LEN;
+                               len -= ETH_FCS_LEN;
+                       }
                }
 
                if (!skb) {
@@ -5507,8 +5540,6 @@ static int stmmac_set_features(struct net_device *netdev,
                               netdev_features_t features)
 {
        struct stmmac_priv *priv = netdev_priv(netdev);
-       bool sph_en;
-       u32 chan;
 
        /* Keep the COE Type in case of csum is supporting */
        if (features & NETIF_F_RXCSUM)
@@ -5520,10 +5551,13 @@ static int stmmac_set_features(struct net_device *netdev,
         */
        stmmac_rx_ipc(priv, priv->hw);
 
-       sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+       if (priv->sph_cap) {
+               bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+               u32 chan;
 
-       for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
-               stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+               for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+                       stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+       }
 
        return 0;
 }
index 232ac98943cd08a66a4bd65b53c2d100843d29f1..5d29f336315b79d87844af9f4c6fa3f71803eecd 100644 (file)
@@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
                if (ret)
                        return ret;
 
-               clk_prepare_enable(priv->plat->clk_ptp_ref);
+               stmmac_init_tstamp_counter(priv, priv->systime_flags);
        }
 
        return 0;
index bfdf89e54752c59b5b43f26ecccfac66f47cf907..8a19a06b505d125dcc88e7accd1765b4decb7530 100644 (file)
@@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev)
 {
        /* Finish setting up the DEVICE info. */
        dev->netdev_ops         = &sp_netdev_ops;
-       dev->needs_free_netdev  = true;
        dev->mtu                = SIXP_MTU;
        dev->hard_header_len    = AX25_MAX_HEADER_LEN;
        dev->header_ops         = &ax25_header_ops;
index e2b332b54f06d2babe198e51e03b02aed0980122..7da2bb8a443c02ba6680edb36a3102fcac95023c 100644 (file)
@@ -31,6 +31,8 @@
 
 #define AX_MTU         236
 
+/* some arch define END as assembly function ending, just undef it */
+#undef END
 /* SLIP/KISS protocol characters. */
 #define END             0300           /* indicates end of frame       */
 #define ESC             0333           /* indicates byte stuffing      */
index cff51731195aa4c12ff5420d99920c2c3322e881..d57472ea077f2d7ad1097608eaa91f0f4bbb4736 100644 (file)
@@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
        wait_for_completion(&ipa->completion);
 }
 
-void ipa_cmd_pipeline_clear(struct ipa *ipa)
-{
-       u32 count = ipa_cmd_pipeline_clear_count();
-       struct gsi_trans *trans;
-
-       trans = ipa_cmd_trans_alloc(ipa, count);
-       if (trans) {
-               ipa_cmd_pipeline_clear_add(trans);
-               gsi_trans_commit_wait(trans);
-               ipa_cmd_pipeline_clear_wait(ipa);
-       } else {
-               dev_err(&ipa->pdev->dev,
-                       "error allocating %u entry tag transaction\n", count);
-       }
-}
-
 static struct ipa_cmd_info *
 ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
 {
index 69cd085d427dbf20d626dbbd4426e2c1b0d527d6..05ed7e42e184205e252288e7c568ff0bd61ead92 100644 (file)
@@ -163,12 +163,6 @@ u32 ipa_cmd_pipeline_clear_count(void);
  */
 void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
 
-/**
- * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
- * @ipa:       - IPA pointer
- */
-void ipa_cmd_pipeline_clear(struct ipa *ipa);
-
 /**
  * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
  * @ipa:       IPA pointer
index 5528d97110d56de1825c7bc4c2c7896ce034b586..03a170993420878c2b8c5d68683e3870ec547e34 100644 (file)
@@ -853,6 +853,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
        u32 offset;
        u32 val;
 
+       /* This should only be changed when HOL_BLOCK_EN is disabled */
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
        val = hol_block_timer_val(ipa, microseconds);
        iowrite32(val, ipa->reg_virt + offset);
@@ -868,6 +869,9 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
        val = enable ? HOL_BLOCK_EN_FMASK : 0;
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
        iowrite32(val, endpoint->ipa->reg_virt + offset);
+       /* When enabling, the register must be written twice for IPA v4.5+ */
+       if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
+               iowrite32(val, endpoint->ipa->reg_virt + offset);
 }
 
 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
@@ -880,6 +884,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
                if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
                        continue;
 
+               ipa_endpoint_init_hol_block_enable(endpoint, false);
                ipa_endpoint_init_hol_block_timer(endpoint, 0);
                ipa_endpoint_init_hol_block_enable(endpoint, true);
        }
@@ -1631,8 +1636,6 @@ void ipa_endpoint_suspend(struct ipa *ipa)
        if (ipa->modem_netdev)
                ipa_modem_suspend(ipa->modem_netdev);
 
-       ipa_cmd_pipeline_clear(ipa);
-
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
 }
index cdfa98a76e1f4a66a030c7fbdbfd9a29606d6eb7..a448ec198bee1862647b96f34f6346ae30fdc067 100644 (file)
@@ -28,6 +28,7 @@
 #include "ipa_reg.h"
 #include "ipa_mem.h"
 #include "ipa_table.h"
+#include "ipa_smp2p.h"
 #include "ipa_modem.h"
 #include "ipa_uc.h"
 #include "ipa_interrupt.h"
@@ -801,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        int ret;
 
+       /* Prevent the modem from triggering a call to ipa_setup().  This
+        * also ensures a modem-initiated setup that's underway completes.
+        */
+       ipa_smp2p_irq_disable_setup(ipa);
+
        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0))
                goto out_power_put;
index ad116bcc0580e859ac790ee93e4f36f88ace02a2..d0ab4d70c303b098a2de56011c9d87030a181753 100644 (file)
@@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa)
        if (state != IPA_MODEM_STATE_RUNNING)
                return -EBUSY;
 
-       /* Prevent the modem from triggering a call to ipa_setup() */
-       ipa_smp2p_disable(ipa);
-
        /* Clean up the netdev and endpoints if it was started */
        if (netdev) {
                struct ipa_priv *priv = netdev_priv(netdev);
@@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa)
        struct device *dev = &ipa->pdev->dev;
        int ret;
 
+       /* Prevent the modem from triggering a call to ipa_setup() */
+       ipa_smp2p_irq_disable_setup(ipa);
+
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "error %d getting power to handle crash\n", ret);
index e3da95d69409914b3e672f16d383da97b5278de3..06cec71993823deeadf98756bc95e4b43ae63d01 100644 (file)
@@ -52,7 +52,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
                                return false;
        }
 
-       group_count = data->rsrc_group_src_count;
+       group_count = data->rsrc_group_dst_count;
        if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
                return false;
 
index df7639c39d7160a2c1de809c199e6fb23b3748e1..2112336120391c21b26a6bf5df5cf334deee1ee2 100644 (file)
@@ -53,7 +53,7 @@
  * @setup_ready_irq:   IPA interrupt triggered by modem to signal GSI ready
  * @power_on:          Whether IPA power is on
  * @notified:          Whether modem has been notified of power state
- * @disabled:          Whether setup ready interrupt handling is disabled
+ * @setup_disabled:    Whether setup ready interrupt handler is disabled
  * @mutex:             Mutex protecting ready-interrupt/shutdown interlock
  * @panic_notifier:    Panic notifier structure
 */
@@ -67,7 +67,7 @@ struct ipa_smp2p {
        u32 setup_ready_irq;
        bool power_on;
        bool notified;
-       bool disabled;
+       bool setup_disabled;
        struct mutex mutex;
        struct notifier_block panic_notifier;
 };
@@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
        struct device *dev;
        int ret;
 
-       mutex_lock(&smp2p->mutex);
-
-       if (smp2p->disabled)
-               goto out_mutex_unlock;
-       smp2p->disabled = true;         /* If any others arrive, ignore them */
+       /* Ignore any (spurious) interrupts received after the first */
+       if (smp2p->ipa->setup_complete)
+               return IRQ_HANDLED;
 
        /* Power needs to be active for setup */
        dev = &smp2p->ipa->pdev->dev;
@@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
 out_power_put:
        pm_runtime_mark_last_busy(dev);
        (void)pm_runtime_put_autosuspend(dev);
-out_mutex_unlock:
-       mutex_unlock(&smp2p->mutex);
 
        return IRQ_HANDLED;
 }
@@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa)
        kfree(smp2p);
 }
 
-void ipa_smp2p_disable(struct ipa *ipa)
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa)
 {
        struct ipa_smp2p *smp2p = ipa->smp2p;
 
@@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa)
 
        mutex_lock(&smp2p->mutex);
 
-       smp2p->disabled = true;
+       if (!smp2p->setup_disabled) {
+               disable_irq(smp2p->setup_ready_irq);
+               smp2p->setup_disabled = true;
+       }
 
        mutex_unlock(&smp2p->mutex);
 }
index 99a9567896388b4d6ceb276146014c6b09cef1e6..59cee31a738365dda7c50649a0f70fa892bc40d9 100644 (file)
@@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
 void ipa_smp2p_exit(struct ipa *ipa);
 
 /**
- * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
+ * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt
  * @ipa:       IPA pointer
  *
- * Prevent handling of the "setup ready" interrupt from the modem.
- * This is used before initiating shutdown of the driver.
+ * Disable the "ipa-setup-ready" interrupt from the modem.
  */
-void ipa_smp2p_disable(struct ipa *ipa);
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa);
 
 /**
  * ipa_smp2p_notify_reset() - Reset modem notification state
index cad820568f75119f70c234d9efe15cb54847bd7f..966c3b4ad59d14e74bc113680e0fbff231d296d2 100644 (file)
@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
 
        iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
 
+       rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+                               !(ctrl & ASPEED_MDIO_CTRL_FIRE),
+                               ASPEED_MDIO_INTERVAL_US,
+                               ASPEED_MDIO_TIMEOUT_US);
+       if (rc < 0)
+               return rc;
+
        rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
                                data & ASPEED_MDIO_DATA_IDLE,
                                ASPEED_MDIO_INTERVAL_US,
index 3ad7397b81198acb45dcbc34f977058c12e223ab..ea82ea5660e788709d676c3d8c35f02416d56bff 100644 (file)
@@ -710,6 +710,7 @@ static void phylink_resolve(struct work_struct *w)
        struct phylink_link_state link_state;
        struct net_device *ndev = pl->netdev;
        bool mac_config = false;
+       bool retrigger = false;
        bool cur_link_state;
 
        mutex_lock(&pl->state_mutex);
@@ -723,6 +724,7 @@ static void phylink_resolve(struct work_struct *w)
                link_state.link = false;
        } else if (pl->mac_link_dropped) {
                link_state.link = false;
+               retrigger = true;
        } else {
                switch (pl->cur_link_an_mode) {
                case MLO_AN_PHY:
@@ -739,6 +741,19 @@ static void phylink_resolve(struct work_struct *w)
                case MLO_AN_INBAND:
                        phylink_mac_pcs_get_state(pl, &link_state);
 
+                       /* The PCS may have a latching link-fail indicator.
+                        * If the link was up, bring the link down and
+                        * re-trigger the resolve. Otherwise, re-read the
+                        * PCS state to get the current status of the link.
+                        */
+                       if (!link_state.link) {
+                               if (cur_link_state)
+                                       retrigger = true;
+                               else
+                                       phylink_mac_pcs_get_state(pl,
+                                                                 &link_state);
+                       }
+
                        /* If we have a phy, the "up" state is the union of
                         * both the PHY and the MAC
                         */
@@ -747,6 +762,15 @@ static void phylink_resolve(struct work_struct *w)
 
                        /* Only update if the PHY link is up */
                        if (pl->phydev && pl->phy_state.link) {
+                               /* If the interface has changed, force a
+                                * link down event if the link isn't already
+                                * down, and re-resolve.
+                                */
+                               if (link_state.interface !=
+                                   pl->phy_state.interface) {
+                                       retrigger = true;
+                                       link_state.link = false;
+                               }
                                link_state.interface = pl->phy_state.interface;
 
                                /* If we have a PHY, we need to update with
@@ -789,7 +813,7 @@ static void phylink_resolve(struct work_struct *w)
                else
                        phylink_link_up(pl, link_state);
        }
-       if (!link_state.link && pl->mac_link_dropped) {
+       if (!link_state.link && retrigger) {
                pl->mac_link_dropped = false;
                queue_work(system_power_efficient_wq, &pl->resolve);
        }
@@ -1364,6 +1388,7 @@ EXPORT_SYMBOL_GPL(phylink_stop);
  * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
  *
  * Handle a network device suspend event. There are several cases:
+ *
  * - If Wake-on-Lan is not active, we can bring down the link between
  *   the MAC and PHY by calling phylink_stop().
  * - If Wake-on-Lan is active, and being handled only by the PHY, we
index c420e59485221d31126ba97b23814c6ac0fa268e..3d7f88b330c1e097bc20076ac982404c294ae6e1 100644 (file)
@@ -40,6 +40,8 @@
                                           insmod -oslip_maxdev=nnn     */
 #define SL_MTU         296             /* 296; I am used to 600- FvK   */
 
+/* some arch define END as assembly function ending, just undef it */
+#undef END
 /* SLIP protocol characters. */
 #define END             0300           /* indicates end of frame       */
 #define ESC             0333           /* indicates byte stuffing      */
index fecc9a1d293ae6c8a354f42e76ba58b2a57da871..1572878c340319953fc07ab2721dc9a994ed1b88 100644 (file)
@@ -1010,6 +1010,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
        int txq = skb->queue_mapping;
+       struct netdev_queue *queue;
        struct tun_file *tfile;
        int len = skb->len;
 
@@ -1054,6 +1055,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        if (ptr_ring_produce(&tfile->tx_ring, skb))
                goto drop;
 
+       /* NETIF_F_LLTX requires to do our own update of trans_start */
+       queue = netdev_get_tx_queue(dev, txq);
+       queue->trans_start = jiffies;
+
        /* Notify and wake up reader process */
        if (tfile->flags & TUN_FASYNC)
                kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
index 24753a4da7e606ef956d6fcb45d2da51d6f67f5b..e303b522efb50a4bb86443da3ffd20794e666734 100644 (file)
@@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
                min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
 
        max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+       if (max == 0)
+               max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
 
        /* some devices set dwNtbOutMaxSize too low for the above default */
        min = min(min, max);
index f20376c1ef3fb1f3cc924a7e06a52801aac7696a..8cd265fc1fd9d4eb11b5196e7d831d2ec828f58c 100644 (file)
@@ -2228,7 +2228,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
        if (dev->domain_data.phyirq > 0)
                phydev->irq = dev->domain_data.phyirq;
        else
-               phydev->irq = 0;
+               phydev->irq = PHY_POLL;
        netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
 
        /* set to AUTOMDIX */
index 4a02f33f0643d77ce94d859a5f45e486936a929b..f9877a3e83acf8194a923e5dbdb129f00da202b9 100644 (file)
@@ -9603,12 +9603,9 @@ static int rtl8152_probe(struct usb_interface *intf,
                netdev->hw_features &= ~NETIF_F_RXCSUM;
        }
 
-       if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
-               switch (le16_to_cpu(udev->descriptor.idProduct)) {
-               case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
-               case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
-                       tp->lenovo_macpassthru = 1;
-               }
+       if (udev->parent &&
+                       le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+               tp->lenovo_macpassthru = 1;
        }
 
        if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
index 20fe4cd8f7840c737bbc00d6c457ca51403620e1..abe0149ed917a5fe83225b3baf7b56ce1d385e42 100644 (file)
@@ -1050,6 +1050,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
        .ndo_set_features       = smsc95xx_set_features,
 };
 
+static void smsc95xx_handle_link_change(struct net_device *net)
+{
+       struct usbnet *dev = netdev_priv(net);
+
+       phy_print_status(net->phydev);
+       usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
+}
+
 static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc95xx_priv *pdata;
@@ -1154,6 +1162,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->min_mtu = ETH_MIN_MTU;
        dev->net->max_mtu = ETH_DATA_LEN;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+       ret = phy_connect_direct(dev->net, pdata->phydev,
+                                &smsc95xx_handle_link_change,
+                                PHY_INTERFACE_MODE_MII);
+       if (ret) {
+               netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id);
+               goto unregister_mdio;
+       }
+
+       phy_attached_info(dev->net->phydev);
+
        return 0;
 
 unregister_mdio:
@@ -1171,47 +1190,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc95xx_priv *pdata = dev->driver_priv;
 
+       phy_disconnect(dev->net->phydev);
        mdiobus_unregister(pdata->mdiobus);
        mdiobus_free(pdata->mdiobus);
        netif_dbg(dev, ifdown, dev->net, "free pdata\n");
        kfree(pdata);
 }
 
-static void smsc95xx_handle_link_change(struct net_device *net)
-{
-       struct usbnet *dev = netdev_priv(net);
-
-       phy_print_status(net->phydev);
-       usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
-}
-
 static int smsc95xx_start_phy(struct usbnet *dev)
 {
-       struct smsc95xx_priv *pdata = dev->driver_priv;
-       struct net_device *net = dev->net;
-       int ret;
+       phy_start(dev->net->phydev);
 
-       ret = smsc95xx_reset(dev);
-       if (ret < 0)
-               return ret;
-
-       ret = phy_connect_direct(net, pdata->phydev,
-                                &smsc95xx_handle_link_change,
-                                PHY_INTERFACE_MODE_MII);
-       if (ret) {
-               netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id);
-               return ret;
-       }
-
-       phy_attached_info(net->phydev);
-       phy_start(net->phydev);
        return 0;
 }
 
-static int smsc95xx_disconnect_phy(struct usbnet *dev)
+static int smsc95xx_stop(struct usbnet *dev)
 {
-       phy_stop(dev->net->phydev);
-       phy_disconnect(dev->net->phydev);
+       if (dev->net->phydev)
+               phy_stop(dev->net->phydev);
+
        return 0;
 }
 
@@ -1966,7 +1963,7 @@ static const struct driver_info smsc95xx_info = {
        .unbind         = smsc95xx_unbind,
        .link_reset     = smsc95xx_link_reset,
        .reset          = smsc95xx_start_phy,
-       .stop           = smsc95xx_disconnect_phy,
+       .stop           = smsc95xx_stop,
        .rx_fixup       = smsc95xx_rx_fixup,
        .tx_fixup       = smsc95xx_tx_fixup,
        .status         = smsc95xx_status,
index 1771d6e5224fd834a4dfca4ba578134439d4d201..55db6a336f7ead862ab98ab99e4518a7d2b3b87f 100644 (file)
@@ -3423,7 +3423,6 @@ static struct virtio_driver virtio_net_driver = {
        .feature_table_size = ARRAY_SIZE(features),
        .feature_table_legacy = features_legacy,
        .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
-       .suppress_used_validation = true,
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
index 14fae317bc70f738be004ddc31be61ce6f95974c..fd407c0e28569b171d4234c0d7f4b6ef04c6198a 100644 (file)
@@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 
 #ifdef CONFIG_PCI_MSI
        if (adapter->intr.type == VMXNET3_IT_MSIX) {
-               int i, nvec;
+               int i, nvec, nvec_allocated;
 
                nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
                        1 : adapter->num_tx_queues;
@@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                for (i = 0; i < nvec; i++)
                        adapter->intr.msix_entries[i].entry = i;
 
-               nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
-               if (nvec < 0)
+               nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
+               if (nvec_allocated < 0)
                        goto msix_err;
 
                /* If we cannot allocate one MSIx vector per queue
                 * then limit the number of rx queues to 1
                 */
-               if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
+               if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
+                   nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
                        if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
                            || adapter->num_rx_queues != 1) {
                                adapter->share_intr = VMXNET3_INTR_TXSHARE;
@@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                        }
                }
 
-               adapter->intr.num_intrs = nvec;
+               adapter->intr.num_intrs = nvec_allocated;
                return;
 
 msix_err:
                /* If we cannot allocate MSIx vectors use only one rx queue */
                dev_info(&adapter->pdev->dev,
                         "Failed to enable MSI-X, error %d. "
-                        "Limiting #rx queues to 1, try MSI.\n", nvec);
+                        "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
 
                adapter->intr.type = VMXNET3_IT_MSI;
        }
index ccf677015d5bc7f7a93d42f1c71571eb08b38e0f..b2242a082431c2c7621a5fbc1f21fb750c7e2da5 100644 (file)
@@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
        /* strip the ethernet header added for pass through VRF device */
        __skb_pull(skb, skb_network_offset(skb));
 
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
        ret = vrf_ip6_local_out(net, skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                dev->stats.tx_errors++;
@@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                               RT_SCOPE_LINK);
        }
 
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
        ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                vrf_dev->stats.tx_errors++;
@@ -768,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
 
        skb->dev = vrf_dev;
 
-       vrf_nf_set_untracked(skb);
-
        err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
                      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
 
@@ -790,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
        if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
                return skb;
 
+       vrf_nf_set_untracked(skb);
+
        if (qdisc_tx_is_default(vrf_dev) ||
            IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
                return vrf_ip6_out_direct(vrf_dev, sk, skb);
@@ -998,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
 
        skb->dev = vrf_dev;
 
-       vrf_nf_set_untracked(skb);
-
        err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
                      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
 
@@ -1021,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
            ipv4_is_lbcast(ip_hdr(skb)->daddr))
                return skb;
 
+       vrf_nf_set_untracked(skb);
+
        if (qdisc_tx_is_default(vrf_dev) ||
            IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
                return vrf_ip_out_direct(vrf_dev, sk, skb);
index b7197e80f2264053d4e4e28bdf69a33038335294..9a4c8ff32d9dd9407ec50591a44008570f4e7411 100644 (file)
@@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
        return exact;
 }
 
-static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
 {
        node->parent_bit_packed = (unsigned long)parent | bit;
        rcu_assign_pointer(*parent, node);
index 551ddaaaf5400e6eb3e138853f0b4710e0a48901..a46067c38bf5def99accd4b885d3c3bd3095ad4b 100644 (file)
@@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev)
 {
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
+       struct sk_buff *skb;
 
        mutex_lock(&wg->device_update_lock);
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
@@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev)
                wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
        }
        mutex_unlock(&wg->device_update_lock);
-       skb_queue_purge(&wg->incoming_handshakes);
+       while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
+               kfree_skb(skb);
+       atomic_set(&wg->handshake_queue_len, 0);
        wg_socket_reinit(wg, NULL, NULL);
        return 0;
 }
@@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev)
        destroy_workqueue(wg->handshake_receive_wq);
        destroy_workqueue(wg->handshake_send_wq);
        destroy_workqueue(wg->packet_crypt_wq);
-       wg_packet_queue_free(&wg->decrypt_queue);
-       wg_packet_queue_free(&wg->encrypt_queue);
+       wg_packet_queue_free(&wg->handshake_queue, true);
+       wg_packet_queue_free(&wg->decrypt_queue, false);
+       wg_packet_queue_free(&wg->encrypt_queue, false);
        rcu_barrier(); /* Wait for all the peers to be actually freed. */
        wg_ratelimiter_uninit();
        memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
-       skb_queue_purge(&wg->incoming_handshakes);
        free_percpu(dev->tstats);
-       free_percpu(wg->incoming_handshakes_worker);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);
@@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
-       skb_queue_head_init(&wg->incoming_handshakes);
        wg_allowedips_init(&wg->peer_allowedips);
        wg_cookie_checker_init(&wg->cookie_checker, wg);
        INIT_LIST_HEAD(&wg->peer_list);
@@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        if (!dev->tstats)
                goto err_free_index_hashtable;
 
-       wg->incoming_handshakes_worker =
-               wg_packet_percpu_multicore_worker_alloc(
-                               wg_packet_handshake_receive_worker, wg);
-       if (!wg->incoming_handshakes_worker)
-               goto err_free_tstats;
-
        wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
                        WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_receive_wq)
-               goto err_free_incoming_handshakes;
+               goto err_free_tstats;
 
        wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
                        WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        if (ret < 0)
                goto err_free_encrypt_queue;
 
-       ret = wg_ratelimiter_init();
+       ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
+                                  MAX_QUEUED_INCOMING_HANDSHAKES);
        if (ret < 0)
                goto err_free_decrypt_queue;
 
+       ret = wg_ratelimiter_init();
+       if (ret < 0)
+               goto err_free_handshake_queue;
+
        ret = register_netdevice(dev);
        if (ret < 0)
                goto err_uninit_ratelimiter;
@@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 
 err_uninit_ratelimiter:
        wg_ratelimiter_uninit();
+err_free_handshake_queue:
+       wg_packet_queue_free(&wg->handshake_queue, false);
 err_free_decrypt_queue:
-       wg_packet_queue_free(&wg->decrypt_queue);
+       wg_packet_queue_free(&wg->decrypt_queue, false);
 err_free_encrypt_queue:
-       wg_packet_queue_free(&wg->encrypt_queue);
+       wg_packet_queue_free(&wg->encrypt_queue, false);
 err_destroy_packet_crypt:
        destroy_workqueue(wg->packet_crypt_wq);
 err_destroy_handshake_send:
        destroy_workqueue(wg->handshake_send_wq);
 err_destroy_handshake_receive:
        destroy_workqueue(wg->handshake_receive_wq);
-err_free_incoming_handshakes:
-       free_percpu(wg->incoming_handshakes_worker);
 err_free_tstats:
        free_percpu(dev->tstats);
 err_free_index_hashtable:
@@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
 static void wg_netns_pre_exit(struct net *net)
 {
        struct wg_device *wg;
+       struct wg_peer *peer;
 
        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
@@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net)
                        mutex_lock(&wg->device_update_lock);
                        rcu_assign_pointer(wg->creating_net, NULL);
                        wg_socket_reinit(wg, NULL, NULL);
+                       list_for_each_entry(peer, &wg->peer_list, peer_list)
+                               wg_socket_clear_peer_endpoint_src(peer);
                        mutex_unlock(&wg->device_update_lock);
                }
        }
index 854bc3d97150e1c1dab3befbe64966add4f65746..43c7cebbf50b08f2a1868f0017d0bee8aee700f8 100644 (file)
@@ -39,21 +39,18 @@ struct prev_queue {
 
 struct wg_device {
        struct net_device *dev;
-       struct crypt_queue encrypt_queue, decrypt_queue;
+       struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
        struct sock __rcu *sock4, *sock6;
        struct net __rcu *creating_net;
        struct noise_static_identity static_identity;
-       struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
-       struct workqueue_struct *packet_crypt_wq;
-       struct sk_buff_head incoming_handshakes;
-       int incoming_handshake_cpu;
-       struct multicore_worker __percpu *incoming_handshakes_worker;
+       struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq;
        struct cookie_checker cookie_checker;
        struct pubkey_hashtable *peer_hashtable;
        struct index_hashtable *index_hashtable;
        struct allowedips peer_allowedips;
        struct mutex device_update_lock, socket_update_lock;
        struct list_head device_list, peer_list;
+       atomic_t handshake_queue_len;
        unsigned int num_peers, device_update_gen;
        u32 fwmark;
        u16 incoming_port;
index 75dbe77b0b4b4aeacbc75d77524108248b37a2fa..ee4da9ab8013c3ad2721e0e1d4432b2fe007886b 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/genetlink.h>
 #include <net/rtnetlink.h>
 
-static int __init mod_init(void)
+static int __init wg_mod_init(void)
 {
        int ret;
 
@@ -60,7 +60,7 @@ err_allowedips:
        return ret;
 }
 
-static void __exit mod_exit(void)
+static void __exit wg_mod_exit(void)
 {
        wg_genetlink_uninit();
        wg_device_uninit();
@@ -68,8 +68,8 @@ static void __exit mod_exit(void)
        wg_allowedips_slab_uninit();
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(wg_mod_init);
+module_exit(wg_mod_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("WireGuard secure network tunnel");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index 48e7b982a30736bc147712ce52957517e3e862af..1de413b19e3424a2ace2edcbcf0d0d49c4be6167 100644 (file)
@@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
        return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
        free_percpu(queue->worker);
-       WARN_ON(!__ptr_ring_empty(&queue->ring));
-       ptr_ring_cleanup(&queue->ring, NULL);
+       WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+       ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
 }
 
 #define NEXT(skb) ((skb)->prev)
index 4ef2944a68bc906ebec5167d1e17e281ea67be61..e2388107f7fdc9c040841adfc164459e0c777d7d 100644 (file)
@@ -23,7 +23,7 @@ struct sk_buff;
 /* queueing.c APIs: */
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                         unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
 
index 3fedd1d21f5ee019917a7280294cfc7398e65b11..dd55e5c26f468f71518cc5956f4b84480af5d9c8 100644 (file)
@@ -176,12 +176,12 @@ int wg_ratelimiter_init(void)
                        (1U << 14) / sizeof(struct hlist_head)));
        max_entries = table_size * 8;
 
-       table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+       table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
        if (unlikely(!table_v4))
                goto err_kmemcache;
 
 #if IS_ENABLED(CONFIG_IPV6)
-       table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+       table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
        if (unlikely(!table_v6)) {
                kvfree(table_v4);
                goto err_kmemcache;
index 7dc84bcca26139991be00759c0228d3126df2671..7b8df406c7737398f0270361afcb196af4b6a76e 100644 (file)
@@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
                return;
        }
 
-       under_load = skb_queue_len(&wg->incoming_handshakes) >=
-                    MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+       under_load = atomic_read(&wg->handshake_queue_len) >=
+                       MAX_QUEUED_INCOMING_HANDSHAKES / 8;
        if (under_load) {
                last_under_load = ktime_get_coarse_boottime_ns();
        } else if (last_under_load) {
@@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
 
 void wg_packet_handshake_receive_worker(struct work_struct *work)
 {
-       struct wg_device *wg = container_of(work, struct multicore_worker,
-                                           work)->ptr;
+       struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+       struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+       while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                wg_receive_handshake_packet(wg, skb);
                dev_kfree_skb(skb);
+               atomic_dec(&wg->handshake_queue_len);
                cond_resched();
        }
 }
@@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
        case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
        case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
        case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
-               int cpu;
-
-               if (skb_queue_len(&wg->incoming_handshakes) >
-                           MAX_QUEUED_INCOMING_HANDSHAKES ||
-                   unlikely(!rng_is_initialized())) {
+               int cpu, ret = -EBUSY;
+
+               if (unlikely(!rng_is_initialized()))
+                       goto drop;
+               if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+                       if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+                               ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+                               spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+                       }
+               } else
+                       ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+               if (ret) {
+       drop:
                        net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
                                                wg->dev->name, skb);
                        goto err;
                }
-               skb_queue_tail(&wg->incoming_handshakes, skb);
-               /* Queues up a call to packet_process_queued_handshake_
-                * packets(skb):
-                */
-               cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+               atomic_inc(&wg->handshake_queue_len);
+               cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+               /* Queues up a call to packet_process_queued_handshake_packets(skb): */
                queue_work_on(cpu, wg->handshake_receive_wq,
-                       &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+                             &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
                break;
        }
        case cpu_to_le32(MESSAGE_DATA):
index 8c496b7471082eb6c093154d7a05662718f9999c..6f07b949cb81d037842934d6836f3c8b79e4d0ed 100644 (file)
@@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
 {
        write_lock_bh(&peer->endpoint_lock);
        memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
-       dst_cache_reset(&peer->endpoint_cache);
+       dst_cache_reset_now(&peer->endpoint_cache);
        write_unlock_bh(&peer->endpoint_lock);
 }
 
index 26c7ae242db67bd41f88c69f30f106b9a8bf577c..49c0b1ad40a02d4abcb18874dd1d3ca9e6cbc5ad 100644 (file)
@@ -533,7 +533,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
                ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
                break;
        case ATH11K_MHI_RESUME:
-               ret = mhi_pm_resume(ab_pci->mhi_ctrl);
+               /* Do force MHI resume as some devices like QCA6390, WCN6855
+                * are not in M3 state but they are functional. So just ignore
+                * the MHI state while resuming.
+                */
+               ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
                break;
        case ATH11K_MHI_TRIGGER_RDDM:
                ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
index c875bf35533ce4b8126f88e78ae9bb271d951129..009dd4be597b0c8c096582eeb876cbcb30ba658c 100644 (file)
@@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                if (len < tlv_len) {
                        IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
                                len, tlv_len);
+                       kfree(reduce_power_data);
                        reduce_power_data = ERR_PTR(-EINVAL);
                        goto out;
                }
@@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                                IWL_DEBUG_FW(trans,
                                             "Couldn't allocate (more) reduce_power_data\n");
 
+                               kfree(reduce_power_data);
                                reduce_power_data = ERR_PTR(-ENOMEM);
                                goto out;
                        }
@@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
 done:
        if (!size) {
                IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+               /* Better safe than sorry, but 'reduce_power_data' should
+                * always be NULL if !size.
+                */
+               kfree(reduce_power_data);
                reduce_power_data = ERR_PTR(-ENOENT);
                goto out;
        }
index 36196e07b1a04597d61b5cadca7f7d0d927fc432..5cec467b995bb665f340d95b6aa33f10436ab0e0 100644 (file)
@@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
        const struct iwl_op_mode_ops *ops = op->ops;
        struct dentry *dbgfs_dir = NULL;
        struct iwl_op_mode *op_mode = NULL;
+       int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
+
+       for (retry = 0; retry <= max_retry; retry++) {
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       drv->dbgfs_op_mode = debugfs_create_dir(op->name,
-                                               drv->dbgfs_drv);
-       dbgfs_dir = drv->dbgfs_op_mode;
+               drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+                                                       drv->dbgfs_drv);
+               dbgfs_dir = drv->dbgfs_op_mode;
 #endif
 
-       op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+               op_mode = ops->start(drv->trans, drv->trans->cfg,
+                                    &drv->fw, dbgfs_dir);
+
+               if (op_mode)
+                       return op_mode;
+
+               IWL_ERR(drv, "retry init count %d\n", retry);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (!op_mode) {
                debugfs_remove_recursive(drv->dbgfs_op_mode);
                drv->dbgfs_op_mode = NULL;
-       }
 #endif
+       }
 
-       return op_mode;
+       return NULL;
 }
 
 static void _iwl_op_mode_stop(struct iwl_drv *drv)
index 2e2d60a586925d8a03714ab5919344da50ae4438..0fd009e6d6857f5939e73356c9f73a46001fe4d5 100644 (file)
@@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
 #define IWL_EXPORT_SYMBOL(sym)
 #endif
 
+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
 #endif /* __iwl_drv_h__ */
index 9fb9c7dad314f1bcbbf6e4f67645befdb3d31d08..897e3b91ddb2fec9e3d2f85842fbcc85f44281ca 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/ieee80211_radiotap.h>
 #include <net/tcp.h>
 
+#include "iwl-drv.h"
 #include "iwl-op-mode.h"
 #include "iwl-io.h"
 #include "mvm.h"
@@ -1117,9 +1118,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
+       int retry, max_retry = 0;
 
        mutex_lock(&mvm->mutex);
-       ret = __iwl_mvm_mac_start(mvm);
+
+       /* we are starting the mac not in error flow, and restart is enabled */
+       if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+           iwlwifi_mod_params.fw_restart) {
+               max_retry = IWL_MAX_INIT_RETRY;
+               /*
+                * This will prevent mac80211 recovery flows to trigger during
+                * init failures
+                */
+               set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+       }
+
+       for (retry = 0; retry <= max_retry; retry++) {
+               ret = __iwl_mvm_mac_start(mvm);
+               if (!ret)
+                       break;
+
+               IWL_ERR(mvm, "mac start retry %d\n", retry);
+       }
+       clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+
        mutex_unlock(&mvm->mutex);
 
        return ret;
index 2b1dcd60e00f65598d77712b042493247b4a2b91..a72d85086fe331cb256a2e88c88293a5e4b55bf0 100644 (file)
@@ -1123,6 +1123,8 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
  * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
  * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_STARTING: starting mac,
+ *     used to disable restart flow while in STARTING state
  */
 enum iwl_mvm_status {
        IWL_MVM_STATUS_HW_RFKILL,
@@ -1134,6 +1136,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_FIRMWARE_RUNNING,
        IWL_MVM_STATUS_NEED_FLUSH_P2P,
        IWL_MVM_STATUS_IN_D3,
+       IWL_MVM_STATUS_STARTING,
 };
 
 /* Keep track of completed init configuration */
index 232ad531d612a2d2b4e85fc7a8558919b119ad8d..cd08e289cd9a0bf8c3746e1734ea72664277074b 100644 (file)
@@ -686,6 +686,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
        int ret;
 
        rtnl_lock();
+       wiphy_lock(mvm->hw->wiphy);
        mutex_lock(&mvm->mutex);
 
        ret = iwl_run_init_mvm_ucode(mvm);
@@ -701,6 +702,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
                iwl_mvm_stop_device(mvm);
 
        mutex_unlock(&mvm->mutex);
+       wiphy_unlock(mvm->hw->wiphy);
        rtnl_unlock();
 
        if (ret < 0)
@@ -1600,6 +1602,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         */
        if (!mvm->fw_restart && fw_error) {
                iwl_fw_error_collect(&mvm->fwrt, false);
+       } else if (test_bit(IWL_MVM_STATUS_STARTING,
+                           &mvm->status)) {
+               IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
        } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
 
index c574f041f0969268132a1fe3f68b347bea3b903a..5ce07f28e7c33e853768581f851e3b72e9bd715d 100644 (file)
@@ -1339,9 +1339,13 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
                      u16 mac_type, u8 mac_step,
                      u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
 {
+       int num_devices = ARRAY_SIZE(iwl_dev_info_table);
        int i;
 
-       for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) {
+       if (!num_devices)
+               return NULL;
+
+       for (i = num_devices - 1; i >= 0; i--) {
                const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
 
                if (dev_info->device != (u16)IWL_CFG_ANY &&
@@ -1442,8 +1446,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        if (iwl_trans->trans_cfg->rf_id &&
            iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
-           !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans))
+           !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) {
+               ret = -EINVAL;
                goto out_free_trans;
+       }
 
        dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
                                         CSR_HW_REV_TYPE(iwl_trans->hw_rev),
index 5ee52cd70a4b45a1be50b518015a8e0943b71879..d1806f198aed99e866aa9683d8114eba2e92be5c 100644 (file)
@@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (!wcid)
                wcid = &dev->mt76.global_wcid;
 
-       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
-
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
                struct mt7615_phy *phy = &dev->phy;
 
@@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (id < 0)
                return id;
 
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
        mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
                              pid, key, false);
 
index bd2939ebcbf4841a4265e162b0cfacdee11a7338..5a6d7829c6e04f7fd9c101e62fbc047d75486b6b 100644 (file)
@@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
 static void
 mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                           enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                          struct ieee80211_key_conf *key, int pid,
                           struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       __le32 *txwi;
-       int pid;
-
-       if (!wcid)
-               wcid = &dev->mt76.global_wcid;
-
-       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
 
-       txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
        memset(txwi, 0, MT_USB_TXD_SIZE);
        mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
        skb_push(skb, MT_USB_TXD_SIZE);
@@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        struct sk_buff *skb = tx_info->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
        struct mt7615_sta *msta;
-       int pad;
+       int pad, err, pktid;
 
        msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+       if (!wcid)
+               wcid = &dev->mt76.global_wcid;
+
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
            msta && !msta->rate_probe) {
                /* request to configure sampling rate */
@@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                spin_unlock_bh(&dev->mt76.lock);
        }
 
-       mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
        if (mt76_is_usb(mdev)) {
                u32 len = skb->len;
 
@@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                pad = round_up(skb->len, 4) - skb->len;
        }
 
-       return mt76_skb_adjust_pad(skb, pad);
+       err = mt76_skb_adjust_pad(skb, pad);
+       if (err)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pktid);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
 
index efd70ddc2fd109b754df8e8c12c71c5b2403f9d0..2c6c03809b20eb628b99a22bb851ff07f14be2a6 100644 (file)
@@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
        bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
        enum mt76_qsel qsel;
        u32 flags;
+       int err;
 
        mt76_insert_hdr_pad(tx_info->skb);
 
@@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
                ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
        }
 
-       return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+       err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+       if (err && wcid)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pid);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
 
index 5fcf35f2d9fbe42760c7ff28b53c3a96f68d62f8..809dc18e5083c21206447e4b8f4e811fd0c3c41f 100644 (file)
@@ -1151,8 +1151,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                }
        }
 
-       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+       t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+       t->skb = tx_info->skb;
+
+       id = mt76_token_consume(mdev, &t);
+       if (id < 0)
+               return id;
 
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
        mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
                              false);
 
@@ -1178,13 +1184,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                txp->bss_idx = mvif->idx;
        }
 
-       t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
-       t->skb = tx_info->skb;
-
-       id = mt76_token_consume(mdev, &t);
-       if (id < 0)
-               return id;
-
        txp->token = cpu_to_le16(id);
        if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
                txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
index 899957b9d0f19c6c11ccb3f0c52654d49e3dec50..852d5d97c70b1517902af6177b5f16a0c0a98698 100644 (file)
@@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
                if (ht_cap->ht_supported)
                        mode |= PHY_MODE_GN;
 
-               if (he_cap->has_he)
+               if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_24G;
        } else if (band == NL80211_BAND_5GHZ) {
                mode |= PHY_MODE_A;
@@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
                if (vht_cap->vht_supported)
                        mode |= PHY_MODE_AC;
 
-               if (he_cap->has_he)
+               if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_5G;
        }
 
index 137f86a6dbf875d3122c23b2a26e670843a16db4..bdec508b6b9ffa00a65875df4de56259ad5be02e 100644 (file)
@@ -142,15 +142,11 @@ out:
 static void
 mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
                   enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key, int pid,
                   struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       __le32 *txwi;
-       int pid;
+       __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
 
-       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
-       txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
        memset(txwi, 0, MT_SDIO_TXD_SIZE);
        mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
        skb_push(skb, MT_SDIO_TXD_SIZE);
@@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 {
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
        struct sk_buff *skb = tx_info->skb;
-       int pad;
+       int err, pad, pktid;
 
        if (unlikely(tx_info->skb->len <= ETH_HLEN))
                return -EINVAL;
@@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                }
        }
 
-       mt7921s_write_txwi(dev, wcid, qid, sta, skb);
+       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
 
        mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
        pad = round_up(skb->len, 4) - skb->len;
 
-       return mt76_skb_adjust_pad(skb, pad);
+       err = mt76_skb_adjust_pad(skb, pad);
+       if (err)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pktid);
+
+       return err;
 }
 
 void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
index 11719ef034d888482144f30dab21d0270e480ffc..6b8c9dc80542554f19a2232f4080bb5cd9b79da8 100644 (file)
@@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                        if (!(cb->flags & MT_TX_CB_DMA_DONE))
                                continue;
 
-                       if (!time_is_after_jiffies(cb->jiffies +
+                       if (time_is_after_jiffies(cb->jiffies +
                                                   MT_TX_STATUS_SKB_TIMEOUT))
                                continue;
                }
index e4473a5512415241d012f973862e12d122b3845e..74c3d8cb31002d0ec583fbbda66df4d9efcde6db 100644 (file)
@@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
        if (status == -ENODEV || status == -ENOENT)
                return true;
 
+       if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
+               return false;
+
        if (status == -EPROTO || status == -ETIMEDOUT)
                rt2x00dev->num_proto_errs++;
        else
index 212aaf577d3c5eca878c0793749f142720658a0f..65ef3dc9d061415acc9ce97cd11ebafb742b2b36 100644 (file)
@@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
        info->section_num = GET_FW_HDR_SEC_NUM(fw);
        info->hdr_len = RTW89_FW_HDR_SIZE +
                        info->section_num * RTW89_FW_SECTION_HDR_SIZE;
-       SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN);
 
        bin = fw + info->hdr_len;
 
@@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
        }
 
        skb_put_data(skb, fw, len);
+       SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
        rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
                                   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
                                   H2C_FUNC_MAC_FWHDR_DL, len);
index 7ee0d932331075359d0af2f14ba2048822e0d531..36e8d0da6c1e78a0af948c8e4d5caaf4dd7e48a8 100644 (file)
@@ -282,8 +282,10 @@ struct rtw89_h2creg_sch_tx_en {
        le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8))
 #define GET_FW_HDR_CMD_VERSERION(fwhdr)        \
        le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24))
-#define SET_FW_HDR_PART_SIZE(fwhdr, val)       \
-       le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0))
+static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
+{
+       le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
+}
 
 #define SET_CTRL_INFO_MACID(table, val) \
        le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0))
index cff3b43ca4d7d3f9ce813aef5e9148c10f0c1979..12c03dacb5dd0c032d7963ac7bfb4c6f9e4a3082 100644 (file)
@@ -181,9 +181,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
 bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
 {
        struct ipc_mem_channel *channel;
+       bool hpda_ctrl_pending = false;
        struct sk_buff_head *ul_list;
        bool hpda_pending = false;
-       bool forced_hpdu = false;
        struct ipc_pipe *pipe;
        int i;
 
@@ -200,15 +200,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
                ul_list = &channel->ul_list;
 
                /* Fill the transfer descriptor with the uplink buffer info. */
-               hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+               if (!ipc_imem_check_wwan_ips(channel)) {
+                       hpda_ctrl_pending |=
+                               ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
                                                        pipe, ul_list);
-
-               /* forced HP update needed for non data channels */
-               if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
-                       forced_hpdu = true;
+               } else {
+                       hpda_pending |=
+                               ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+                                                       pipe, ul_list);
+               }
        }
 
-       if (forced_hpdu) {
+       /* forced HP update needed for non data channels */
+       if (hpda_ctrl_pending) {
                hpda_pending = false;
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_UL_WRITE_TD);
@@ -527,6 +531,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
                return;
        }
 
+       if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+               ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
        if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
                ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
 
@@ -1167,7 +1174,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
                ipc_port_deinit(ipc_imem->ipc_port);
        }
 
-       if (ipc_imem->ipc_devlink)
+       if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
                ipc_devlink_deinit(ipc_imem->ipc_devlink);
 
        ipc_imem_device_ipc_uninit(ipc_imem);
@@ -1263,7 +1270,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
 
        ipc_imem->pci_device_id = device_id;
 
-       ipc_imem->ev_cdev_write_pending = false;
        ipc_imem->cp_version = 0;
        ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
 
@@ -1331,6 +1337,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
 
                if (ipc_flash_link_establish(ipc_imem))
                        goto devlink_channel_fail;
+
+               set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
        }
        return ipc_imem;
 devlink_channel_fail:
index 6be6708b4eec86c71309513880d9242501a3d624..6b8a837faef2fe5f064d48681595e8f1724f49f9 100644 (file)
@@ -101,6 +101,7 @@ struct ipc_chnl_cfg;
 #define IOSM_CHIP_INFO_SIZE_MAX 100
 
 #define FULLY_FUNCTIONAL 0
+#define IOSM_DEVLINK_INIT 1
 
 /* List of the supported UL/DL pipes. */
 enum ipc_mem_pipes {
@@ -335,8 +336,6 @@ enum ipc_phase {
  *                             process the irq actions.
  * @flag:                      Flag to monitor the state of driver
  * @td_update_timer_suspended: if true then td update timer suspend
- * @ev_cdev_write_pending:     0 means inform the IPC tasklet to pass
- *                             the accumulated uplink buffers to CP.
  * @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
  * @reset_det_n:               Reset detect flag
  * @pcie_wake_n:               Pcie wake flag
@@ -374,7 +373,6 @@ struct iosm_imem {
        u8 ev_irq_pending[IPC_IRQ_VECTORS];
        unsigned long flag;
        u8 td_update_timer_suspended:1,
-          ev_cdev_write_pending:1,
           ev_mux_net_transmit_pending:1,
           reset_det_n:1,
           pcie_wake_n:1;
index 825e8e5ffb2aedc8c32f0066e1451ab9b778b0b5..831cdae28e8a9f56884187cce380966213037205 100644 (file)
@@ -41,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
 static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
                                  void *msg, size_t size)
 {
-       ipc_imem->ev_cdev_write_pending = false;
        ipc_imem_ul_send(ipc_imem);
 
        return 0;
@@ -50,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
 /* Through tasklet to do sio write. */
 static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
 {
-       if (ipc_imem->ev_cdev_write_pending)
-               return -1;
-
-       ipc_imem->ev_cdev_write_pending = true;
-
        return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
                                        NULL, 0, false);
 }
@@ -450,6 +444,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
        /* Release the pipe resources */
        ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
        ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+       ipc_imem->nr_of_channels--;
 }
 
 void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
index 221fa3bb8705e26801bb1e3c8f1362c4e77772ee..f577449e4935083b1848bb0a2fd82e45e7c52358 100644 (file)
@@ -202,7 +202,7 @@ static int __init virtual_ncidev_init(void)
        miscdev.minor = MISC_DYNAMIC_MINOR;
        miscdev.name = "virtual_nci";
        miscdev.fops = &virtual_ncidev_fops;
-       miscdev.mode = S_IALLUGO;
+       miscdev.mode = 0600;
 
        return misc_register(&miscdev);
 }
index 4b5de8f5435a5fddf35884ac71fd8c7aec46aa63..1af8a4513708a798b36726dda30787465bb10c33 100644 (file)
@@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                struct request *rq)
 {
        if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+           ctrl->state != NVME_CTRL_DELETING &&
            ctrl->state != NVME_CTRL_DEAD &&
            !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
            !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
@@ -895,10 +896,19 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
                cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->write_zeroes.length =
                cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-       if (nvme_ns_has_pi(ns))
+
+       if (nvme_ns_has_pi(ns)) {
                cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
-       else
-               cmnd->write_zeroes.control = 0;
+
+               switch (ns->pi_type) {
+               case NVME_NS_DPS_PI_TYPE1:
+               case NVME_NS_DPS_PI_TYPE2:
+                       cmnd->write_zeroes.reftag =
+                               cpu_to_le32(t10_pi_ref_tag(req));
+                       break;
+               }
+       }
+
        return BLK_STS_OK;
 }
 
@@ -1740,9 +1750,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                 */
                if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
                        return -EINVAL;
-               if (ctrl->max_integrity_segments)
-                       ns->features |=
-                               (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+               ns->features |= NVME_NS_EXT_LBAS;
+
+               /*
+                * The current fabrics transport drivers support namespace
+                * metadata formats only if nvme_ns_has_pi() returns true.
+                * Suppress support for all other formats so the namespace will
+                * have a 0 capacity and not be usable through the block stack.
+                *
+                * Note, this check will need to be modified if any drivers
+                * gain the ability to use other metadata formats.
+                */
+               if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+                       ns->features |= NVME_NS_METADATA_SUPPORTED;
        } else {
                /*
                 * For PCIe controllers, we can't easily remap the separate
@@ -2469,6 +2490,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
                .vid = 0x14a4,
                .fr = "22301111",
                .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+       },
+       {
+               /*
+                * This Kioxia CD6-V Series / HPE PE8030 device times out and
+                * aborts I/O during any load, but more easily reproducible
+                * with discards (fstrim).
+                *
+                * The device is left in a state where it is also not possible
+                * to use "nvme set-feature" to disable APST, but booting with
+                * nvme_core.default_ps_max_latency=0 works.
+                */
+               .vid = 0x1e0f,
+               .mn = "KCD6XVUL6T40",
+               .quirks = NVME_QUIRK_NO_APST,
        }
 };
 
@@ -2673,8 +2708,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
 
                if (tmp->cntlid == ctrl->cntlid) {
                        dev_err(ctrl->device,
-                               "Duplicate cntlid %u with %s, rejecting\n",
-                               ctrl->cntlid, dev_name(tmp->device));
+                               "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+                               ctrl->cntlid, dev_name(tmp->device),
+                               subsys->subnqn);
                        return false;
                }
 
index c5a2b71c526863f78b3179d6409d301159df24a2..282d54117e0ac710f7aed294b54ea49dd746b414 100644 (file)
@@ -698,6 +698,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                        if (token >= 0)
                                pr_warn("I/O fail on reconnect controller after %d sec\n",
                                        token);
+                       else
+                               token = -1;
+
                        opts->fast_io_fail_tmo = token;
                        break;
                case NVMF_OPT_HOSTNQN:
index 7f2071f2460c877d23cfeb0f499feb534705b79e..13e5d503ed0765af2e6eaed65b344a3dc2ef339f 100644 (file)
@@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
        }
        if (ana_log_size > ctrl->ana_log_size) {
                nvme_mpath_stop(ctrl);
-               kfree(ctrl->ana_log_buf);
+               nvme_mpath_uninit(ctrl);
                ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
                if (!ctrl->ana_log_buf)
                        return -ENOMEM;
@@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
        kfree(ctrl->ana_log_buf);
        ctrl->ana_log_buf = NULL;
+       ctrl->ana_log_size = 0;
 }
index b334af8aa264285f508e1c06cb09ed59904e1499..9b095ee01364996da8841d49af66f74d62a27df6 100644 (file)
@@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                return true;
        if (ctrl->ops->flags & NVME_F_FABRICS &&
            ctrl->state == NVME_CTRL_DELETING)
-               return true;
+               return queue_live;
        return __nvme_check_ready(ctrl, rq, queue_live);
 }
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
index 33bc83d8d992851ac8ea4788d3ac6c1a352a3381..4ceb28675fdf63b4b1dba86b0e790b925bf6b57b 100644 (file)
@@ -572,7 +572,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
        return ret;
 }
 
-static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
                struct nvme_tcp_r2t_pdu *pdu)
 {
        struct nvme_tcp_data_pdu *data = req->pdu;
@@ -581,32 +581,11 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        u8 ddgst = nvme_tcp_ddgst_len(queue);
 
+       req->state = NVME_TCP_SEND_H2C_PDU;
+       req->offset = 0;
        req->pdu_len = le32_to_cpu(pdu->r2t_length);
        req->pdu_sent = 0;
 
-       if (unlikely(!req->pdu_len)) {
-               dev_err(queue->ctrl->ctrl.device,
-                       "req %d r2t len is %u, probably a bug...\n",
-                       rq->tag, req->pdu_len);
-               return -EPROTO;
-       }
-
-       if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
-               dev_err(queue->ctrl->ctrl.device,
-                       "req %d r2t len %u exceeded data len %u (%zu sent)\n",
-                       rq->tag, req->pdu_len, req->data_len,
-                       req->data_sent);
-               return -EPROTO;
-       }
-
-       if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
-               dev_err(queue->ctrl->ctrl.device,
-                       "req %d unexpected r2t offset %u (expected %zu)\n",
-                       rq->tag, le32_to_cpu(pdu->r2t_offset),
-                       req->data_sent);
-               return -EPROTO;
-       }
-
        memset(data, 0, sizeof(*data));
        data->hdr.type = nvme_tcp_h2c_data;
        data->hdr.flags = NVME_TCP_F_DATA_LAST;
@@ -622,7 +601,6 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
        data->command_id = nvme_cid(rq);
        data->data_offset = pdu->r2t_offset;
        data->data_length = cpu_to_le32(req->pdu_len);
-       return 0;
 }
 
 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
@@ -630,7 +608,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
 {
        struct nvme_tcp_request *req;
        struct request *rq;
-       int ret;
+       u32 r2t_length = le32_to_cpu(pdu->r2t_length);
 
        rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
        if (!rq) {
@@ -641,13 +619,28 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
        }
        req = blk_mq_rq_to_pdu(rq);
 
-       ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
-       if (unlikely(ret))
-               return ret;
+       if (unlikely(!r2t_length)) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "req %d r2t len is %u, probably a bug...\n",
+                       rq->tag, r2t_length);
+               return -EPROTO;
+       }
 
-       req->state = NVME_TCP_SEND_H2C_PDU;
-       req->offset = 0;
+       if (unlikely(req->data_sent + r2t_length > req->data_len)) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "req %d r2t len %u exceeded data len %u (%zu sent)\n",
+                       rq->tag, r2t_length, req->data_len, req->data_sent);
+               return -EPROTO;
+       }
 
+       if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "req %d unexpected r2t offset %u (expected %zu)\n",
+                       rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+               return -EPROTO;
+       }
+
+       nvme_tcp_setup_h2c_data_pdu(req, pdu);
        nvme_tcp_queue_request(req, false, true);
 
        return 0;
@@ -1232,6 +1225,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 
 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 {
+       struct page *page;
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 
@@ -1241,6 +1235,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
        if (queue->hdr_digest || queue->data_digest)
                nvme_tcp_free_crypto(queue);
 
+       if (queue->pf_cache.va) {
+               page = virt_to_head_page(queue->pf_cache.va);
+               __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+               queue->pf_cache.va = NULL;
+       }
        sock_release(queue->sock);
        kfree(queue->pdu);
        mutex_destroy(&queue->send_mutex);
index bfc259e0d7b87e11b7d9c3b595e413535842c5a7..9f81beb4df4eff844881854c3ed81ee82b07e821 100644 (file)
@@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
        zone.len = ns->zsze;
        zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
        zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
-       zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+       if (zone.cond == BLK_ZONE_COND_FULL)
+               zone.wp = zone.start + zone.len;
+       else
+               zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
 
        return cb(&zone, idx, data);
 }
index 6aa30f30b572ece0e1dd848ab878e20a20b6ebd0..6be6e59d273bb72c43bc2b106fc2df8ddfbd5622 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/uio.h>
 #include <linux/falloc.h>
 #include <linux/file.h>
+#include <linux/fs.h>
 #include "nvmet.h"
 
 #define NVMET_MAX_MPOOL_BVEC           16
@@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
 
        if (req->ns->buffered_io) {
                if (likely(!req->f.mpool_alloc) &&
-                               nvmet_file_execute_io(req, IOCB_NOWAIT))
+                   (req->ns->file->f_mode & FMODE_NOWAIT) &&
+                   nvmet_file_execute_io(req, IOCB_NOWAIT))
                        return;
                nvmet_file_submit_buffered_io(req);
        } else
index 84c387e4bf4314e3826412d7c133c96d65ba610b..7c1c43ce466bcbab9a98414bb3c0d74f1f35139b 100644 (file)
@@ -166,6 +166,8 @@ static struct workqueue_struct *nvmet_tcp_wq;
 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
 
 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *cmd)
@@ -297,6 +299,16 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
        return 0;
 }
 
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+{
+       WARN_ON(unlikely(cmd->nr_mapped > 0));
+
+       kfree(cmd->iov);
+       sgl_free(cmd->req.sg);
+       cmd->iov = NULL;
+       cmd->req.sg = NULL;
+}
+
 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 {
        struct scatterlist *sg;
@@ -306,6 +318,8 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 
        for (i = 0; i < cmd->nr_mapped; i++)
                kunmap(sg_page(&sg[i]));
+
+       cmd->nr_mapped = 0;
 }
 
 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
@@ -387,7 +401,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 
        return 0;
 err:
-       sgl_free(cmd->req.sg);
+       nvmet_tcp_free_cmd_buffers(cmd);
        return NVME_SC_INTERNAL;
 }
 
@@ -632,10 +646,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
                }
        }
 
-       if (queue->nvme_sq.sqhd_disabled) {
-               kfree(cmd->iov);
-               sgl_free(cmd->req.sg);
-       }
+       if (queue->nvme_sq.sqhd_disabled)
+               nvmet_tcp_free_cmd_buffers(cmd);
 
        return 1;
 
@@ -664,8 +676,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
        if (left)
                return -EAGAIN;
 
-       kfree(cmd->iov);
-       sgl_free(cmd->req.sg);
+       nvmet_tcp_free_cmd_buffers(cmd);
        cmd->queue->snd_cmd = NULL;
        nvmet_tcp_put_cmd(cmd);
        return 1;
@@ -700,10 +711,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
        struct nvmet_tcp_queue *queue = cmd->queue;
+       int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
        struct kvec iov = {
                .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
-               .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+               .iov_len = left
        };
        int ret;
 
@@ -717,6 +729,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
                return ret;
 
        cmd->offset += ret;
+       left -= ret;
+
+       if (left)
+               return -EAGAIN;
 
        if (queue->nvme_sq.sqhd_disabled) {
                cmd->queue->snd_cmd = NULL;
@@ -906,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
        size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
        int ret;
 
-       if (!nvme_is_write(cmd->req.cmd) ||
+       /*
+        * This command has not been processed yet, hence we are trying to
+        * figure out if there is still pending data left to receive. If
+        * we don't, we can simply prepare for the next pdu and bail out,
+        * otherwise we will need to prepare a buffer and receive the
+        * stale data before continuing forward.
+        */
+       if (!nvme_is_write(cmd->req.cmd) || !data_len ||
            data_len > cmd->req.port->inline_data_size) {
                nvmet_prepare_receive_pdu(queue);
                return;
@@ -1406,8 +1429,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
 {
        nvmet_req_uninit(&cmd->req);
        nvmet_tcp_unmap_pdu_iovec(cmd);
-       kfree(cmd->iov);
-       sgl_free(cmd->req.sg);
+       nvmet_tcp_free_cmd_buffers(cmd);
 }
 
 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
@@ -1417,7 +1439,10 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
 
        for (i = 0; i < queue->nr_cmds; i++, cmd++) {
                if (nvmet_tcp_need_data_in(cmd))
-                       nvmet_tcp_finish_cmd(cmd);
+                       nvmet_req_uninit(&cmd->req);
+
+               nvmet_tcp_unmap_pdu_iovec(cmd);
+               nvmet_tcp_free_cmd_buffers(cmd);
        }
 
        if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
@@ -1437,7 +1462,9 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
        mutex_unlock(&nvmet_tcp_queue_mutex);
 
        nvmet_tcp_restore_socket_callbacks(queue);
-       flush_work(&queue->io_work);
+       cancel_work_sync(&queue->io_work);
+       /* stop accepting incoming data */
+       queue->rcv_state = NVMET_TCP_RECV_ERR;
 
        nvmet_tcp_uninit_data_in_cmds(queue);
        nvmet_sq_destroy(&queue->nvme_sq);
index b10f015b2e3775a930c6580502cdeb618c3e7cf3..2b07677a386b7c860d275c6145f7480536640e63 100644 (file)
@@ -76,6 +76,26 @@ struct device_node *of_irq_find_parent(struct device_node *child)
 }
 EXPORT_SYMBOL_GPL(of_irq_find_parent);
 
+/*
+ * These interrupt controllers abuse interrupt-map for unspeakable
+ * reasons and rely on the core code to *ignore* it (the drivers do
+ * their own parsing of the property).
+ *
+ * If you think of adding to the list for something *new*, think
+ * again. There is a high chance that you will be sent back to the
+ * drawing board.
+ */
+static const char * const of_irq_imap_abusers[] = {
+       "CBEA,platform-spider-pic",
+       "sti,platform-spider-pic",
+       "realtek,rtl-intc",
+       "fsl,ls1021a-extirq",
+       "fsl,ls1043a-extirq",
+       "fsl,ls1088a-extirq",
+       "renesas,rza1-irqc",
+       NULL,
+};
+
 /**
  * of_irq_parse_raw - Low level interrupt tree parsing
  * @addr:      address specifier (start of "reg" property of the device) in be32 format
@@ -159,12 +179,15 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                /*
                 * Now check if cursor is an interrupt-controller and
                 * if it is then we are done, unless there is an
-                * interrupt-map which takes precedence.
+                * interrupt-map which takes precedence except on one
+                * of these broken platforms that want to parse
+                * interrupt-map themselves for $reason.
                 */
                bool intc = of_property_read_bool(ipar, "interrupt-controller");
 
                imap = of_get_property(ipar, "interrupt-map", &imaplen);
-               if (imap == NULL && intc) {
+               if (intc &&
+                   (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) {
                        pr_debug(" -> got it !\n");
                        return 0;
                }
index c24dab383654b00a126e0b2f9e6185e47a6f42f4..722dacdd5a17f8135f110077ab519c424b60163b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/regulator/consumer.h>
+#include <linux/module.h>
 
 #include "pcie-designware.h"
 
index 7b17da2f9b3f8d9ddf2c713cce3cad45a46dde1d..cfe66bf04c1d38ba5dbbe8cb6f7772ca2e996a61 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/pm_domain.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
+#include <linux/module.h>
 
 #include "pcie-designware.h"
 
index c5300d49807a23109bbcf75792ba1f97e7f48c83..c3b725afa11fdbd0b96f0bcc5443731817ee49e8 100644 (file)
@@ -32,7 +32,6 @@
 #define PCIE_CORE_DEV_ID_REG                                   0x0
 #define PCIE_CORE_CMD_STATUS_REG                               0x4
 #define PCIE_CORE_DEV_REV_REG                                  0x8
-#define PCIE_CORE_EXP_ROM_BAR_REG                              0x30
 #define PCIE_CORE_PCIEXP_CAP                                   0xc0
 #define PCIE_CORE_ERR_CAPCTL_REG                               0x118
 #define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX                   BIT(5)
@@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
                *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
                return PCI_BRIDGE_EMUL_HANDLED;
 
-       case PCI_ROM_ADDRESS1:
-               *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
-               return PCI_BRIDGE_EMUL_HANDLED;
-
        case PCI_INTERRUPT_LINE: {
                /*
                 * From the whole 32bit register we support reading from HW only
@@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
                advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
                break;
 
-       case PCI_ROM_ADDRESS1:
-               advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
-               break;
-
        case PCI_INTERRUPT_LINE:
                if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
                        u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
index 1bf4d75b61be7834d56e5132115d4f65df8f59f8..b090924b41feefac638f5a127cdfa9bcf801f2a7 100644 (file)
@@ -516,7 +516,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
        int ret, i;
 
        reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
-                                      GPIOD_OUT_LOW, "#PERST");
+                                      GPIOD_OUT_LOW, "PERST#");
        if (IS_ERR(reset))
                return PTR_ERR(reset);
 
@@ -539,12 +539,22 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
 
        rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
 
+       /* Assert PERST# before setting up the clock */
+       gpiod_set_value(reset, 1);
+
        ret = apple_pcie_setup_refclk(pcie, port);
        if (ret < 0)
                return ret;
 
+       /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
+       usleep_range(100, 200);
+
+       /* Deassert PERST# */
        rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
-       gpiod_set_value(reset, 1);
+       gpiod_set_value(reset, 0);
+
+       /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+       msleep(100);
 
        ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
                                         stat & PORT_STATUS_READY, 100, 250000);
index c64c6679b1b9a47297adf7576b6c91c2ed7afa27..0ac9634b398dac5a44370ce549ca9c8135ee8dd3 100644 (file)
@@ -757,8 +757,8 @@ static int hi3670_pcie_phy_get_resources(struct hi3670_pcie_phy *phy,
                return PTR_ERR(phy->sysctrl);
 
        phy->pmctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-pmctrl");
-       if (IS_ERR(phy->sysctrl))
-               return PTR_ERR(phy->sysctrl);
+       if (IS_ERR(phy->pmctrl))
+               return PTR_ERR(phy->pmctrl);
 
        /* clocks */
        phy->phy_ref_clk = devm_clk_get(dev, "phy_ref");
index 08d178a4dc13f5c5405617af08bb89b5ef91de51..aa27c799461040c1a4869f65c22b2b6cb7d5a341 100644 (file)
@@ -82,9 +82,9 @@
  * struct mvebu_cp110_utmi - PHY driver data
  *
  * @regs: PHY registers
- * @syscom: Regmap with system controller registers
+ * @syscon: Regmap with system controller registers
  * @dev: device driver handle
- * @caps: PHY capabilities
+ * @ops: phy ops
  */
 struct mvebu_cp110_utmi {
        void __iomem *regs;
index bfff0c8c9130389e4c472d22a978548aaf7672f3..fec1da470d26da76e6e2cd6280ff71a4549a073d 100644 (file)
@@ -127,12 +127,13 @@ struct phy_drvdata {
 };
 
 /**
- * Write register and read back masked value to confirm it is written
+ * usb_phy_write_readback() - Write register and read back masked value to
+ * confirm it is written
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @offset - register offset.
- * @mask - register bitmask specifying what should be updated
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @offset: register offset.
+ * @mask: register bitmask specifying what should be updated
+ * @val: value to write.
  */
 static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3,
                                          u32 offset,
@@ -171,11 +172,11 @@ static int wait_for_latch(void __iomem *addr)
 }
 
 /**
- * Write SSPHY register
+ * usb_ss_write_phycreg() - Write SSPHY register
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to write.
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to write.
+ * @val: value to write.
  */
 static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3,
                                u32 addr, u32 val)
@@ -209,10 +210,11 @@ err_wait:
 }
 
 /**
- * Read SSPHY register.
+ * usb_ss_read_phycreg() - Read SSPHY register.
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to read.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to read.
+ * @val: pointer in which the read value is stored.
  */
 static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3,
                               u32 addr, u32 *val)
index 456a59d8c7d047ae17629107e6360f02d08cd888..c96639d5f5819ec2b6be258b499e8c7b267b098a 100644 (file)
@@ -2973,6 +2973,9 @@ struct qmp_phy_combo_cfg {
  * @qmp: QMP phy to which this lane belongs
  * @lane_rst: lane's reset controller
  * @mode: current PHY mode
+ * @dp_aux_cfg: Display port aux config
+ * @dp_opts: Display port optional config
+ * @dp_clks: Display port clocks
  */
 struct qmp_phy {
        struct phy *phy;
index 04d18d52f700d93e4d22a8f47bbae0f936788394..716a77748ed83684af9375ba0cc5b40e3390de5c 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Copyright (C) 2016 Linaro Ltd
  */
 #include <linux/module.h>
index 7df6a63ad37ba7f1f7aaaa780a67f7750d3e70a3..e4f4a9be513200a43bbc343b32349f56687f1902 100644 (file)
@@ -478,7 +478,7 @@ static void stm32_usbphyc_phy_tuning(struct stm32_usbphyc *usbphyc,
        if (!of_property_read_bool(np, "st,no-lsfs-fb-cap"))
                usbphyc_phy->tune |= LFSCAPEN;
 
-       if (of_property_read_bool(np, "st,slow-hs-slew-rate"))
+       if (of_property_read_bool(np, "st,decrease-hs-slew-rate"))
                usbphyc_phy->tune |= HSDRVSLEW;
 
        ret = of_property_read_u32(np, "st,tune-hs-dc-level", &val);
index 2ff56ce77b307a5230c3651183da923a6b9df8cb..c1211c4f863cadf650d838b4d7ec9997594cdee2 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * PCIe SERDES driver for AM654x SoC
  *
  * Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/
index 126f5b8735cc1afffe8dbf6deeaa1dedc3486934..b3384c31637ae7e59c04b17a19f988da877cbd27 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * Wrapper driver for SERDES used in J721E
  *
  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
index ebceb1520ce88ade401a7c4e97a35806a90efb48..3a505fe5715addae9a61ce079491a11ef163ce65 100644 (file)
@@ -89,9 +89,9 @@ static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
 }
 
 /**
- * omap_usb2_set_comparator - links the comparator present in the system with
- *     this phy
- * @comparator - the companion phy(comparator) for this phy
+ * omap_usb2_set_comparator() - links the comparator present in the system with this phy
+ *
+ * @comparator: the companion phy(comparator) for this phy
  *
  * The phy companion driver should call this API passing the phy_companion
  * filled with set_vbus and start_srp to be used by usb phy.
index a63213f5972a7e591061bf6dce32d05644528ba2..15c1c79e5c294dc66ab551cfad9fc72732289889 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * tusb1210.c - TUSB1210 USB ULPI PHY driver
  *
  * Copyright (C) 2015 Intel Corporation
index bae9d429b813e3c57e98447622ddcf9a4995aaa9..ecab9064a8458504fc96b34420bb83fdf1c92e19 100644 (file)
@@ -598,14 +598,14 @@ static struct irq_chip amd_gpio_irqchip = {
 
 #define PIN_IRQ_PENDING        (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
 
-static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
 {
        struct amd_gpio *gpio_dev = dev_id;
        struct gpio_chip *gc = &gpio_dev->gc;
-       irqreturn_t ret = IRQ_NONE;
        unsigned int i, irqnr;
        unsigned long flags;
        u32 __iomem *regs;
+       bool ret = false;
        u32  regval;
        u64 status, mask;
 
@@ -627,6 +627,14 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
                /* Each status bit covers four pins */
                for (i = 0; i < 4; i++) {
                        regval = readl(regs + i);
+                       /* caused wake on resume context for shared IRQ */
+                       if (irq < 0 && (regval & BIT(WAKE_STS_OFF))) {
+                               dev_dbg(&gpio_dev->pdev->dev,
+                                       "Waking due to GPIO %d: 0x%x",
+                                       irqnr + i, regval);
+                               return true;
+                       }
+
                        if (!(regval & PIN_IRQ_PENDING) ||
                            !(regval & BIT(INTERRUPT_MASK_OFF)))
                                continue;
@@ -650,9 +658,12 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
                        }
                        writel(regval, regs + i);
                        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
-                       ret = IRQ_HANDLED;
+                       ret = true;
                }
        }
+       /* did not cause wake on resume context for shared IRQ */
+       if (irq < 0)
+               return false;
 
        /* Signal EOI to the GPIO unit */
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
@@ -664,6 +675,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
        return ret;
 }
 
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+{
+       return IRQ_RETVAL(do_amd_gpio_irq_handler(irq, dev_id));
+}
+
+static bool __maybe_unused amd_gpio_check_wake(void *dev_id)
+{
+       return do_amd_gpio_irq_handler(-1, dev_id);
+}
+
 static int amd_get_groups_count(struct pinctrl_dev *pctldev)
 {
        struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
@@ -1033,6 +1054,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
                goto out2;
 
        platform_set_drvdata(pdev, gpio_dev);
+       acpi_register_wakeup_handler(gpio_dev->irq, amd_gpio_check_wake, gpio_dev);
 
        dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
        return ret;
@@ -1050,6 +1072,7 @@ static int amd_gpio_remove(struct platform_device *pdev)
        gpio_dev = platform_get_drvdata(pdev);
 
        gpiochip_remove(&gpio_dev->gc);
+       acpi_unregister_wakeup_handler(amd_gpio_check_wake, gpio_dev);
 
        return 0;
 }
index 0cc346bfc4c35295f6469e0c0190b3190c40e710..a7861079a6502207c121d93169646fe0f92aa3ef 100644 (file)
@@ -258,7 +258,7 @@ static void apple_gpio_irq_ack(struct irq_data *data)
               pctl->base + REG_IRQ(irqgrp, data->hwirq));
 }
 
-static int apple_gpio_irq_type(unsigned int type)
+static unsigned int apple_gpio_irq_type(unsigned int type)
 {
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_RISING:
@@ -272,7 +272,7 @@ static int apple_gpio_irq_type(unsigned int type)
        case IRQ_TYPE_LEVEL_LOW:
                return REG_GPIOx_IN_IRQ_LO;
        default:
-               return -EINVAL;
+               return REG_GPIOx_IN_IRQ_OFF;
        }
 }
 
@@ -288,7 +288,7 @@ static void apple_gpio_irq_unmask(struct irq_data *data)
 {
        struct apple_gpio_pinctrl *pctl =
                gpiochip_get_data(irq_data_get_irq_chip_data(data));
-       int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
+       unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
 
        apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
                           FIELD_PREP(REG_GPIOx_MODE, irqtype));
@@ -313,10 +313,10 @@ static int apple_gpio_irq_set_type(struct irq_data *data,
 {
        struct apple_gpio_pinctrl *pctl =
                gpiochip_get_data(irq_data_get_irq_chip_data(data));
-       int irqtype = apple_gpio_irq_type(type);
+       unsigned int irqtype = apple_gpio_irq_type(type);
 
-       if (irqtype < 0)
-               return irqtype;
+       if (irqtype == REG_GPIOx_IN_IRQ_OFF)
+               return -EINVAL;
 
        apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
                           FIELD_PREP(REG_GPIOx_MODE, irqtype));
index b9191f1abb1c4fe1fe5d3a268a80ac031121d73e..3e0c00766f59add5b984a3e1d2161fcb74293c0c 100644 (file)
@@ -197,6 +197,7 @@ config PINCTRL_QCOM_SPMI_PMIC
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
+  select GPIOLIB
        select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
        help
@@ -211,6 +212,7 @@ config PINCTRL_QCOM_SSBI_PMIC
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
+  select GPIOLIB
        select GPIOLIB_IRQCHIP
        select IRQ_DOMAIN_HIERARCHY
        help
index c51793f6546f1c20c06aa946ffd52da4a9937f69..fdfd7b8f3a76d7d66659291bfdb497c1f21a8810 100644 (file)
@@ -1310,6 +1310,7 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
        .ngpios = 151,
        .wakeirq_map = sdm845_pdc_map,
        .nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
+       .wakeirq_dual_edge_errata = true,
 };
 
 static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
index 4d8f8636c2b39863a81c9490a19443bcc7fd8c47..1c042d39380c6dab558ef12816061ba305c8d20c 100644 (file)
@@ -1597,10 +1597,10 @@ static const struct msm_pingroup sm8350_groups[] = {
        [200] = PINGROUP(200, qdss_gpio, _, _, _, _, _, _, _, _),
        [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _),
        [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _),
-       [203] = UFS_RESET(ufs_reset, 0x1d8000),
-       [204] = SDC_PINGROUP(sdc2_clk, 0x1cf000, 14, 6),
-       [205] = SDC_PINGROUP(sdc2_cmd, 0x1cf000, 11, 3),
-       [206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0),
+       [203] = UFS_RESET(ufs_reset, 0xd8000),
+       [204] = SDC_PINGROUP(sdc2_clk, 0xcf000, 14, 6),
+       [205] = SDC_PINGROUP(sdc2_cmd, 0xcf000, 11, 3),
+       [206] = SDC_PINGROUP(sdc2_data, 0xcf000, 9, 0),
 };
 
 static const struct msm_gpio_wakeirq_map sm8350_pdc_map[] = {
index 425d55a2ee19f4340aebf69f511e5e95e611f7b7..6853b5b8b0fe7fbe08eb04fa6dae601567252dff 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
+#include <asm/mach-ralink/ralink_regs.h>
 #include <asm/mach-ralink/mt7620.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
index 8d734bfc33d2051590179cf2bf7e150ecd986d38..50bd26a30ac0a3bc19d7816014d04dc50cc1790b 100644 (file)
@@ -275,7 +275,7 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
        return 0;
 }
 
-static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
+static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
                                        unsigned int offset)
 {
        struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
@@ -289,7 +289,7 @@ static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctlde
                        continue;
                for (j = 0; j < num_pins; j++) {
                        if (offset == pins[j])
-                               return (struct tegra_pingroup *)&pmx->soc->groups[group];
+                               return &pmx->soc->groups[group];
                }
        }
 
index b4fef9185d8813018fe821e9cd4701e5967a8ee3..5c1dfcb46749d0485da289d952ac25eb347af759 100644 (file)
@@ -1387,7 +1387,6 @@ static struct tegra_function tegra194_functions[] = {
                .schmitt_bit = schmitt_b,                       \
                .drvtype_bit = 13,                              \
                .lpdr_bit = e_lpdr,                             \
-               .drv_reg = -1,                                  \
 
 #define drive_touch_clk_pcc4            DRV_PINGROUP_ENTRY_Y(0x2004,   12,     5,      20,     5,      -1,     -1,     -1,     -1,     1)
 #define drive_uart3_rx_pcc6             DRV_PINGROUP_ENTRY_Y(0x200c,   12,     5,      20,     5,      -1,     -1,     -1,     -1,     1)
index 9d1e7e03628e207b35e26a7e4adf6e9e95cdca72..4020b8354bae90cb783a337c09c724eabe0a3a94 100644 (file)
@@ -41,9 +41,12 @@ enum cros_ec_ish_channel {
 #define ISHTP_SEND_TIMEOUT                     (3 * HZ)
 
 /* ISH Transport CrOS EC ISH client unique GUID */
-static const guid_t cros_ish_guid =
-       GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
-                 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0);
+static const struct ishtp_device_id cros_ec_ishtp_id_table[] = {
+       { .guid = GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
+                 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0), },
+       { }
+};
+MODULE_DEVICE_TABLE(ishtp, cros_ec_ishtp_id_table);
 
 struct header {
        u8 channel;
@@ -389,7 +392,7 @@ static int cros_ish_init(struct ishtp_cl *cros_ish_cl)
        ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE);
        ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE);
 
-       fw_client = ishtp_fw_cl_get_client(dev, &cros_ish_guid);
+       fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid);
        if (!fw_client) {
                dev_err(cl_data_to_dev(client_data),
                        "ish client uuid not found\n");
@@ -765,7 +768,7 @@ static SIMPLE_DEV_PM_OPS(cros_ec_ishtp_pm_ops, cros_ec_ishtp_suspend,
 
 static struct ishtp_cl_driver  cros_ec_ishtp_driver = {
        .name = "cros_ec_ishtp",
-       .guid = &cros_ish_guid,
+       .id = cros_ec_ishtp_id_table,
        .probe = cros_ec_ishtp_probe,
        .remove = cros_ec_ishtp_remove,
        .reset = cros_ec_ishtp_reset,
@@ -791,4 +794,3 @@ MODULE_DESCRIPTION("ChromeOS EC ISHTP Client Driver");
 MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
 
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
index 0b7f58feb701ee8b4355b321e67684f5e08b6c79..c897a2f158404e6b7910f291e7ebf8f0fd526b92 100644 (file)
@@ -413,7 +413,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl
                                int size)
 {
        struct mlxreg_hotplug_device *dev = devs;
-       int i;
+       int i, ret;
 
        /* Create static I2C device feeding by auxiliary or main power. */
        for (i = 0; i < size; i++, dev++) {
@@ -423,6 +423,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl
                                dev->brdinfo->type, dev->nr, dev->brdinfo->addr);
 
                        dev->adapter = NULL;
+                       ret = PTR_ERR(dev->client);
                        goto fail_create_static_devices;
                }
        }
@@ -435,7 +436,7 @@ fail_create_static_devices:
                i2c_unregister_device(dev->client);
                dev->client = NULL;
        }
-       return IS_ERR(dev->client);
+       return ret;
 }
 
 static void
index d4c079f4afc63d9e052518e278b9dc377016d143..7400bc5da5bec6e7150dc4e99eb5ecb1a6f82522 100644 (file)
@@ -185,7 +185,7 @@ config ACER_WMI
 
 config AMD_PMC
        tristate "AMD SoC PMC driver"
-       depends on ACPI && PCI
+       depends on ACPI && PCI && RTC_CLASS
        help
          The driver provides support for AMD Power Management Controller
          primarily responsible for S2Idle transactions that are driven from
index b7e50ed050a802f4fa9b6007ee820acfd20b9a15..841c44cd64c2c0c0545e60b791ccb632e76a684e 100644 (file)
@@ -76,7 +76,7 @@
 #define AMD_CPU_ID_CZN                 AMD_CPU_ID_RN
 #define AMD_CPU_ID_YC                  0x14B5
 
-#define PMC_MSG_DELAY_MIN_US           100
+#define PMC_MSG_DELAY_MIN_US           50
 #define RESPONSE_REGISTER_LOOP_MAX     20000
 
 #define SOC_SUBSYSTEM_IP_MAX   12
index 2fffa57e596e4191c68301e5244518bd2474b759..fe224a54f24c018acec3710f2d582c9ec9496923 100644 (file)
@@ -187,7 +187,7 @@ config DELL_WMI_AIO
 
 config DELL_WMI_DESCRIPTOR
        tristate
-       default m
+       default n
        depends on ACPI_WMI
 
 config DELL_WMI_LED
index b183967ecfb7e26330a219538279700ebce99bf8..435a91fe25687f68da8943323b3f7dfabd55f569 100644 (file)
@@ -331,9 +331,11 @@ static int lis3lv02d_probe(struct platform_device *device)
        INIT_WORK(&hpled_led.work, delayed_set_status_worker);
        ret = led_classdev_register(NULL, &hpled_led.led_classdev);
        if (ret) {
+               i8042_remove_filter(hp_accel_i8042_filter);
                lis3lv02d_joystick_disable(&lis3_dev);
                lis3lv02d_poweroff(&lis3_dev);
                flush_work(&hpled_led.work);
+               lis3lv02d_remove_fs(&lis3_dev);
                return ret;
        }
 
index 08598942a6d780388286ef501be45caa851625e5..13f8cf70b9aee559d17fcb100abc748652a9b68a 100644 (file)
@@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = {
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
                },
        },
+       {
+               .ident = "Microsoft Surface Go 3",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+               },
+       },
        { }
 };
 
index 12fc98a486577628bc76e323be3be4013ddd7b77..93ac8b2dbf3878217dfee9f107c1b7bb72a47185 100644 (file)
@@ -93,9 +93,12 @@ struct ishtp_opregion_dev {
 };
 
 /* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */
-static const guid_t ecl_ishtp_guid =
-       GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
-                 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9);
+static const struct ishtp_device_id ecl_ishtp_id_table[] = {
+       { .guid = GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
+                 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9), },
+       { }
+};
+MODULE_DEVICE_TABLE(ishtp, ecl_ishtp_id_table);
 
 /* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */
 static const guid_t ecl_acpi_guid =
@@ -462,7 +465,7 @@ static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl)
        ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE);
        ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE);
 
-       fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_guid);
+       fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_id_table[0].guid);
        if (!fw_client) {
                dev_err(cl_data_to_dev(opr_dev), "fw client not found\n");
                return -ENOENT;
@@ -674,7 +677,7 @@ static const struct dev_pm_ops ecl_ishtp_pm_ops = {
 
 static struct ishtp_cl_driver ecl_ishtp_cl_driver = {
        .name = "ishtp-eclite",
-       .guid = &ecl_ishtp_guid,
+       .id = ecl_ishtp_id_table,
        .probe = ecl_ishtp_cl_probe,
        .remove = ecl_ishtp_cl_remove,
        .reset = ecl_ishtp_cl_reset,
@@ -698,4 +701,3 @@ MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver");
 MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>");
 
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
index ae9293024c77bd23427a9089f19659cb1f894d59..a91847a551a725336a063235697046d660c698dc 100644 (file)
@@ -657,6 +657,18 @@ static int acpi_add(struct acpi_device *device)
        if (product && strlen(product) > 4)
                switch (product[4]) {
                case '5':
+                       if (strlen(product) > 5)
+                               switch (product[5]) {
+                               case 'N':
+                                       year = 2021;
+                                       break;
+                               case '0':
+                                       year = 2016;
+                                       break;
+                               default:
+                                       year = 2022;
+                               }
+                       break;
                case '6':
                        year = 2016;
                        break;
index 7ee010aa740aa7a2479b24d32a58a3603c67086f..c1d9ed9b7b672d45b30a4671113a105001a2eb1b 100644 (file)
@@ -152,7 +152,7 @@ struct sabi_config {
 
 static const struct sabi_config sabi_configs[] = {
        {
-               /* I don't know if it is really 2, but it it is
+               /* I don't know if it is really 2, but it is
                 * less than 3 anyway */
                .sabi_version = 2,
 
index 9472aae72df29c83fc6fc4351dfc2b8afbde560b..c4d9c45350f7c33685cc897aead11c8f85d70fb3 100644 (file)
@@ -888,8 +888,10 @@ static int tlmi_analyze(void)
                        break;
                if (!item)
                        break;
-               if (!*item)
+               if (!*item) {
+                       kfree(item);
                        continue;
+               }
 
                /* It is not allowed to have '/' for file name. Convert it into '\'. */
                strreplace(item, '/', '\\');
@@ -902,6 +904,7 @@ static int tlmi_analyze(void)
                setting = kzalloc(sizeof(*setting), GFP_KERNEL);
                if (!setting) {
                        ret = -ENOMEM;
+                       kfree(item);
                        goto fail_clear_attr;
                }
                setting->index = i;
@@ -916,7 +919,6 @@ static int tlmi_analyze(void)
                }
                kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
                tlmi_priv.setting[i] = setting;
-               tlmi_priv.settings_count++;
                kfree(item);
        }
 
@@ -983,7 +985,12 @@ static void tlmi_remove(struct wmi_device *wdev)
 
 static int tlmi_probe(struct wmi_device *wdev, const void *context)
 {
-       tlmi_analyze();
+       int ret;
+
+       ret = tlmi_analyze();
+       if (ret)
+               return ret;
+
        return tlmi_sysfs_init();
 }
 
index f8e26823075fde7bc399ff80557d4d4c8513442a..2ce5086a5af2713c5b00fd29dc549c3fd0fbf33b 100644 (file)
@@ -55,7 +55,6 @@ struct tlmi_attr_setting {
 struct think_lmi {
        struct wmi_device *wmi_device;
 
-       int settings_count;
        bool can_set_bios_settings;
        bool can_get_bios_selections;
        bool can_set_bios_password;
index 9c632df734bbf45a7039a8a636138722847f0f32..bb1abb947e1eaf5061944fc6fac218affc67069e 100644 (file)
@@ -1105,15 +1105,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
        return status;
 }
 
-/* Query FW and update rfkill sw state for all rfkill switches */
-static void tpacpi_rfk_update_swstate_all(void)
-{
-       unsigned int i;
-
-       for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
-               tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
-}
-
 /*
  * Sync the HW-blocking state of all rfkill switches,
  * do notice it causes the rfkill core to schedule uevents
@@ -3024,6 +3015,8 @@ static struct attribute *hotkey_attributes[] = {
        &dev_attr_hotkey_all_mask.attr,
        &dev_attr_hotkey_adaptive_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
+       &dev_attr_hotkey_tablet_mode.attr,
+       &dev_attr_hotkey_radio_sw.attr,
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
        &dev_attr_hotkey_source_mask.attr,
        &dev_attr_hotkey_poll_freq.attr,
@@ -3074,9 +3067,6 @@ static void tpacpi_send_radiosw_update(void)
        if (wlsw == TPACPI_RFK_RADIO_OFF)
                tpacpi_rfk_update_hwblock_state(true);
 
-       /* Sync sw blocking state */
-       tpacpi_rfk_update_swstate_all();
-
        /* Sync hw blocking state last if it is hw-unblocked */
        if (wlsw == TPACPI_RFK_RADIO_ON)
                tpacpi_rfk_update_hwblock_state(false);
@@ -5738,11 +5728,11 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
        "tpacpi::standby",
        "tpacpi::dock_status1",
        "tpacpi::dock_status2",
-       "tpacpi::unknown_led2",
+       "tpacpi::lid_logo_dot",
        "tpacpi::unknown_led3",
        "tpacpi::thinkvantage",
 };
-#define TPACPI_SAFE_LEDS       0x1081U
+#define TPACPI_SAFE_LEDS       0x1481U
 
 static inline bool tpacpi_is_led_restricted(const unsigned int led)
 {
@@ -8766,6 +8756,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (1st gen) */
        TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (2nd gen) */
        TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (3nd gen) */
+       TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (4nd gen) */
        TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),  /* P15 (1st gen) / P15v (1st gen) */
        TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),  /* X1 Carbon (9th gen) */
 };
index fa8812039b82b760eadd588665524f73da3327b6..17dd54d4b783c391cdf0fb7c84a7e3dd17dd6ef1 100644 (file)
@@ -905,6 +905,16 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = {
        .properties = trekstor_primetab_t13b_props,
 };
 
+static const struct property_entry trekstor_surftab_duo_w1_props[] = {
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+       { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_duo_w1_data = {
+       .acpi_name      = "GDIX1001:00",
+       .properties     = trekstor_surftab_duo_w1_props,
+};
+
 static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
@@ -1502,6 +1512,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Primetab T13B"),
                },
        },
+       {
+               /* TrekStor SurfTab duo W1 10.1 ST10432-10b */
+               .driver_data = (void *)&trekstor_surftab_duo_w1_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+               },
+       },
        {
                /* TrekStor SurfTab twin 10.1 ST10432-8 */
                .driver_data = (void *)&trekstor_surftab_twin_10_1_data,
index b9fac786246ab5c076da2d6248bfeb136adbbc31..2a5c1829aab790f6b4ade183ec96e2eca84c357b 100644 (file)
@@ -463,17 +463,12 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
 
 static int __init init_dtpm(void)
 {
-       struct dtpm_descr *dtpm_descr;
-
        pct = powercap_register_control_type(NULL, "dtpm", NULL);
        if (IS_ERR(pct)) {
                pr_err("Failed to register control type\n");
                return PTR_ERR(pct);
        }
 
-       for_each_dtpm_table(dtpm_descr)
-               dtpm_descr->init();
-
        return 0;
 }
 late_initcall(init_dtpm);
index 44faa3a74db6ab2c1716f4b7864a9c6c887e1def..b740866b228d972e4f2e0bb7054359044f73d7cd 100644 (file)
@@ -166,16 +166,13 @@ static struct dtpm_ops dtpm_ops = {
 
 static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
 {
-       struct em_perf_domain *pd;
        struct dtpm_cpu *dtpm_cpu;
 
-       pd = em_cpu_get(cpu);
-       if (!pd)
-               return -EINVAL;
-
        dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+       if (dtpm_cpu)
+               dtpm_update_power(&dtpm_cpu->dtpm);
 
-       return dtpm_update_power(&dtpm_cpu->dtpm);
+       return 0;
 }
 
 static int cpuhp_dtpm_cpu_online(unsigned int cpu)
index 6bc5791a7ec5bb74e44022b17e98d783e6e294cf..08e429a0692218780aefbdcbbc501d9806564092 100644 (file)
@@ -1699,12 +1699,9 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel)
 
 /* PTP Hardware Clock interface */
 
-/**
+/*
  * Maximum absolute value for write phase offset in picoseconds
  *
- * @channel:  channel
- * @delta_ns: delta in nanoseconds
- *
  * Destination signed register is 32-bit register in resolution of 50ps
  *
  * 0x7fffffff * 50 =  2147483647 * 50 = 107374182350
index 34f943c8c9fd84c4adbce0816d15d82334c8bdd5..0f1b5a7d2a89c1101cfd64f90783d777cd323d4c 100644 (file)
@@ -1304,10 +1304,11 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r)
        if (!ext)
                return -ENOMEM;
 
-       err = -EINVAL;
        ext->mem = ptp_ocp_get_mem(bp, r);
-       if (!ext->mem)
+       if (IS_ERR(ext->mem)) {
+               err = PTR_ERR(ext->mem);
                goto out;
+       }
 
        ext->bp = bp;
        ext->info = r->extra;
@@ -1371,8 +1372,8 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r)
        void __iomem *mem;
 
        mem = ptp_ocp_get_mem(bp, r);
-       if (!mem)
-               return -EINVAL;
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
 
        bp_assign_entry(bp, r, mem);
 
index 2c40fe15da55225a11e865951e2e27cd136a8db8..6043c832d09e49942b6eb057aa1d01a87ee6942f 100644 (file)
@@ -731,7 +731,7 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
                ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
        else
                ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
-       return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
+       return sysfs_emit(buf, ff_flag ? "1\n" : "0\n");
 }
 
 static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
@@ -773,7 +773,7 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
        spin_unlock(&dasd_devmap_lock);
 
 out:
-       return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
+       return sysfs_emit(buf, ro_flag ? "1\n" : "0\n");
 }
 
 static ssize_t
@@ -834,7 +834,7 @@ dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
                erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
        else
                erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
-       return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+       return sysfs_emit(buf, erplog ? "1\n" : "0\n");
 }
 
 static ssize_t
@@ -1033,13 +1033,13 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
                dasd_put_device(device);
                goto out;
        } else {
-               len = snprintf(buf, PAGE_SIZE, "%s\n",
-                              device->discipline->name);
+               len = sysfs_emit(buf, "%s\n",
+                                device->discipline->name);
                dasd_put_device(device);
                return len;
        }
 out:
-       len = snprintf(buf, PAGE_SIZE, "none\n");
+       len = sysfs_emit(buf, "none\n");
        return len;
 }
 
@@ -1056,30 +1056,30 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
        if (!IS_ERR(device)) {
                switch (device->state) {
                case DASD_STATE_NEW:
-                       len = snprintf(buf, PAGE_SIZE, "new\n");
+                       len = sysfs_emit(buf, "new\n");
                        break;
                case DASD_STATE_KNOWN:
-                       len = snprintf(buf, PAGE_SIZE, "detected\n");
+                       len = sysfs_emit(buf, "detected\n");
                        break;
                case DASD_STATE_BASIC:
-                       len = snprintf(buf, PAGE_SIZE, "basic\n");
+                       len = sysfs_emit(buf, "basic\n");
                        break;
                case DASD_STATE_UNFMT:
-                       len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+                       len = sysfs_emit(buf, "unformatted\n");
                        break;
                case DASD_STATE_READY:
-                       len = snprintf(buf, PAGE_SIZE, "ready\n");
+                       len = sysfs_emit(buf, "ready\n");
                        break;
                case DASD_STATE_ONLINE:
-                       len = snprintf(buf, PAGE_SIZE, "online\n");
+                       len = sysfs_emit(buf, "online\n");
                        break;
                default:
-                       len = snprintf(buf, PAGE_SIZE, "no stat\n");
+                       len = sysfs_emit(buf, "no stat\n");
                        break;
                }
                dasd_put_device(device);
        } else
-               len = snprintf(buf, PAGE_SIZE, "unknown\n");
+               len = sysfs_emit(buf, "unknown\n");
        return len;
 }
 
@@ -1120,7 +1120,7 @@ static ssize_t dasd_vendor_show(struct device *dev,
        device = dasd_device_from_cdev(to_ccwdev(dev));
        vendor = "";
        if (IS_ERR(device))
-               return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+               return sysfs_emit(buf, "%s\n", vendor);
 
        if (device->discipline && device->discipline->get_uid &&
            !device->discipline->get_uid(device, &uid))
@@ -1128,7 +1128,7 @@ static ssize_t dasd_vendor_show(struct device *dev,
 
        dasd_put_device(device);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+       return sysfs_emit(buf, "%s\n", vendor);
 }
 
 static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
@@ -1148,7 +1148,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
        device = dasd_device_from_cdev(to_ccwdev(dev));
        uid_string[0] = 0;
        if (IS_ERR(device))
-               return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+               return sysfs_emit(buf, "%s\n", uid_string);
 
        if (device->discipline && device->discipline->get_uid &&
            !device->discipline->get_uid(device, &uid)) {
@@ -1183,7 +1183,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
        }
        dasd_put_device(device);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+       return sysfs_emit(buf, "%s\n", uid_string);
 }
 static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
 
@@ -1201,7 +1201,7 @@ dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
                eer_flag = dasd_eer_enabled(devmap->device);
        else
                eer_flag = 0;
-       return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n");
+       return sysfs_emit(buf, eer_flag ? "1\n" : "0\n");
 }
 
 static ssize_t
@@ -1243,7 +1243,7 @@ dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
        device = dasd_device_from_cdev(to_ccwdev(dev));
        if (IS_ERR(device))
                return -ENODEV;
-       len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires);
+       len = sysfs_emit(buf, "%lu\n", device->default_expires);
        dasd_put_device(device);
        return len;
 }
@@ -1283,7 +1283,7 @@ dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf)
        device = dasd_device_from_cdev(to_ccwdev(dev));
        if (IS_ERR(device))
                return -ENODEV;
-       len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries);
+       len = sysfs_emit(buf, "%lu\n", device->default_retries);
        dasd_put_device(device);
        return len;
 }
@@ -1324,7 +1324,7 @@ dasd_timeout_show(struct device *dev, struct device_attribute *attr,
        device = dasd_device_from_cdev(to_ccwdev(dev));
        if (IS_ERR(device))
                return -ENODEV;
-       len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout);
+       len = sysfs_emit(buf, "%lu\n", device->blk_timeout);
        dasd_put_device(device);
        return len;
 }
@@ -1398,11 +1398,11 @@ static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
                return -ENODEV;
        if (!device->discipline || !device->discipline->hpf_enabled) {
                dasd_put_device(device);
-               return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+               return sysfs_emit(buf, "%d\n", dasd_nofcx);
        }
        hpf = device->discipline->hpf_enabled(device);
        dasd_put_device(device);
-       return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+       return sysfs_emit(buf, "%d\n", hpf);
 }
 
 static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
@@ -1416,13 +1416,13 @@ static ssize_t dasd_reservation_policy_show(struct device *dev,
 
        devmap = dasd_find_busid(dev_name(dev));
        if (IS_ERR(devmap)) {
-               rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+               rc = sysfs_emit(buf, "ignore\n");
        } else {
                spin_lock(&dasd_devmap_lock);
                if (devmap->features & DASD_FEATURE_FAILONSLCK)
-                       rc = snprintf(buf, PAGE_SIZE, "fail\n");
+                       rc = sysfs_emit(buf, "fail\n");
                else
-                       rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+                       rc = sysfs_emit(buf, "ignore\n");
                spin_unlock(&dasd_devmap_lock);
        }
        return rc;
@@ -1457,14 +1457,14 @@ static ssize_t dasd_reservation_state_show(struct device *dev,
 
        device = dasd_device_from_cdev(to_ccwdev(dev));
        if (IS_ERR(device))
-               return snprintf(buf, PAGE_SIZE, "none\n");
+               return sysfs_emit(buf, "none\n");
 
        if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
-               rc = snprintf(buf, PAGE_SIZE, "reserved\n");
+               rc = sysfs_emit(buf, "reserved\n");
        else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
-               rc = snprintf(buf, PAGE_SIZE, "lost\n");
+               rc = sysfs_emit(buf, "lost\n");
        else
-               rc = snprintf(buf, PAGE_SIZE, "none\n");
+               rc = sysfs_emit(buf, "none\n");
        dasd_put_device(device);
        return rc;
 }
@@ -1531,7 +1531,7 @@ dasd_path_threshold_show(struct device *dev,
        device = dasd_device_from_cdev(to_ccwdev(dev));
        if (IS_ERR(device))
                return -ENODEV;
-       len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+       len = sysfs_emit(buf, "%lu\n", device->path_thrhld);
        dasd_put_device(device);
        return len;
 }
@@ -1578,7 +1578,7 @@ dasd_path_autodisable_show(struct device *dev,
        else
                flag = (DASD_FEATURE_DEFAULT &
                        DASD_FEATURE_PATH_AUTODISABLE) != 0;
-       return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n");
+       return sysfs_emit(buf, flag ? "1\n" : "0\n");
 }
 
 static ssize_t
@@ -1616,7 +1616,7 @@ dasd_path_interval_show(struct device *dev,
        device = dasd_device_from_cdev(to_ccwdev(dev));
        if (IS_ERR(device))
                return -ENODEV;
-       len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+       len = sysfs_emit(buf, "%lu\n", device->path_interval);
        dasd_put_device(device);
        return len;
 }
@@ -1662,9 +1662,9 @@ dasd_device_fcs_show(struct device *dev, struct device_attribute *attr,
                return -ENODEV;
        fc_sec = dasd_path_get_fcs_device(device);
        if (fc_sec == -EINVAL)
-               rc = snprintf(buf, PAGE_SIZE, "Inconsistent\n");
+               rc = sysfs_emit(buf, "Inconsistent\n");
        else
-               rc = snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+               rc = sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
        dasd_put_device(device);
 
        return rc;
@@ -1677,7 +1677,7 @@ dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
        struct dasd_path *path = to_dasd_path(kobj);
        unsigned int fc_sec = path->fc_security;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+       return sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
 }
 
 static struct kobj_attribute path_fcs_attribute =
@@ -1698,7 +1698,7 @@ static ssize_t dasd_##_name##_show(struct device *dev,                    \
                val = _func(device);                                    \
        dasd_put_device(device);                                        \
                                                                        \
-       return snprintf(buf, PAGE_SIZE, "%d\n", val);                   \
+       return sysfs_emit(buf, "%d\n", val);                    \
 }                                                                      \
 static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL);            \
 
index 646ec796bb83b66e12481ba4ff349e977fc1b35a..dfde0d941c3c4fc9c71a1329e6c8a7d6b6afdc43 100644 (file)
@@ -1047,24 +1047,24 @@ raw3270_probe (struct ccw_device *cdev)
 static ssize_t
 raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%i\n",
-                       ((struct raw3270 *) dev_get_drvdata(dev))->model);
+       return sysfs_emit(buf, "%i\n",
+                         ((struct raw3270 *)dev_get_drvdata(dev))->model);
 }
 static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
 
 static ssize_t
 raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%i\n",
-                       ((struct raw3270 *) dev_get_drvdata(dev))->rows);
+       return sysfs_emit(buf, "%i\n",
+                         ((struct raw3270 *)dev_get_drvdata(dev))->rows);
 }
 static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
 
 static ssize_t
 raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%i\n",
-                       ((struct raw3270 *) dev_get_drvdata(dev))->cols);
+       return sysfs_emit(buf, "%i\n",
+                         ((struct raw3270 *)dev_get_drvdata(dev))->cols);
 }
 static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
 
index 1097e76982a5d0fad7ffcf7ee01856d5cd478305..5440f285f3494d67c8c66e96b260146f881cbf30 100644 (file)
@@ -285,7 +285,7 @@ static ssize_t chp_configure_show(struct device *dev,
        if (status < 0)
                return status;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", status);
+       return sysfs_emit(buf, "%d\n", status);
 }
 
 static int cfg_wait_idle(void);
index b940e0268f96fad37563e49782fafa7b240a0588..e83453bea2aee1eaf2957b5223f8b41685c7760c 100644 (file)
@@ -5095,14 +5095,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                /* NPort Recovery mode or node is just allocated */
                if (!lpfc_nlp_not_used(ndlp)) {
                        /* A LOGO is completing and the node is in NPR state.
-                        * If this a fabric node that cleared its transport
-                        * registration, release the rpi.
+                        * Just unregister the RPI because the node is still
+                        * required.
                         */
-                       spin_lock_irq(&ndlp->lock);
-                       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
-                               ndlp->nlp_flag |= NLP_RELEASE_RPI;
-                       spin_unlock_irq(&ndlp->lock);
                        lpfc_unreg_rpi(vport, ndlp);
                } else {
                        /* Indicate the node has already released, should
index 27eb652b564f54117e02d257d92ab6111c9412d5..81dab9b82f79f98e8b14229647b3b70f6a2a983d 100644 (file)
@@ -639,8 +639,8 @@ static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
        mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
        current_time = ktime_get_real();
        TimeStamp = ktime_to_ms(current_time);
-       mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
-       mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32);
+       mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
+       mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
        init_completion(&ioc->scsih_cmds.done);
        ioc->put_smid_default(ioc, smid);
        dinitprintk(ioc, ioc_info(ioc,
index db6a759de1e99344bf400ae555accec7ea0dcac6..a0af986633d2a016a2ccdb1b4d83a5dbe4f631b4 100644 (file)
 
 #define MPT_MAX_CALLBACKS              32
 
+#define MPT_MAX_HBA_NUM_PHYS           32
+
 #define INTERNAL_CMDS_COUNT            10      /* reserved cmds */
 /* reserved for issuing internally framed scsi io cmds */
 #define INTERNAL_SCSIIO_CMDS_COUNT     3
@@ -798,6 +800,7 @@ struct _sas_phy {
  * @enclosure_handle: handle for this a member of an enclosure
  * @device_info: bitwise defining capabilities of this sas_host/expander
  * @responding: used in _scsih_expander_device_mark_responding
+ * @nr_phys_allocated: Allocated memory for this many count phys
  * @phy: a list of phys that make up this sas_host/expander
  * @sas_port_list: list of ports attached to this sas_host/expander
  * @port: hba port entry containing node's port number info
@@ -813,6 +816,7 @@ struct _sas_node {
        u16     enclosure_handle;
        u64     enclosure_logical_id;
        u8      responding;
+       u8      nr_phys_allocated;
        struct hba_port *port;
        struct  _sas_phy *phy;
        struct list_head sas_port_list;
index cee7170beae856d2ff0637d6e476db172cfc4931..00792767c620d707be9f8a64329b32877327778f 100644 (file)
@@ -3869,7 +3869,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
 
        shost_for_each_device(sdev, ioc->shost) {
                sas_device_priv_data = sdev->hostdata;
-               if (!sas_device_priv_data)
+               if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
                        continue;
                if (sas_device_priv_data->sas_target->sas_address
                    != sas_address)
@@ -6406,11 +6406,26 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
        int i, j, count = 0, lcount = 0;
        int ret;
        u64 sas_addr;
+       u8 num_phys;
 
        drsprintk(ioc, ioc_info(ioc,
            "updating ports for sas_host(0x%016llx)\n",
            (unsigned long long)ioc->sas_hba.sas_address));
 
+       mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+       if (!num_phys) {
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                   __FILE__, __LINE__, __func__);
+               return;
+       }
+
+       if (num_phys > ioc->sas_hba.nr_phys_allocated) {
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                  __FILE__, __LINE__, __func__);
+               return;
+       }
+       ioc->sas_hba.num_phys = num_phys;
+
        port_table = kcalloc(ioc->sas_hba.num_phys,
            sizeof(struct hba_port), GFP_KERNEL);
        if (!port_table)
@@ -6611,6 +6626,30 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
                        ioc->sas_hba.phy[i].hba_vphy = 1;
                }
 
+               /*
+                * Add new HBA phys to STL if these new phys got added as part
+                * of HBA Firmware upgrade/downgrade operation.
+                */
+               if (!ioc->sas_hba.phy[i].phy) {
+                       if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
+                                                       &phy_pg0, i))) {
+                               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                                       __FILE__, __LINE__, __func__);
+                               continue;
+                       }
+                       ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+                               MPI2_IOCSTATUS_MASK;
+                       if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+                               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                                       __FILE__, __LINE__, __func__);
+                               continue;
+                       }
+                       ioc->sas_hba.phy[i].phy_id = i;
+                       mpt3sas_transport_add_host_phy(ioc,
+                               &ioc->sas_hba.phy[i], phy_pg0,
+                               ioc->sas_hba.parent_dev);
+                       continue;
+               }
                ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
                attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
                    AttachedDevHandle);
@@ -6622,6 +6661,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
                    attached_handle, i, link_rate,
                    ioc->sas_hba.phy[i].port);
        }
+       /*
+        * Clear the phy details if this phy got disabled as part of
+        * HBA Firmware upgrade/downgrade operation.
+        */
+       for (i = ioc->sas_hba.num_phys;
+            i < ioc->sas_hba.nr_phys_allocated; i++) {
+               if (ioc->sas_hba.phy[i].phy &&
+                   ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
+                   SAS_LINK_RATE_1_5_GBPS)
+                       mpt3sas_transport_update_links(ioc,
+                               ioc->sas_hba.sas_address, 0, i,
+                               MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
+       }
  out:
        kfree(sas_iounit_pg0);
 }
@@ -6654,7 +6706,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
                        __FILE__, __LINE__, __func__);
                return;
        }
-       ioc->sas_hba.phy = kcalloc(num_phys,
+
+       ioc->sas_hba.nr_phys_allocated = max_t(u8,
+           MPT_MAX_HBA_NUM_PHYS, num_phys);
+       ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
            sizeof(struct _sas_phy), GFP_KERNEL);
        if (!ioc->sas_hba.phy) {
                ioc_err(ioc, "failure at %s:%d/%s()!\n",
index bed8cc125544841cf523ace529729203e1978b24..fbfeb0b046ddddfa490772a78e9a082a42084d0b 100644 (file)
@@ -282,12 +282,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        if (rc) {
                pm8001_dbg(pm8001_ha, FAIL,
                           "pm8001_setup_irq failed [ret: %d]\n", rc);
-               goto err_out_shost;
+               goto err_out;
        }
        /* Request Interrupt */
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
-               goto err_out_shost;
+               goto err_out;
 
        count = pm8001_ha->max_q_num;
        /* Queues are chosen based on the number of cores/msix availability */
@@ -423,8 +423,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        pm8001_tag_init(pm8001_ha);
        return 0;
 
-err_out_shost:
-       scsi_remove_host(pm8001_ha->shost);
 err_out_nodev:
        for (i = 0; i < pm8001_ha->max_memcnt; i++) {
                if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
index 84a4204a2cb472f0cffe8df555b0bf56e6dec823..5916ed7662d56ea79ccce4be337f319e49baae06 100644 (file)
@@ -732,7 +732,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 {
        struct qedi_work_map *work, *work_tmp;
        u32 proto_itt = cqe->itid;
-       itt_t protoitt = 0;
        int found = 0;
        struct qedi_cmd *qedi_cmd = NULL;
        u32 iscsi_cid;
@@ -812,16 +811,12 @@ unlock:
        return;
 
 check_cleanup_reqs:
-       if (qedi_conn->cmd_cleanup_req > 0) {
-               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+       if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
+           qedi_conn->cmd_cleanup_req) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                          "Freeing tid=0x%x for cid=0x%x\n",
                          cqe->itid, qedi_conn->iscsi_conn_id);
-               qedi_conn->cmd_cleanup_cmpl++;
                wake_up(&qedi_conn->wait_queue);
-       } else {
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
-                        protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
        }
 }
 
@@ -1163,7 +1158,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
        }
 
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
@@ -1215,16 +1210,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
                  qedi_conn->iscsi_conn_id);
 
        rval  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
-                                                ((qedi_conn->cmd_cleanup_req ==
-                                                qedi_conn->cmd_cleanup_cmpl) ||
-                                                test_bit(QEDI_IN_RECOVERY,
-                                                         &qedi->flags)),
-                                                5 * HZ);
+                               (qedi_conn->cmd_cleanup_req ==
+                                atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+                               test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+                               5 * HZ);
        if (rval) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                          "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
                          qedi_conn->cmd_cleanup_req,
-                         qedi_conn->cmd_cleanup_cmpl,
+                         atomic_read(&qedi_conn->cmd_cleanup_cmpl),
                          qedi_conn->iscsi_conn_id);
 
                return 0;
@@ -1233,7 +1227,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
                  qedi_conn->cmd_cleanup_req,
-                 qedi_conn->cmd_cleanup_cmpl,
+                 atomic_read(&qedi_conn->cmd_cleanup_cmpl),
                  qedi_conn->iscsi_conn_id);
 
        iscsi_host_for_each_session(qedi->shost,
@@ -1242,11 +1236,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 
        /* Enable IOs for all other sessions except current.*/
        if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
-                                             (qedi_conn->cmd_cleanup_req ==
-                                              qedi_conn->cmd_cleanup_cmpl) ||
-                                              test_bit(QEDI_IN_RECOVERY,
-                                                       &qedi->flags),
-                                             5 * HZ)) {
+                               (qedi_conn->cmd_cleanup_req ==
+                                atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+                               test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+                               5 * HZ)) {
                iscsi_host_for_each_session(qedi->shost,
                                            qedi_mark_device_available);
                return -1;
@@ -1266,7 +1259,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 
        qedi_ep = qedi_conn->ep;
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        if (!qedi_ep) {
                QEDI_WARN(&qedi->dbg_ctx,
index 88aa7d8b11c9a2508638135896c9a035e810683f..282ecb4e39bbdcf7f64fd63909ead221fad7722f 100644 (file)
@@ -412,7 +412,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
        qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
        qedi_conn->fw_cid = qedi_ep->fw_cid;
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
                rc = -EINVAL;
index a282860da0aa08c4debeee9f457241645190d33f..9b9f2e44fdde480b53e950b5c8a1291033fd0c4a 100644 (file)
@@ -155,7 +155,7 @@ struct qedi_conn {
        spinlock_t list_lock;           /* internal conn lock */
        u32 active_cmd_count;
        u32 cmd_cleanup_req;
-       u32 cmd_cleanup_cmpl;
+       atomic_t cmd_cleanup_cmpl;
 
        u32 iscsi_conn_id;
        int itt;
index 25549a8a2d72dd7bbe1ad05a34c80f082bcf3403..7cf1f78cbaeee6e0fec9f0ba8460f8d187b6cac0 100644 (file)
@@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
        struct va_format vaf;
        char pbuf[64];
 
+       if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+               return;
+
        va_start(va, fmt);
 
        vaf.fmt = fmt;
index 2e37b189cb7559ac4994b783af8883ca10477ddf..53d2b85620271a48398b7971682dcf0c74053b65 100644 (file)
@@ -865,7 +865,7 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
                            "APP request entry - portid=%06x.\n", tdid.b24);
 
                        /* Ran out of space */
-                       if (pcnt > app_req.num_ports)
+                       if (pcnt >= app_req.num_ports)
                                break;
 
                        if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
index 73a353153d33bf1b5a5b0814b39a052d7f8a6e95..10d2655ef6767e209d5a7fa450730fafa2e986b5 100644 (file)
@@ -1695,10 +1695,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
        if (IS_FWI2_CAPABLE(vha->hw))
                mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
-       if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
-               mcp->in_mb |= MBX_15;
-               mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
-       }
+       if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
+               mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
 
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
index 1d0278da9041394bd0ccc365e48f502436f3f6c9..2104973a35cd35695a5b47fd54912149a53f9f28 100644 (file)
@@ -1189,7 +1189,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
                 __func__, off_dst, scsi_bufflen(scp), act_len,
                 scsi_get_resid(scp));
        n = scsi_bufflen(scp) - (off_dst + act_len);
-       scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
+       scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
        return 0;
 }
 
@@ -1562,7 +1562,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        unsigned char pq_pdt;
        unsigned char *arr;
        unsigned char *cmd = scp->cmnd;
-       int alloc_len, n, ret;
+       u32 alloc_len, n;
+       int ret;
        bool have_wlun, is_disk, is_zbc, is_disk_zbc;
 
        alloc_len = get_unaligned_be16(cmd + 3);
@@ -1585,7 +1586,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
                kfree(arr);
                return check_condition_result;
        } else if (0x1 & cmd[1]) {  /* EVPD bit set */
-               int lu_id_num, port_group_id, target_dev_id, len;
+               int lu_id_num, port_group_id, target_dev_id;
+               u32 len;
                char lu_id_str[6];
                int host_no = devip->sdbg_host->shost->host_no;
                
@@ -1676,9 +1678,9 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
                        kfree(arr);
                        return check_condition_result;
                }
-               len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
+               len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
                ret = fill_from_dev_buffer(scp, arr,
-                           min(len, SDEBUG_MAX_INQ_ARR_SZ));
+                           min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
                kfree(arr);
                return ret;
        }
@@ -1714,7 +1716,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        }
        put_unaligned_be16(0x2100, arr + n);    /* SPL-4 no version claimed */
        ret = fill_from_dev_buffer(scp, arr,
-                           min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
+                           min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
        kfree(arr);
        return ret;
 }
@@ -1729,8 +1731,8 @@ static int resp_requests(struct scsi_cmnd *scp,
        unsigned char *cmd = scp->cmnd;
        unsigned char arr[SCSI_SENSE_BUFFERSIZE];       /* assume >= 18 bytes */
        bool dsense = !!(cmd[1] & 1);
-       int alloc_len = cmd[4];
-       int len = 18;
+       u32 alloc_len = cmd[4];
+       u32 len = 18;
        int stopped_state = atomic_read(&devip->stopped);
 
        memset(arr, 0, sizeof(arr));
@@ -1774,7 +1776,7 @@ static int resp_requests(struct scsi_cmnd *scp,
                        arr[7] = 0xa;
                }
        }
-       return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
+       return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
 }
 
 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
@@ -2312,7 +2314,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
 {
        int pcontrol, pcode, subpcode, bd_len;
        unsigned char dev_spec;
-       int alloc_len, offset, len, target_dev_id;
+       u32 alloc_len, offset, len;
+       int target_dev_id;
        int target = scp->device->id;
        unsigned char *ap;
        unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
@@ -2468,7 +2471,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
                arr[0] = offset - 1;
        else
                put_unaligned_be16((offset - 2), arr + 0);
-       return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
+       return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
 }
 
 #define SDEBUG_MAX_MSELECT_SZ 512
@@ -2499,11 +2502,11 @@ static int resp_mode_select(struct scsi_cmnd *scp,
                            __func__, param_len, res);
        md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
        bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
-       if (md_len > 2) {
+       off = bd_len + (mselect6 ? 4 : 8);
+       if (md_len > 2 || off >= res) {
                mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
                return check_condition_result;
        }
-       off = bd_len + (mselect6 ? 4 : 8);
        mpage = arr[off] & 0x3f;
        ps = !!(arr[off] & 0x80);
        if (ps) {
@@ -2583,7 +2586,8 @@ static int resp_ie_l_pg(unsigned char *arr)
 static int resp_log_sense(struct scsi_cmnd *scp,
                          struct sdebug_dev_info *devip)
 {
-       int ppc, sp, pcode, subpcode, alloc_len, len, n;
+       int ppc, sp, pcode, subpcode;
+       u32 alloc_len, len, n;
        unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
        unsigned char *cmd = scp->cmnd;
 
@@ -2653,9 +2657,9 @@ static int resp_log_sense(struct scsi_cmnd *scp,
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
                return check_condition_result;
        }
-       len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
+       len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
        return fill_from_dev_buffer(scp, arr,
-                   min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
+                   min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
 }
 
 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
@@ -4338,7 +4342,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
        rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
                            max_zones);
 
-       arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+       arr = kzalloc(alloc_len, GFP_ATOMIC);
        if (!arr) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
                                INSUFF_RES_ASCQ);
@@ -4430,7 +4434,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
        put_unaligned_be64(sdebug_capacity - 1, arr + 8);
 
        rep_len = (unsigned long)desc - (unsigned long)arr;
-       ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
+       ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
 
 fini:
        read_unlock(macc_lckp);
@@ -4653,6 +4657,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
                         struct sdeb_zone_state *zsp)
 {
        enum sdebug_z_cond zc;
+       struct sdeb_store_info *sip = devip2sip(devip, false);
 
        if (zbc_zone_is_conv(zsp))
                return;
@@ -4664,6 +4669,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
        if (zsp->z_cond == ZC4_CLOSED)
                devip->nr_closed--;
 
+       if (zsp->z_wp > zsp->z_start)
+               memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
+                      (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
+
        zsp->z_non_seq_resource = false;
        zsp->z_wp = zsp->z_start;
        zsp->z_cond = ZC1_EMPTY;
index 55addd78fde4420ef58bfc9d01c53f28857abbb0..d4edce930a4a066880c9c1bb40c6d4b8e08ab8f0 100644 (file)
@@ -792,6 +792,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
        int i, ret;
        struct scsi_device *sdev = to_scsi_device(dev);
        enum scsi_device_state state = 0;
+       bool rescan_dev = false;
 
        for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
                const int len = strlen(sdev_states[i].name);
@@ -810,20 +811,27 @@ store_state_field(struct device *dev, struct device_attribute *attr,
        }
 
        mutex_lock(&sdev->state_mutex);
-       ret = scsi_device_set_state(sdev, state);
-       /*
-        * If the device state changes to SDEV_RUNNING, we need to
-        * run the queue to avoid I/O hang, and rescan the device
-        * to revalidate it. Running the queue first is necessary
-        * because another thread may be waiting inside
-        * blk_mq_freeze_queue_wait() and because that call may be
-        * waiting for pending I/O to finish.
-        */
-       if (ret == 0 && state == SDEV_RUNNING) {
+       if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
+               ret = 0;
+       } else {
+               ret = scsi_device_set_state(sdev, state);
+               if (ret == 0 && state == SDEV_RUNNING)
+                       rescan_dev = true;
+       }
+       mutex_unlock(&sdev->state_mutex);
+
+       if (rescan_dev) {
+               /*
+                * If the device state changes to SDEV_RUNNING, we need to
+                * run the queue to avoid I/O hang, and rescan the device
+                * to revalidate it. Running the queue first is necessary
+                * because another thread may be waiting inside
+                * blk_mq_freeze_queue_wait() and because that call may be
+                * waiting for pending I/O to finish.
+                */
                blk_mq_run_hw_queues(sdev->request_queue, true);
                scsi_rescan_device(dev);
        }
-       mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
 }
index 78343d3f938573226c3f394009f57165db18c56b..554b6f7842236c64d694d0233450431ca6aed2ab 100644 (file)
@@ -1899,12 +1899,12 @@ static void session_recovery_timedout(struct work_struct *work)
        }
        spin_unlock_irqrestore(&session->lock, flags);
 
-       if (session->transport->session_recovery_timedout)
-               session->transport->session_recovery_timedout(session);
-
        ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
        scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
        ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+
+       if (session->transport->session_recovery_timedout)
+               session->transport->session_recovery_timedout(session);
 }
 
 static void __iscsi_unblock_session(struct work_struct *work)
index fc5b214347b364b2dffdf05d28cf9026ad00a09f..5393b5c9dd9c87dfbb929608bd84738cbdc162fb 100644 (file)
@@ -1189,6 +1189,7 @@ static int ufs_mtk_probe(struct platform_device *pdev)
        }
        link = device_link_add(dev, &reset_pdev->dev,
                DL_FLAG_AUTOPROBE_CONSUMER);
+       put_device(&reset_pdev->dev);
        if (!link) {
                dev_notice(dev, "add reset device_link fail\n");
                goto skip_reset;
index 51424557810dab378a5178d2f7beda751fa12a94..f725248ba57f428f107a822880a4ebde605135ab 100644 (file)
@@ -421,6 +421,13 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
        return err;
 }
 
+static int ufs_intel_adl_init(struct ufs_hba *hba)
+{
+       hba->nop_out_timeout = 200;
+       hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+       return ufs_intel_common_init(hba);
+}
+
 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
@@ -449,6 +456,15 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
+static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+       .name                   = "intel-pci",
+       .init                   = ufs_intel_adl_init,
+       .exit                   = ufs_intel_common_exit,
+       .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
+       .device_reset           = ufs_intel_device_reset,
+};
+
 #ifdef CONFIG_PM_SLEEP
 static int ufshcd_pci_restore(struct device *dev)
 {
@@ -563,6 +579,8 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { }     /* terminate list */
 };
 
index afd38142b1c0282f6db4c410c06ce8684c98622c..13c09dbd99b92a705c72cf217b0a1957e8a96a76 100644 (file)
@@ -6453,9 +6453,8 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
        irqreturn_t ret = IRQ_NONE;
        int tag;
 
-       pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-
        spin_lock_irqsave(hba->host->host_lock, flags);
+       pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
        issued = hba->outstanding_tasks & ~pending;
        for_each_set_bit(tag, &issued, hba->nutmrs) {
                struct request *req = hba->tmf_rqs[tag];
@@ -6616,11 +6615,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        err = wait_for_completion_io_timeout(&wait,
                        msecs_to_jiffies(TM_CMD_TIMEOUT));
        if (!err) {
-               /*
-                * Make sure that ufshcd_compl_tm() does not trigger a
-                * use-after-free.
-                */
-               req->end_io_data = NULL;
                ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                                __func__, tm_function);
@@ -7116,6 +7110,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                goto release;
        }
 
+       lrbp->cmd = NULL;
        err = SUCCESS;
 
 release:
index 2e31e14138262cd37eb3f4c55e61ef305256c556..ded5ba9b1466a948b172029ab3ec3711af80dd3e 100644 (file)
@@ -331,7 +331,7 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
        cdb[0] = UFSHPB_READ;
 
        if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
-               ppn_tmp = swab64(ppn);
+               ppn_tmp = (__force __be64)swab64((__force u64)ppn);
 
        /* ppn value is stored as big-endian in the host memory */
        memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
index 19f7d7b906251b0a4976b7bc17692df35bb69ea3..28e1d98ae1021440c86ae73a6a4d8cf793d7934f 100644 (file)
@@ -977,7 +977,6 @@ static unsigned int features[] = {
 static struct virtio_driver virtio_scsi_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
-       .suppress_used_validation = true,
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
index 8b3d268ac63c93520f6be389dab4ccb686c841cf..b808c94641fa67ad8854fe870f2367adc9c8c130 100644 (file)
@@ -37,6 +37,7 @@
 #define CQSPI_NEEDS_WR_DELAY           BIT(0)
 #define CQSPI_DISABLE_DAC_MODE         BIT(1)
 #define CQSPI_SUPPORT_EXTERNAL_DMA     BIT(2)
+#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
 
 /* Capabilities */
 #define CQSPI_SUPPORTS_OCTAL           BIT(0)
@@ -86,6 +87,7 @@ struct cqspi_st {
        struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
        bool                    use_dma_read;
        u32                     pd_dev_id;
+       bool                    wr_completion;
 };
 
 struct cqspi_driver_platdata {
@@ -996,9 +998,11 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
         * polling on the controller's side. spinand and spi-nor will take
         * care of polling the status register.
         */
-       reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
-       reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
-       writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+       if (cqspi->wr_completion) {
+               reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+               reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+               writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+       }
 
        reg = readl(reg_base + CQSPI_REG_SIZE);
        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
@@ -1736,6 +1740,10 @@ static int cqspi_probe(struct platform_device *pdev)
 
        cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
        master->max_speed_hz = cqspi->master_ref_clk_hz;
+
+       /* write completion is supported by default */
+       cqspi->wr_completion = true;
+
        ddata  = of_device_get_match_data(dev);
        if (ddata) {
                if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
@@ -1747,6 +1755,8 @@ static int cqspi_probe(struct platform_device *pdev)
                        cqspi->use_direct_mode = true;
                if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
                        cqspi->use_dma_read = true;
+               if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
+                       cqspi->wr_completion = false;
 
                if (of_device_is_compatible(pdev->dev.of_node,
                                            "xlnx,versal-ospi-1.0"))
@@ -1859,6 +1869,10 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
        .quirks = CQSPI_DISABLE_DAC_MODE,
 };
 
+static const struct cqspi_driver_platdata socfpga_qspi = {
+       .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION,
+};
+
 static const struct cqspi_driver_platdata versal_ospi = {
        .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
        .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
@@ -1887,6 +1901,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
                .compatible = "xlnx,versal-ospi-1.0",
                .data = (void *)&versal_ospi,
        },
+       {
+               .compatible = "intel,socfpga-qspi",
+               .data = (void *)&socfpga_qspi,
+       },
        { /* end of table */ }
 };
 
index 5d98611dd999d19b0cd10facd8501e05c8090944..c72e501c270fd8156a38d8c64d885f7914cd1cbb 100644 (file)
@@ -912,7 +912,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
 
        ret = devm_spi_register_controller(&pdev->dev, controller);
        if (ret < 0) {
-               dev_err(&pdev->dev, "spi_register_controller error.\n");
+               dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret);
                goto out_pm_get;
        }
 
index 27a446faf143334b4c6eb6be4434c76ce3590a42..e2affaee4e769d250c43f572cce71d5c236708f8 100644 (file)
@@ -491,22 +491,26 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
        int ret;
 
        mas->tx = dma_request_chan(mas->dev, "tx");
-       ret = dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n");
-       if (ret < 0)
+       if (IS_ERR(mas->tx)) {
+               ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
+                                   "Failed to get tx DMA ch\n");
                goto err_tx;
+       }
 
        mas->rx = dma_request_chan(mas->dev, "rx");
-       ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n");
-       if (ret < 0)
+       if (IS_ERR(mas->rx)) {
+               ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
+                                   "Failed to get rx DMA ch\n");
                goto err_rx;
+       }
 
        return 0;
 
 err_rx:
+       mas->rx = NULL;
        dma_release_channel(mas->tx);
-       mas->tx = NULL;
 err_tx:
-       mas->rx = NULL;
+       mas->tx = NULL;
        return ret;
 }
 
index b23e675953e1ab43f953c9481c85274921fbcc65..fdd530b150a7abdc95a74c9399bed5242d729492 100644 (file)
@@ -3099,12 +3099,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 
        device_del(&ctlr->dev);
 
-       /* Release the last reference on the controller if its driver
-        * has not yet been converted to devm_spi_alloc_master/slave().
-        */
-       if (!ctlr->devm_allocated)
-               put_device(&ctlr->dev);
-
        /* free bus id */
        mutex_lock(&board_lock);
        if (found == ctlr)
@@ -3113,6 +3107,12 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 
        if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
                mutex_unlock(&ctlr->add_lock);
+
+       /* Release the last reference on the controller if its driver
+        * has not yet been converted to devm_spi_alloc_master/slave().
+        */
+       if (!ctlr->devm_allocated)
+               put_device(&ctlr->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_controller);
 
index 59af251e75769cb34ccd45927e627264468bcad2..7fec8694613126d077ac3996151301c0a4b2d799 100644 (file)
@@ -66,8 +66,6 @@ source "drivers/staging/gdm724x/Kconfig"
 
 source "drivers/staging/fwserial/Kconfig"
 
-source "drivers/staging/netlogic/Kconfig"
-
 source "drivers/staging/gs_fpgaboot/Kconfig"
 
 source "drivers/staging/unisys/Kconfig"
index 76f413470bc8fe8990756b7b091df0d8cd0e267e..e66e19c454257a3885cc68ad4966628351d83e32 100644 (file)
@@ -10,7 +10,6 @@ obj-$(CONFIG_RTL8723BS)               += rtl8723bs/
 obj-$(CONFIG_R8712U)           += rtl8712/
 obj-$(CONFIG_R8188EU)          += r8188eu/
 obj-$(CONFIG_RTS5208)          += rts5208/
-obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
 obj-$(CONFIG_OCTEON_ETHERNET)  += octeon/
 obj-$(CONFIG_OCTEON_USB)       += octeon-usb/
 obj-$(CONFIG_VT6655)           += vt6655/
index cf263a58a1489fe4dd507dfce6b59718ffd1fb68..6fd549a424d53fc89fb938a070fa2be0442c1e89 100644 (file)
@@ -187,7 +187,6 @@ static struct fbtft_display display = {
        },
 };
 
-#ifdef CONFIG_FB_BACKLIGHT
 static int update_onboard_backlight(struct backlight_device *bd)
 {
        struct fbtft_par *par = bl_get_data(bd);
@@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
        if (!par->fbtftops.unregister_backlight)
                par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
 }
-#else
-static void register_onboard_backlight(struct fbtft_par *par) { };
-#endif
 
 FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
 
index ecb5f75f6dd56858c33df9184f2804058e7d2827..f2684d2d68516cc0296f337a1e8ec2a1cecd7865 100644 (file)
@@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par)
        return 0;
 }
 
-#ifdef CONFIG_FB_BACKLIGHT
 static int fbtft_backlight_update_status(struct backlight_device *bd)
 {
        struct fbtft_par *par = bl_get_data(bd);
@@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
                par->info->bl_dev = NULL;
        }
 }
+EXPORT_SYMBOL(fbtft_unregister_backlight);
 
 static const struct backlight_ops fbtft_bl_ops = {
        .get_brightness = fbtft_backlight_get_brightness,
@@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
        if (!par->fbtftops.unregister_backlight)
                par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
 }
-#else
-void fbtft_register_backlight(struct fbtft_par *par) { };
-void fbtft_unregister_backlight(struct fbtft_par *par) { };
-#endif
 EXPORT_SYMBOL(fbtft_register_backlight);
-EXPORT_SYMBOL(fbtft_unregister_backlight);
 
 static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
                               int ye)
@@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
                 fb_info->fix.smem_len >> 10, text1,
                 HZ / fb_info->fbdefio->delay, text2);
 
-#ifdef CONFIG_FB_BACKLIGHT
        /* Turn on backlight if available */
        if (fb_info->bl_dev) {
                fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
                fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
        }
-#endif
 
        return 0;
 
index 1ed4772d2771502cfd673e45d1242340794ea675..843760675876afdc866250cb11924d30eedfd625 100644 (file)
@@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component,
                                      unsigned int num_controls)
 {
        struct snd_card *card = component->card->snd_card;
+       int err;
 
-       return gbaudio_remove_controls(card, component->dev, controls,
-                                      num_controls, component->name_prefix);
+       down_write(&card->controls_rwsem);
+       err = gbaudio_remove_controls(card, component->dev, controls,
+                                     num_controls, component->name_prefix);
+       up_write(&card->controls_rwsem);
+       return err;
 }
diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig
deleted file mode 100644 (file)
index e171260..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config NETLOGIC_XLR_NET
-       tristate "Netlogic XLR/XLS network device"
-       depends on CPU_XLR
-       depends on NETDEVICES
-       select PHYLIB
-       help
-       This driver support Netlogic XLR/XLS on chip gigabit
-       Ethernet.
diff --git a/drivers/staging/netlogic/Makefile b/drivers/staging/netlogic/Makefile
deleted file mode 100644 (file)
index 7e2902a..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_NETLOGIC_XLR_NET) += xlr_net.o platform_net.o
diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO
deleted file mode 100644 (file)
index 20e22ec..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-* Implementing 64bit stat counter in software
-* All memory allocation should be changed to DMA allocations
-* Changing comments into linux standard format
-
-Please send patches
-To:
-Ganesan Ramalingam <ganesanr@broadcom.com>
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Cc:
-Jayachandran Chandrashekaran Nair <jchandra@broadcom.com>
-
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
deleted file mode 100644 (file)
index 8be9d0b..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/resource.h>
-#include <linux/phy.h>
-
-#include <asm/netlogic/haldefs.h>
-#include <asm/netlogic/common.h>
-#include <asm/netlogic/xlr/fmn.h>
-#include <asm/netlogic/xlr/xlr.h>
-#include <asm/netlogic/psb-bootinfo.h>
-#include <asm/netlogic/xlr/pic.h>
-#include <asm/netlogic/xlr/iomap.h>
-
-#include "platform_net.h"
-
-/* Linux Net */
-#define MAX_NUM_GMAC           8
-#define MAX_NUM_XLS_GMAC       8
-#define MAX_NUM_XLR_GMAC       4
-
-static u32 xlr_gmac_offsets[] = {
-       NETLOGIC_IO_GMAC_0_OFFSET, NETLOGIC_IO_GMAC_1_OFFSET,
-       NETLOGIC_IO_GMAC_2_OFFSET, NETLOGIC_IO_GMAC_3_OFFSET,
-       NETLOGIC_IO_GMAC_4_OFFSET, NETLOGIC_IO_GMAC_5_OFFSET,
-       NETLOGIC_IO_GMAC_6_OFFSET, NETLOGIC_IO_GMAC_7_OFFSET
-};
-
-static u32 xlr_gmac_irqs[] = { PIC_GMAC_0_IRQ, PIC_GMAC_1_IRQ,
-       PIC_GMAC_2_IRQ, PIC_GMAC_3_IRQ,
-       PIC_GMAC_4_IRQ, PIC_GMAC_5_IRQ,
-       PIC_GMAC_6_IRQ, PIC_GMAC_7_IRQ
-};
-
-static struct resource xlr_net0_res[8];
-static struct resource xlr_net1_res[8];
-static u32 __iomem *gmac4_addr;
-static u32 __iomem *gpio_addr;
-
-static void xlr_resource_init(struct resource *res, int offset, int irq)
-{
-       res->name = "gmac";
-
-       res->start = CPHYSADDR(nlm_mmio_base(offset));
-       res->end = res->start + 0xfff;
-       res->flags = IORESOURCE_MEM;
-
-       res++;
-       res->name = "gmac";
-       res->start = irq;
-       res->end = irq;
-       res->flags = IORESOURCE_IRQ;
-}
-
-static struct platform_device *gmac_controller2_init(void *gmac0_addr)
-{
-       int mac;
-       static struct xlr_net_data ndata1 = {
-               .phy_interface  = PHY_INTERFACE_MODE_SGMII,
-               .rfr_station    = FMN_STNID_GMAC1_FR_0,
-               .bucket_size    = xlr_board_fmn_config.bucket_size,
-               .gmac_fmn_info  = &xlr_board_fmn_config.gmac[1],
-       };
-
-       static struct platform_device xlr_net_dev1 = {
-               .name           = "xlr-net",
-               .id             = 1,
-               .dev.platform_data = &ndata1,
-       };
-
-       gmac4_addr =
-               ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)),
-                       0xfff);
-       ndata1.serdes_addr = gmac4_addr;
-       ndata1.pcs_addr = gmac4_addr;
-       ndata1.mii_addr = gmac0_addr;
-       ndata1.gpio_addr = gpio_addr;
-       ndata1.cpu_mask = nlm_current_node()->coremask;
-
-       xlr_net_dev1.resource = xlr_net1_res;
-
-       for (mac = 0; mac < 4; mac++) {
-               ndata1.tx_stnid[mac] = FMN_STNID_GMAC1_TX0 + mac;
-               ndata1.phy_addr[mac] = mac + 4 + 0x10;
-
-               xlr_resource_init(&xlr_net1_res[mac * 2],
-                                 xlr_gmac_offsets[mac + 4],
-                                 xlr_gmac_irqs[mac + 4]);
-       }
-       xlr_net_dev1.num_resources = 8;
-
-       return &xlr_net_dev1;
-}
-
-static void xls_gmac_init(void)
-{
-       int mac;
-       struct platform_device *xlr_net_dev1;
-       void __iomem *gmac0_addr =
-               ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
-                       0xfff);
-
-       static struct xlr_net_data ndata0 = {
-               .rfr_station    = FMN_STNID_GMACRFR_0,
-               .bucket_size    = xlr_board_fmn_config.bucket_size,
-               .gmac_fmn_info  = &xlr_board_fmn_config.gmac[0],
-       };
-
-       static struct platform_device xlr_net_dev0 = {
-               .name           = "xlr-net",
-               .id             = 0,
-       };
-       xlr_net_dev0.dev.platform_data = &ndata0;
-       ndata0.serdes_addr = gmac0_addr;
-       ndata0.pcs_addr = gmac0_addr;
-       ndata0.mii_addr = gmac0_addr;
-
-       /* Passing GPIO base for serdes init. Only needed on sgmii ports */
-       gpio_addr =
-               ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)),
-                       0xfff);
-       ndata0.gpio_addr = gpio_addr;
-       ndata0.cpu_mask = nlm_current_node()->coremask;
-
-       xlr_net_dev0.resource = xlr_net0_res;
-
-       switch (nlm_prom_info.board_major_version) {
-       case 12:
-               /* first block RGMII or XAUI, use RGMII */
-               ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII;
-               ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0;
-               ndata0.phy_addr[0] = 0;
-
-               xlr_net_dev0.num_resources = 2;
-
-               xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0],
-                                 xlr_gmac_irqs[0]);
-               platform_device_register(&xlr_net_dev0);
-
-               /* second block is XAUI, not supported yet */
-               break;
-       default:
-               /* default XLS config, all ports SGMII */
-               ndata0.phy_interface = PHY_INTERFACE_MODE_SGMII;
-               for (mac = 0; mac < 4; mac++) {
-                       ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
-                       ndata0.phy_addr[mac] = mac + 0x10;
-
-                       xlr_resource_init(&xlr_net0_res[mac * 2],
-                                         xlr_gmac_offsets[mac],
-                                       xlr_gmac_irqs[mac]);
-               }
-               xlr_net_dev0.num_resources = 8;
-               platform_device_register(&xlr_net_dev0);
-
-               xlr_net_dev1 = gmac_controller2_init(gmac0_addr);
-               platform_device_register(xlr_net_dev1);
-       }
-}
-
-static void xlr_gmac_init(void)
-{
-       int mac;
-
-       /* assume all GMACs for now */
-       static struct xlr_net_data ndata0 = {
-               .phy_interface  = PHY_INTERFACE_MODE_RGMII,
-               .serdes_addr    = NULL,
-               .pcs_addr       = NULL,
-               .rfr_station    = FMN_STNID_GMACRFR_0,
-               .bucket_size    = xlr_board_fmn_config.bucket_size,
-               .gmac_fmn_info  = &xlr_board_fmn_config.gmac[0],
-               .gpio_addr      = NULL,
-       };
-
-       static struct platform_device xlr_net_dev0 = {
-               .name           = "xlr-net",
-               .id             = 0,
-               .dev.platform_data = &ndata0,
-       };
-       ndata0.mii_addr =
-               ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
-                       0xfff);
-
-       ndata0.cpu_mask = nlm_current_node()->coremask;
-
-       for (mac = 0; mac < MAX_NUM_XLR_GMAC; mac++) {
-               ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
-               ndata0.phy_addr[mac] = mac;
-               xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac],
-                                 xlr_gmac_irqs[mac]);
-       }
-       xlr_net_dev0.num_resources = 8;
-       xlr_net_dev0.resource = xlr_net0_res;
-
-       platform_device_register(&xlr_net_dev0);
-}
-
-static int __init xlr_net_init(void)
-{
-       if (nlm_chip_is_xls())
-               xls_gmac_init();
-       else
-               xlr_gmac_init();
-
-       return 0;
-}
-
-arch_initcall(xlr_net_init);
diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h
deleted file mode 100644 (file)
index c8d4c13..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#define PORTS_PER_CONTROLLER           4
-
-struct xlr_net_data {
-       int cpu_mask;
-       u32 __iomem *mii_addr;
-       u32 __iomem *serdes_addr;
-       u32 __iomem *pcs_addr;
-       u32 __iomem *gpio_addr;
-       int phy_interface;
-       int rfr_station;
-       int tx_stnid[PORTS_PER_CONTROLLER];
-       int *bucket_size;
-       int phy_addr[PORTS_PER_CONTROLLER];
-       struct xlr_fmn_info *gmac_fmn_info;
-};
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
deleted file mode 100644 (file)
index 69ea61f..0000000
+++ /dev/null
@@ -1,1080 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#include <linux/phy.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/smp.h>
-#include <linux/ethtool.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <asm/mipsregs.h>
-/*
- * fmn.h - For FMN credit configuration and registering fmn_handler.
- * FMN is communication mechanism that allows processing agents within
- * XLR/XLS to communicate each other.
- */
-#include <asm/netlogic/xlr/fmn.h>
-
-#include "platform_net.h"
-#include "xlr_net.h"
-
-/*
- * The readl/writel implementation byteswaps on XLR/XLS, so
- * we need to use __raw_ IO to read the NAE registers
- * because they are in the big-endian MMIO area on the SoC.
- */
-static inline void xlr_nae_wreg(u32 __iomem *base, unsigned int reg, u32 val)
-{
-       __raw_writel(val, base + reg);
-}
-
-static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg)
-{
-       return __raw_readl(base + reg);
-}
-
-static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask)
-{
-       u32 tmp;
-
-       tmp = xlr_nae_rdreg(base_addr, off);
-       xlr_nae_wreg(base_addr, off, (tmp & ~mask) | (val & mask));
-}
-
-#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
-
-static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr)
-{
-       struct nlm_fmn_msg msg;
-       int ret = 0, num_try = 0, stnid;
-       unsigned long paddr, mflags;
-
-       paddr = virt_to_bus(addr);
-       msg.msg0 = (u64)paddr & 0xffffffffe0ULL;
-       msg.msg1 = 0;
-       msg.msg2 = 0;
-       msg.msg3 = 0;
-       stnid = priv->nd->rfr_station;
-       do {
-               mflags = nlm_cop2_enable_irqsave();
-               ret = nlm_fmn_send(1, 0, stnid, &msg);
-               nlm_cop2_disable_irqrestore(mflags);
-               if (ret == 0)
-                       return 0;
-       } while (++num_try < 10000);
-
-       netdev_err(priv->ndev, "Send to RFR failed in RX path\n");
-       return ret;
-}
-
-static inline unsigned char *xlr_alloc_skb(void)
-{
-       struct sk_buff *skb;
-       int buf_len = sizeof(struct sk_buff *);
-       unsigned char *skb_data;
-
-       /* skb->data is cache aligned */
-       skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC);
-       if (!skb)
-               return NULL;
-       skb_data = skb->data;
-       skb_reserve(skb, MAC_SKB_BACK_PTR_SIZE);
-       memcpy(skb_data, &skb, buf_len);
-
-       return skb->data;
-}
-
-static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
-                               struct nlm_fmn_msg *msg, void *arg)
-{
-       struct sk_buff *skb;
-       void *skb_data = NULL;
-       struct net_device *ndev;
-       struct xlr_net_priv *priv;
-       u32 port, length;
-       unsigned char *addr;
-       struct xlr_adapter *adapter = arg;
-
-       length = (msg->msg0 >> 40) & 0x3fff;
-       if (length == 0) {
-               addr = bus_to_virt(msg->msg0 & 0xffffffffffULL);
-               addr = addr - MAC_SKB_BACK_PTR_SIZE;
-               skb = (struct sk_buff *)(*(unsigned long *)addr);
-               dev_kfree_skb_any((struct sk_buff *)addr);
-       } else {
-               addr = (unsigned char *)
-                       bus_to_virt(msg->msg0 & 0xffffffffe0ULL);
-               length = length - BYTE_OFFSET - MAC_CRC_LEN;
-               port = ((int)msg->msg0) & 0x0f;
-               addr = addr - MAC_SKB_BACK_PTR_SIZE;
-               skb = (struct sk_buff *)(*(unsigned long *)addr);
-               skb->dev = adapter->netdev[port];
-               if (!skb->dev)
-                       return;
-               ndev = skb->dev;
-               priv = netdev_priv(ndev);
-
-               /* 16 byte IP header align */
-               skb_reserve(skb, BYTE_OFFSET);
-               skb_put(skb, length);
-               skb->protocol = eth_type_trans(skb, skb->dev);
-               netif_rx(skb);
-               /* Fill rx ring */
-               skb_data = xlr_alloc_skb();
-               if (skb_data)
-                       send_to_rfr_fifo(priv, skb_data);
-       }
-}
-
-static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
-{
-       return mdiobus_get_phy(priv->mii_bus, priv->phy_addr);
-}
-
-/*
- * Ethtool operation
- */
-static int xlr_get_link_ksettings(struct net_device *ndev,
-                                 struct ethtool_link_ksettings *ecmd)
-{
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       struct phy_device *phydev = xlr_get_phydev(priv);
-
-       if (!phydev)
-               return -ENODEV;
-
-       phy_ethtool_ksettings_get(phydev, ecmd);
-
-       return 0;
-}
-
-static int xlr_set_link_ksettings(struct net_device *ndev,
-                                 const struct ethtool_link_ksettings *ecmd)
-{
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       struct phy_device *phydev = xlr_get_phydev(priv);
-
-       if (!phydev)
-               return -ENODEV;
-       return phy_ethtool_ksettings_set(phydev, ecmd);
-}
-
-static const struct ethtool_ops xlr_ethtool_ops = {
-       .get_link_ksettings = xlr_get_link_ksettings,
-       .set_link_ksettings = xlr_set_link_ksettings,
-};
-
-/*
- * Net operations
- */
-static int xlr_net_fill_rx_ring(struct net_device *ndev)
-{
-       void *skb_data;
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       int i;
-
-       for (i = 0; i < MAX_FRIN_SPILL / 4; i++) {
-               skb_data = xlr_alloc_skb();
-               if (!skb_data)
-                       return -ENOMEM;
-               send_to_rfr_fifo(priv, skb_data);
-       }
-       netdev_info(ndev, "Rx ring setup done\n");
-       return 0;
-}
-
-static int xlr_net_open(struct net_device *ndev)
-{
-       u32 err;
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       struct phy_device *phydev = xlr_get_phydev(priv);
-
-       /* schedule a link state check */
-       phy_start(phydev);
-
-       err = phy_start_aneg(phydev);
-       if (err) {
-               pr_err("Autoneg failed\n");
-               return err;
-       }
-       /* Setup the speed from PHY to internal reg*/
-       xlr_set_gmac_speed(priv);
-
-       netif_tx_start_all_queues(ndev);
-
-       return 0;
-}
-
-static int xlr_net_stop(struct net_device *ndev)
-{
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       struct phy_device *phydev = xlr_get_phydev(priv);
-
-       phy_stop(phydev);
-       netif_tx_stop_all_queues(ndev);
-       return 0;
-}
-
-static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
-                            struct sk_buff *skb)
-{
-       unsigned long physkb = virt_to_phys(skb);
-       int cpu_core = nlm_core_id();
-       int fr_stn_id = cpu_core * 8 + XLR_FB_STN;      /* FB to 6th bucket */
-
-       msg->msg0 = (((u64)1 << 63)     |       /* End of packet descriptor */
-               ((u64)127 << 54)        |       /* No Free back */
-               (u64)skb->len << 40     |       /* Length of data */
-               ((u64)addr));
-       msg->msg1 = (((u64)1 << 63)     |
-               ((u64)fr_stn_id << 54)  |       /* Free back id */
-               (u64)0 << 40            |       /* Set len to 0 */
-               ((u64)physkb  & 0xffffffff));   /* 32bit address */
-       msg->msg2 = 0;
-       msg->msg3 = 0;
-}
-
-static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
-                                     struct net_device *ndev)
-{
-       struct nlm_fmn_msg msg;
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       int ret;
-       u32 flags;
-
-       xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb);
-       flags = nlm_cop2_enable_irqsave();
-       ret = nlm_fmn_send(2, 0, priv->tx_stnid, &msg);
-       nlm_cop2_disable_irqrestore(flags);
-       if (ret)
-               dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
-}
-
-static void xlr_hw_set_mac_addr(struct net_device *ndev)
-{
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-
-       /* set mac station address */
-       xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0,
-                    ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
-                    (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
-       xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1,
-                    ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
-
-       xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff);
-       xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
-       xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3, 0xffffffff);
-       xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
-
-       xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG,
-                    (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
-                    (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
-                    (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
-
-       if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
-           priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
-               xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
-}
-
-static int xlr_net_set_mac_addr(struct net_device *ndev, void *data)
-{
-       int err;
-
-       err = eth_mac_addr(ndev, data);
-       if (err)
-               return err;
-       xlr_hw_set_mac_addr(ndev);
-       return 0;
-}
-
-static void xlr_set_rx_mode(struct net_device *ndev)
-{
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       u32 regval;
-
-       regval = xlr_nae_rdreg(priv->base_addr, R_MAC_FILTER_CONFIG);
-
-       if (ndev->flags & IFF_PROMISC) {
-               regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
-               (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
-               (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
-               (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
-       } else {
-               regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
-               (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
-       }
-
-       xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, regval);
-}
-
-static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
-{
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-
-       stats->rx_packets = xlr_nae_rdreg(priv->base_addr, RX_PACKET_COUNTER);
-       stats->tx_packets = xlr_nae_rdreg(priv->base_addr, TX_PACKET_COUNTER);
-       stats->rx_bytes = xlr_nae_rdreg(priv->base_addr, RX_BYTE_COUNTER);
-       stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER);
-       stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER);
-       stats->rx_dropped = xlr_nae_rdreg(priv->base_addr,
-                                         RX_DROP_PACKET_COUNTER);
-       stats->tx_dropped = xlr_nae_rdreg(priv->base_addr,
-                                         TX_DROP_FRAME_COUNTER);
-
-       stats->multicast = xlr_nae_rdreg(priv->base_addr,
-                                        RX_MULTICAST_PACKET_COUNTER);
-       stats->collisions = xlr_nae_rdreg(priv->base_addr,
-                                         TX_TOTAL_COLLISION_COUNTER);
-
-       stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr,
-                                               RX_FRAME_LENGTH_ERROR_COUNTER);
-       stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr,
-                                             RX_DROP_PACKET_COUNTER);
-       stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr,
-                                            RX_FCS_ERROR_COUNTER);
-       stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr,
-                                              RX_ALIGNMENT_ERROR_COUNTER);
-
-       stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
-                                             RX_DROP_PACKET_COUNTER);
-       stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr,
-                                               RX_CARRIER_SENSE_ERROR_COUNTER);
-
-       stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors +
-                           stats->rx_frame_errors + stats->rx_fifo_errors +
-                           stats->rx_missed_errors);
-
-       stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr,
-                                                TX_EXCESSIVE_COLLISION_PACKET_COUNTER);
-       stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr,
-                                                TX_DROP_FRAME_COUNTER);
-       stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
-                                             TX_DROP_FRAME_COUNTER);
-}
-
-static const struct net_device_ops xlr_netdev_ops = {
-       .ndo_open = xlr_net_open,
-       .ndo_stop = xlr_net_stop,
-       .ndo_start_xmit = xlr_net_start_xmit,
-       .ndo_select_queue = dev_pick_tx_cpu_id,
-       .ndo_set_mac_address = xlr_net_set_mac_addr,
-       .ndo_set_rx_mode = xlr_set_rx_mode,
-       .ndo_get_stats64 = xlr_stats,
-};
-
-/*
- * Gmac init
- */
-static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
-                             int reg_start_1, int reg_size, int size)
-{
-       void *spill;
-       u32 *base;
-       unsigned long phys_addr;
-       u32 spill_size;
-
-       base = priv->base_addr;
-       spill_size = size;
-       spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_KERNEL);
-       if (!spill)
-               return ZERO_SIZE_PTR;
-
-       spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
-       phys_addr = virt_to_phys(spill);
-       dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n",
-               size, phys_addr);
-       xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
-       xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07);
-       xlr_nae_wreg(base, reg_size, spill_size);
-
-       return spill;
-}
-
-/*
- * Configure the 6 FIFO's that are used by the network accelarator to
- * communicate with the rest of the XLx device. 4 of the FIFO's are for
- * packets from NA --> cpu (called Class FIFO's) and 2 are for feeding
- * the NA with free descriptors.
- */
-static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
-{
-       priv->frin_spill = xlr_config_spill(priv,
-                                           R_REG_FRIN_SPILL_MEM_START_0,
-                                           R_REG_FRIN_SPILL_MEM_START_1,
-                                           R_REG_FRIN_SPILL_MEM_SIZE,
-                                           MAX_FRIN_SPILL * sizeof(u64));
-       priv->frout_spill = xlr_config_spill(priv,
-                                            R_FROUT_SPILL_MEM_START_0,
-                                            R_FROUT_SPILL_MEM_START_1,
-                                            R_FROUT_SPILL_MEM_SIZE,
-                                            MAX_FROUT_SPILL * sizeof(u64));
-       priv->class_0_spill = xlr_config_spill(priv,
-                                              R_CLASS0_SPILL_MEM_START_0,
-                                              R_CLASS0_SPILL_MEM_START_1,
-                                              R_CLASS0_SPILL_MEM_SIZE,
-                                              MAX_CLASS_0_SPILL * sizeof(u64));
-       priv->class_1_spill = xlr_config_spill(priv,
-                                              R_CLASS1_SPILL_MEM_START_0,
-                                              R_CLASS1_SPILL_MEM_START_1,
-                                              R_CLASS1_SPILL_MEM_SIZE,
-                                              MAX_CLASS_1_SPILL * sizeof(u64));
-       priv->class_2_spill = xlr_config_spill(priv,
-                                              R_CLASS2_SPILL_MEM_START_0,
-                                              R_CLASS2_SPILL_MEM_START_1,
-                                              R_CLASS2_SPILL_MEM_SIZE,
-                                              MAX_CLASS_2_SPILL * sizeof(u64));
-       priv->class_3_spill = xlr_config_spill(priv,
-                                              R_CLASS3_SPILL_MEM_START_0,
-                                              R_CLASS3_SPILL_MEM_START_1,
-                                              R_CLASS3_SPILL_MEM_SIZE,
-                                              MAX_CLASS_3_SPILL * sizeof(u64));
-}
-
-/*
- * Configure PDE to Round-Robin distribution of packets to the
- * available cpu
- */
-static void xlr_config_pde(struct xlr_net_priv *priv)
-{
-       int i = 0;
-       u64 bkt_map = 0;
-
-       /* Each core has 8 buckets(station) */
-       for (i = 0; i < hweight32(priv->nd->cpu_mask); i++)
-               bkt_map |= (0xff << (i * 8));
-
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff));
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1,
-                    ((bkt_map >> 32) & 0xffffffff));
-
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff));
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1,
-                    ((bkt_map >> 32) & 0xffffffff));
-
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff));
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1,
-                    ((bkt_map >> 32) & 0xffffffff));
-
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff));
-       xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1,
-                    ((bkt_map >> 32) & 0xffffffff));
-}
-
-/*
- * Setup the Message ring credits, bucket size and other
- * common configuration
- */
-static int xlr_config_common(struct xlr_net_priv *priv)
-{
-       struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info;
-       int start_stn_id = gmac->start_stn_id;
-       int end_stn_id = gmac->end_stn_id;
-       int *bucket_size = priv->nd->bucket_size;
-       int i, j, err;
-
-       /* Setting non-core MsgBktSize(0x321 - 0x325) */
-       for (i = start_stn_id; i <= end_stn_id; i++) {
-               xlr_nae_wreg(priv->base_addr,
-                            R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
-                            bucket_size[i]);
-       }
-
-       /*
-        * Setting non-core Credit counter register
-        * Distributing Gmac's credit to CPU's
-        */
-       for (i = 0; i < 8; i++) {
-               for (j = 0; j < 8; j++)
-                       xlr_nae_wreg(priv->base_addr,
-                                    (R_CC_CPU0_0 + (i * 8)) + j,
-                                    gmac->credit_config[(i * 8) + j]);
-       }
-
-       xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3);
-       xlr_nae_wreg(priv->base_addr, R_DMACR0, 0xffffffff);
-       xlr_nae_wreg(priv->base_addr, R_DMACR1, 0xffffffff);
-       xlr_nae_wreg(priv->base_addr, R_DMACR2, 0xffffffff);
-       xlr_nae_wreg(priv->base_addr, R_DMACR3, 0xffffffff);
-       xlr_nae_wreg(priv->base_addr, R_FREEQCARVE, 0);
-
-       err = xlr_net_fill_rx_ring(priv->ndev);
-       if (err)
-               return err;
-       nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler,
-                                priv->adapter);
-       return 0;
-}
-
-static void xlr_config_translate_table(struct xlr_net_priv *priv)
-{
-       u32 cpu_mask;
-       u32 val;
-       int bkts[32]; /* one bucket is assumed for each cpu */
-       int b1, b2, c1, c2, i, j, k;
-       int use_bkt;
-
-       use_bkt = 0;
-       cpu_mask = priv->nd->cpu_mask;
-
-       pr_info("Using %s-based distribution\n",
-               (use_bkt) ? "bucket" : "class");
-       j = 0;
-       for (i = 0; i < 32; i++) {
-               if ((1 << i) & cpu_mask) {
-                       /* for each cpu, mark the 4+threadid bucket */
-                       bkts[j] = ((i / 4) * 8) + (i % 4);
-                       j++;
-               }
-       }
-
-       /*configure the 128 * 9 Translation table to send to available buckets*/
-       k = 0;
-       c1 = 3;
-       c2 = 0;
-       for (i = 0; i < 64; i++) {
-               /*
-                * On use_bkt set the b0, b1 are used, else
-                * the 4 classes are used, here implemented
-                * a logic to distribute the packets to the
-                * buckets equally or based on the class
-                */
-               c1 = (c1 + 1) & 3;
-               c2 = (c1 + 1) & 3;
-               b1 = bkts[k];
-               k = (k + 1) % j;
-               b2 = bkts[k];
-               k = (k + 1) % j;
-
-               val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
-                               (c2 << 7) | (b2 << 1) | (use_bkt << 0));
-               dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n",
-                       i, b1, b2, c1, c2);
-               xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val);
-               c1 = c2;
-       }
-}
-
-static void xlr_config_parser(struct xlr_net_priv *priv)
-{
-       u32 val;
-
-       /* Mark it as ETHERNET type */
-       xlr_nae_wreg(priv->base_addr, R_L2TYPE_0, 0x01);
-
-       /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/
-       xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG,
-                    ((0x7f << 8) | (1 << 1)));
-
-       /* configure the parser : L2 Type is configured in the bootloader */
-       /* extract IP: src, dest protocol */
-       xlr_nae_wreg(priv->base_addr, R_L3CTABLE,
-                    (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
-                    (0x0800 << 0));
-       xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1,
-                    (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
-                    (16 << 4) | 4);
-
-       /* Configure to extract SRC port and Dest port for TCP and UDP pkts */
-       xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6);
-       xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 2, 17);
-       val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7));
-       xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 1, val);
-       xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 3, val);
-
-       xlr_config_translate_table(priv);
-}
-
-static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val)
-{
-       unsigned long timeout, stoptime, checktime;
-       int timedout;
-
-       /* 100ms timeout*/
-       timeout = msecs_to_jiffies(100);
-       stoptime = jiffies + timeout;
-       timedout = 0;
-
-       xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum);
-
-       /* Write the data which starts the write cycle */
-       xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val);
-
-       /* poll for the read cycle to complete */
-       while (!timedout) {
-               checktime = jiffies;
-               if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
-                       break;
-               timedout = time_after(checktime, stoptime);
-       }
-       if (timedout) {
-               pr_info("Phy device write err: device busy");
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
-static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum)
-{
-       unsigned long timeout, stoptime, checktime;
-       int timedout;
-
-       /* 100ms timeout*/
-       timeout = msecs_to_jiffies(100);
-       stoptime = jiffies + timeout;
-       timedout = 0;
-
-       /* setup the phy reg to be used */
-       xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS,
-                    (phy_addr << 8) | (regnum << 0));
-
-       /* Issue the read command */
-       xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND,
-                    (1 << O_MII_MGMT_COMMAND__rstat));
-
-       /* poll for the read cycle to complete */
-       while (!timedout) {
-               checktime = jiffies;
-               if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
-                       break;
-               timedout = time_after(checktime, stoptime);
-       }
-       if (timedout) {
-               pr_info("Phy device read err: device busy");
-               return -EBUSY;
-       }
-
-       /* clear the read cycle */
-       xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, 0);
-
-       /* Read the data */
-       return xlr_nae_rdreg(base_addr, R_MII_MGMT_STATUS);
-}
-
-static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val)
-{
-       struct xlr_net_priv *priv = bus->priv;
-       int ret;
-
-       ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val);
-       dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n",
-               phy_addr, regnum, val, ret);
-       return ret;
-}
-
-static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
-       struct xlr_net_priv *priv = bus->priv;
-       int ret;
-
-       ret =  xlr_phy_read(priv->mii_addr, phy_addr, regnum);
-       dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n",
-               phy_addr, regnum, ret);
-       return ret;
-}
-
-/*
- * XLR ports are RGMII. XLS ports are SGMII mostly except the port0,
- * which can be configured either SGMII or RGMII, considered SGMII
- * by default, if board setup to RGMII the port_type need to set
- * accordingly.Serdes and PCS layer need to configured for SGMII
- */
-static void xlr_sgmii_init(struct xlr_net_priv *priv)
-{
-       int phy;
-
-       xlr_phy_write(priv->serdes_addr, 26, 0, 0x6DB0);
-       xlr_phy_write(priv->serdes_addr, 26, 1, 0xFFFF);
-       xlr_phy_write(priv->serdes_addr, 26, 2, 0xB6D0);
-       xlr_phy_write(priv->serdes_addr, 26, 3, 0x00FF);
-       xlr_phy_write(priv->serdes_addr, 26, 4, 0x0000);
-       xlr_phy_write(priv->serdes_addr, 26, 5, 0x0000);
-       xlr_phy_write(priv->serdes_addr, 26, 6, 0x0005);
-       xlr_phy_write(priv->serdes_addr, 26, 7, 0x0001);
-       xlr_phy_write(priv->serdes_addr, 26, 8, 0x0000);
-       xlr_phy_write(priv->serdes_addr, 26, 9, 0x0000);
-       xlr_phy_write(priv->serdes_addr, 26, 10, 0x0000);
-
-       /* program  GPIO values for serdes init parameters */
-       xlr_nae_wreg(priv->gpio_addr, 0x20, 0x7e6802);
-       xlr_nae_wreg(priv->gpio_addr, 0x10, 0x7104);
-
-       xlr_nae_wreg(priv->gpio_addr, 0x22, 0x7e6802);
-       xlr_nae_wreg(priv->gpio_addr, 0x21, 0x7104);
-
-       /* enable autoneg - more magic */
-       phy = priv->phy_addr % 4 + 27;
-       xlr_phy_write(priv->pcs_addr, phy, 0, 0x1000);
-       xlr_phy_write(priv->pcs_addr, phy, 0, 0x0200);
-}
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv)
-{
-       struct phy_device *phydev = xlr_get_phydev(priv);
-       int speed;
-
-       if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
-               xlr_sgmii_init(priv);
-
-       if (phydev->speed != priv->phy_speed) {
-               speed = phydev->speed;
-               if (speed == SPEED_1000) {
-                       /* Set interface to Byte mode */
-                       xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
-                       priv->phy_speed = speed;
-               } else if (speed == SPEED_100 || speed == SPEED_10) {
-                       /* Set interface to Nibble mode */
-                       xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117);
-                       priv->phy_speed = speed;
-               }
-               /* Set SGMII speed in Interface control reg */
-               if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-                       if (speed == SPEED_10)
-                               xlr_nae_wreg(priv->base_addr,
-                                            R_INTERFACE_CONTROL,
-                                            SGMII_SPEED_10);
-                       if (speed == SPEED_100)
-                               xlr_nae_wreg(priv->base_addr,
-                                            R_INTERFACE_CONTROL,
-                                            SGMII_SPEED_100);
-                       if (speed == SPEED_1000)
-                               xlr_nae_wreg(priv->base_addr,
-                                            R_INTERFACE_CONTROL,
-                                            SGMII_SPEED_1000);
-               }
-               if (speed == SPEED_10)
-                       xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2);
-               if (speed == SPEED_100)
-                       xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x1);
-               if (speed == SPEED_1000)
-                       xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x0);
-       }
-       pr_info("gmac%d : %dMbps\n", priv->port_id, priv->phy_speed);
-}
-
-static void xlr_gmac_link_adjust(struct net_device *ndev)
-{
-       struct xlr_net_priv *priv = netdev_priv(ndev);
-       struct phy_device *phydev = xlr_get_phydev(priv);
-       u32 intreg;
-
-       intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG);
-       if (phydev->link) {
-               if (phydev->speed != priv->phy_speed) {
-                       xlr_set_gmac_speed(priv);
-                       pr_info("gmac%d : Link up\n", priv->port_id);
-               }
-       } else {
-               xlr_set_gmac_speed(priv);
-               pr_info("gmac%d : Link down\n", priv->port_id);
-       }
-}
-
-static int xlr_mii_probe(struct xlr_net_priv *priv)
-{
-       struct phy_device *phydev = xlr_get_phydev(priv);
-
-       if (!phydev) {
-               pr_err("no PHY found on phy_addr %d\n", priv->phy_addr);
-               return -ENODEV;
-       }
-
-       /* Attach MAC to PHY */
-       phydev = phy_connect(priv->ndev, phydev_name(phydev),
-                            xlr_gmac_link_adjust, priv->nd->phy_interface);
-
-       if (IS_ERR(phydev)) {
-               pr_err("could not attach PHY\n");
-               return PTR_ERR(phydev);
-       }
-       phydev->supported &= (ADVERTISED_10baseT_Full
-                               | ADVERTISED_10baseT_Half
-                               | ADVERTISED_100baseT_Full
-                               | ADVERTISED_100baseT_Half
-                               | ADVERTISED_1000baseT_Full
-                               | ADVERTISED_Autoneg
-                               | ADVERTISED_MII);
-
-       phydev->advertising = phydev->supported;
-       phy_attached_info(phydev);
-       return 0;
-}
-
-static int xlr_setup_mdio(struct xlr_net_priv *priv,
-                         struct platform_device *pdev)
-{
-       int err;
-
-       priv->mii_bus = mdiobus_alloc();
-       if (!priv->mii_bus) {
-               pr_err("mdiobus alloc failed\n");
-               return -ENOMEM;
-       }
-
-       priv->mii_bus->priv = priv;
-       priv->mii_bus->name = "xlr-mdio";
-       snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
-                priv->mii_bus->name, priv->port_id);
-       priv->mii_bus->read = xlr_mii_read;
-       priv->mii_bus->write = xlr_mii_write;
-       priv->mii_bus->parent = &pdev->dev;
-
-       /* Scan only the enabled address */
-       priv->mii_bus->phy_mask = ~(1 << priv->phy_addr);
-
-       /* setting clock divisor to 54 */
-       xlr_nae_wreg(priv->base_addr, R_MII_MGMT_CONFIG, 0x7);
-
-       err = mdiobus_register(priv->mii_bus);
-       if (err) {
-               mdiobus_free(priv->mii_bus);
-               pr_err("mdio bus registration failed\n");
-               return err;
-       }
-
-       pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id);
-       err = xlr_mii_probe(priv);
-       if (err) {
-               mdiobus_free(priv->mii_bus);
-               return err;
-       }
-       return 0;
-}
-
-static void xlr_port_enable(struct xlr_net_priv *priv)
-{
-       u32 prid = (read_c0_prid() & 0xf000);
-
-       /* Setup MAC_CONFIG reg if (xls & rgmii) */
-       if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) &&
-           priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
-               xlr_reg_update(priv->base_addr, R_RX_CONTROL,
-                              (1 << O_RX_CONTROL__RGMII),
-                              (1 << O_RX_CONTROL__RGMII));
-
-       /* Rx Tx enable */
-       xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
-                      ((1 << O_MAC_CONFIG_1__rxen) |
-                       (1 << O_MAC_CONFIG_1__txen) |
-                       (1 << O_MAC_CONFIG_1__rxfc) |
-                       (1 << O_MAC_CONFIG_1__txfc)),
-                      ((1 << O_MAC_CONFIG_1__rxen) |
-                       (1 << O_MAC_CONFIG_1__txen) |
-                       (1 << O_MAC_CONFIG_1__rxfc) |
-                       (1 << O_MAC_CONFIG_1__txfc)));
-
-       /* Setup tx control reg */
-       xlr_reg_update(priv->base_addr, R_TX_CONTROL,
-                      ((1 << O_TX_CONTROL__TXENABLE) |
-                      (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff);
-
-       /* Setup rx control reg */
-       xlr_reg_update(priv->base_addr, R_RX_CONTROL,
-                      1 << O_RX_CONTROL__RXENABLE,
-                      1 << O_RX_CONTROL__RXENABLE);
-}
-
-static void xlr_port_disable(struct xlr_net_priv *priv)
-{
-       /* Setup MAC_CONFIG reg */
-       /* Rx Tx disable*/
-       xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
-                      ((1 << O_MAC_CONFIG_1__rxen) |
-                       (1 << O_MAC_CONFIG_1__txen) |
-                       (1 << O_MAC_CONFIG_1__rxfc) |
-                       (1 << O_MAC_CONFIG_1__txfc)), 0x0);
-
-       /* Setup tx control reg */
-       xlr_reg_update(priv->base_addr, R_TX_CONTROL,
-                      ((1 << O_TX_CONTROL__TXENABLE) |
-                      (512 << O_TX_CONTROL__TXTHRESHOLD)), 0);
-
-       /* Setup rx control reg */
-       xlr_reg_update(priv->base_addr, R_RX_CONTROL,
-                      1 << O_RX_CONTROL__RXENABLE, 0);
-}
-
-/*
- * Initialization of gmac
- */
-static int xlr_gmac_init(struct xlr_net_priv *priv,
-                        struct platform_device *pdev)
-{
-       int ret;
-
-       pr_info("Initializing the gmac%d\n", priv->port_id);
-
-       xlr_port_disable(priv);
-
-       xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL,
-                    (1 << O_DESC_PACK_CTRL__MAXENTRY) |
-                    (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) |
-                    (1600 << O_DESC_PACK_CTRL__REGULARSIZE));
-
-       ret = xlr_setup_mdio(priv, pdev);
-       if (ret)
-               return ret;
-       xlr_port_enable(priv);
-
-       /* Enable Full-duplex/1000Mbps/CRC */
-       xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
-       /* speed 2.5Mhz */
-       xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02);
-       /* Setup Interrupt mask reg */
-       xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) |
-                    (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) |
-                    (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) |
-                    (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT));
-
-       /* Clear all stats */
-       xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT);
-       xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2);
-       return 0;
-}
-
-static int xlr_net_probe(struct platform_device *pdev)
-{
-       struct xlr_net_priv *priv = NULL;
-       struct net_device *ndev;
-       struct resource *res;
-       struct xlr_adapter *adapter;
-       int err, port;
-
-       pr_info("XLR/XLS Ethernet Driver controller %d\n", pdev->id);
-       /*
-        * Allocate our adapter data structure and attach it to the device.
-        */
-       adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
-       if (!adapter)
-               return -ENOMEM;
-
-       /*
-        * XLR and XLS have 1 and 2 NAE controller respectively
-        * Each controller has 4 gmac ports, mapping each controller
-        * under one parent device, 4 gmac ports under one device.
-        */
-       for (port = 0; port < pdev->num_resources / 2; port++) {
-               ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32);
-               if (!ndev) {
-                       dev_err(&pdev->dev,
-                               "Allocation of Ethernet device failed\n");
-                       return -ENOMEM;
-               }
-
-               priv = netdev_priv(ndev);
-               priv->pdev = pdev;
-               priv->ndev = ndev;
-               priv->port_id = (pdev->id * 4) + port;
-               priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
-               priv->base_addr = devm_platform_ioremap_resource(pdev, port);
-               if (IS_ERR(priv->base_addr)) {
-                       err = PTR_ERR(priv->base_addr);
-                       goto err_gmac;
-               }
-               priv->adapter = adapter;
-               adapter->netdev[port] = ndev;
-
-               res = platform_get_resource(pdev, IORESOURCE_IRQ, port);
-               if (!res) {
-                       dev_err(&pdev->dev, "No irq resource for MAC %d\n",
-                               priv->port_id);
-                       err = -ENODEV;
-                       goto err_gmac;
-               }
-
-               ndev->irq = res->start;
-
-               priv->phy_addr = priv->nd->phy_addr[port];
-               priv->tx_stnid = priv->nd->tx_stnid[port];
-               priv->mii_addr = priv->nd->mii_addr;
-               priv->serdes_addr = priv->nd->serdes_addr;
-               priv->pcs_addr = priv->nd->pcs_addr;
-               priv->gpio_addr = priv->nd->gpio_addr;
-
-               ndev->netdev_ops = &xlr_netdev_ops;
-               ndev->watchdog_timeo = HZ;
-
-               /* Setup Mac address and Rx mode */
-               eth_hw_addr_random(ndev);
-               xlr_hw_set_mac_addr(ndev);
-               xlr_set_rx_mode(ndev);
-
-               priv->num_rx_desc += MAX_NUM_DESC_SPILL;
-               ndev->ethtool_ops = &xlr_ethtool_ops;
-               SET_NETDEV_DEV(ndev, &pdev->dev);
-
-               xlr_config_fifo_spill_area(priv);
-               /* Configure PDE to Round-Robin pkt distribution */
-               xlr_config_pde(priv);
-               xlr_config_parser(priv);
-
-               /* Call init with respect to port */
-               if (strcmp(res->name, "gmac") == 0) {
-                       err = xlr_gmac_init(priv, pdev);
-                       if (err) {
-                               dev_err(&pdev->dev, "gmac%d init failed\n",
-                                       priv->port_id);
-                               goto err_gmac;
-                       }
-               }
-
-               if (priv->port_id == 0 || priv->port_id == 4) {
-                       err = xlr_config_common(priv);
-                       if (err)
-                               goto err_netdev;
-               }
-
-               err = register_netdev(ndev);
-               if (err) {
-                       dev_err(&pdev->dev,
-                               "Registering netdev failed for gmac%d\n",
-                               priv->port_id);
-                       goto err_netdev;
-               }
-               platform_set_drvdata(pdev, priv);
-       }
-
-       return 0;
-
-err_netdev:
-       mdiobus_free(priv->mii_bus);
-err_gmac:
-       free_netdev(ndev);
-       return err;
-}
-
-static int xlr_net_remove(struct platform_device *pdev)
-{
-       struct xlr_net_priv *priv = platform_get_drvdata(pdev);
-
-       unregister_netdev(priv->ndev);
-       mdiobus_unregister(priv->mii_bus);
-       mdiobus_free(priv->mii_bus);
-       free_netdev(priv->ndev);
-       return 0;
-}
-
-static struct platform_driver xlr_net_driver = {
-       .probe          = xlr_net_probe,
-       .remove         = xlr_net_remove,
-       .driver         = {
-               .name   = "xlr-net",
-       },
-};
-
-module_platform_driver(xlr_net_driver);
-
-MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>");
-MODULE_DESCRIPTION("Ethernet driver for Netlogic XLR/XLS");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("platform:xlr-net");
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
deleted file mode 100644 (file)
index 8365b74..0000000
+++ /dev/null
@@ -1,1079 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-/* #define MAC_SPLIT_MODE */
-
-#define MAC_SPACING                 0x400
-#define XGMAC_SPACING               0x400
-
-/* PE-MCXMAC register and bit field definitions */
-#define R_MAC_CONFIG_1                                              0x00
-#define   O_MAC_CONFIG_1__srst                                      31
-#define   O_MAC_CONFIG_1__simr                                      30
-#define   O_MAC_CONFIG_1__hrrmc                                     18
-#define   W_MAC_CONFIG_1__hrtmc                                      2
-#define   O_MAC_CONFIG_1__hrrfn                                     16
-#define   W_MAC_CONFIG_1__hrtfn                                      2
-#define   O_MAC_CONFIG_1__intlb                                      8
-#define   O_MAC_CONFIG_1__rxfc                                       5
-#define   O_MAC_CONFIG_1__txfc                                       4
-#define   O_MAC_CONFIG_1__srxen                                      3
-#define   O_MAC_CONFIG_1__rxen                                       2
-#define   O_MAC_CONFIG_1__stxen                                      1
-#define   O_MAC_CONFIG_1__txen                                       0
-#define R_MAC_CONFIG_2                                              0x01
-#define   O_MAC_CONFIG_2__prlen                                     12
-#define   W_MAC_CONFIG_2__prlen                                      4
-#define   O_MAC_CONFIG_2__speed                                      8
-#define   W_MAC_CONFIG_2__speed                                      2
-#define   O_MAC_CONFIG_2__hugen                                      5
-#define   O_MAC_CONFIG_2__flchk                                      4
-#define   O_MAC_CONFIG_2__crce                                       1
-#define   O_MAC_CONFIG_2__fulld                                      0
-#define R_IPG_IFG                                                   0x02
-#define   O_IPG_IFG__ipgr1                                          24
-#define   W_IPG_IFG__ipgr1                                           7
-#define   O_IPG_IFG__ipgr2                                          16
-#define   W_IPG_IFG__ipgr2                                           7
-#define   O_IPG_IFG__mifg                                            8
-#define   W_IPG_IFG__mifg                                            8
-#define   O_IPG_IFG__ipgt                                            0
-#define   W_IPG_IFG__ipgt                                            7
-#define R_HALF_DUPLEX                                               0x03
-#define   O_HALF_DUPLEX__abebt                                      24
-#define   W_HALF_DUPLEX__abebt                                       4
-#define   O_HALF_DUPLEX__abebe                                      19
-#define   O_HALF_DUPLEX__bpnb                                       18
-#define   O_HALF_DUPLEX__nobo                                       17
-#define   O_HALF_DUPLEX__edxsdfr                                    16
-#define   O_HALF_DUPLEX__retry                                      12
-#define   W_HALF_DUPLEX__retry                                       4
-#define   O_HALF_DUPLEX__lcol                                        0
-#define   W_HALF_DUPLEX__lcol                                       10
-#define R_MAXIMUM_FRAME_LENGTH                                      0x04
-#define   O_MAXIMUM_FRAME_LENGTH__maxf                               0
-#define   W_MAXIMUM_FRAME_LENGTH__maxf                              16
-#define R_TEST                                                      0x07
-#define   O_TEST__mbof                                               3
-#define   O_TEST__rthdf                                              2
-#define   O_TEST__tpause                                             1
-#define   O_TEST__sstct                                              0
-#define R_MII_MGMT_CONFIG                                           0x08
-#define   O_MII_MGMT_CONFIG__scinc                                   5
-#define   O_MII_MGMT_CONFIG__spre                                    4
-#define   O_MII_MGMT_CONFIG__clks                                    3
-#define   W_MII_MGMT_CONFIG__clks                                    3
-#define R_MII_MGMT_COMMAND                                          0x09
-#define   O_MII_MGMT_COMMAND__scan                                   1
-#define   O_MII_MGMT_COMMAND__rstat                                  0
-#define R_MII_MGMT_ADDRESS                                          0x0A
-#define   O_MII_MGMT_ADDRESS__fiad                                   8
-#define   W_MII_MGMT_ADDRESS__fiad                                   5
-#define   O_MII_MGMT_ADDRESS__fgad                                   5
-#define   W_MII_MGMT_ADDRESS__fgad                                   0
-#define R_MII_MGMT_WRITE_DATA                                       0x0B
-#define   O_MII_MGMT_WRITE_DATA__ctld                                0
-#define   W_MII_MGMT_WRITE_DATA__ctld                               16
-#define R_MII_MGMT_STATUS                                           0x0C
-#define R_MII_MGMT_INDICATORS                                       0x0D
-#define   O_MII_MGMT_INDICATORS__nvalid                              2
-#define   O_MII_MGMT_INDICATORS__scan                                1
-#define   O_MII_MGMT_INDICATORS__busy                                0
-#define R_INTERFACE_CONTROL                                         0x0E
-#define   O_INTERFACE_CONTROL__hrstint                              31
-#define   O_INTERFACE_CONTROL__tbimode                              27
-#define   O_INTERFACE_CONTROL__ghdmode                              26
-#define   O_INTERFACE_CONTROL__lhdmode                              25
-#define   O_INTERFACE_CONTROL__phymod                               24
-#define   O_INTERFACE_CONTROL__hrrmi                                23
-#define   O_INTERFACE_CONTROL__rspd                                 16
-#define   O_INTERFACE_CONTROL__hr100                                15
-#define   O_INTERFACE_CONTROL__frcq                                 10
-#define   O_INTERFACE_CONTROL__nocfr                                 9
-#define   O_INTERFACE_CONTROL__dlfct                                 8
-#define   O_INTERFACE_CONTROL__enjab                                 0
-#define R_INTERFACE_STATUS                                         0x0F
-#define   O_INTERFACE_STATUS__xsdfr                                  9
-#define   O_INTERFACE_STATUS__ssrr                                   8
-#define   W_INTERFACE_STATUS__ssrr                                   5
-#define   O_INTERFACE_STATUS__miilf                                  3
-#define   O_INTERFACE_STATUS__locar                                  2
-#define   O_INTERFACE_STATUS__sqerr                                  1
-#define   O_INTERFACE_STATUS__jabber                                 0
-#define R_STATION_ADDRESS_LS                                       0x10
-#define R_STATION_ADDRESS_MS                                       0x11
-
-/* A-XGMAC register and bit field definitions */
-#define R_XGMAC_CONFIG_0    0x00
-#define   O_XGMAC_CONFIG_0__hstmacrst               31
-#define   O_XGMAC_CONFIG_0__hstrstrctl              23
-#define   O_XGMAC_CONFIG_0__hstrstrfn               22
-#define   O_XGMAC_CONFIG_0__hstrsttctl              18
-#define   O_XGMAC_CONFIG_0__hstrsttfn               17
-#define   O_XGMAC_CONFIG_0__hstrstmiim              16
-#define   O_XGMAC_CONFIG_0__hstloopback             8
-#define R_XGMAC_CONFIG_1    0x01
-#define   O_XGMAC_CONFIG_1__hsttctlen               31
-#define   O_XGMAC_CONFIG_1__hsttfen                 30
-#define   O_XGMAC_CONFIG_1__hstrctlen               29
-#define   O_XGMAC_CONFIG_1__hstrfen                 28
-#define   O_XGMAC_CONFIG_1__tfen                    26
-#define   O_XGMAC_CONFIG_1__rfen                    24
-#define   O_XGMAC_CONFIG_1__hstrctlshrtp            12
-#define   O_XGMAC_CONFIG_1__hstdlyfcstx             10
-#define   W_XGMAC_CONFIG_1__hstdlyfcstx              2
-#define   O_XGMAC_CONFIG_1__hstdlyfcsrx              8
-#define   W_XGMAC_CONFIG_1__hstdlyfcsrx              2
-#define   O_XGMAC_CONFIG_1__hstppen                  7
-#define   O_XGMAC_CONFIG_1__hstbytswp                6
-#define   O_XGMAC_CONFIG_1__hstdrplt64               5
-#define   O_XGMAC_CONFIG_1__hstprmscrx               4
-#define   O_XGMAC_CONFIG_1__hstlenchk                3
-#define   O_XGMAC_CONFIG_1__hstgenfcs                2
-#define   O_XGMAC_CONFIG_1__hstpadmode               0
-#define   W_XGMAC_CONFIG_1__hstpadmode               2
-#define R_XGMAC_CONFIG_2    0x02
-#define   O_XGMAC_CONFIG_2__hsttctlfrcp             31
-#define   O_XGMAC_CONFIG_2__hstmlnkflth             27
-#define   O_XGMAC_CONFIG_2__hstalnkflth             26
-#define   O_XGMAC_CONFIG_2__rflnkflt                24
-#define   W_XGMAC_CONFIG_2__rflnkflt                 2
-#define   O_XGMAC_CONFIG_2__hstipgextmod            16
-#define   W_XGMAC_CONFIG_2__hstipgextmod             5
-#define   O_XGMAC_CONFIG_2__hstrctlfrcp             15
-#define   O_XGMAC_CONFIG_2__hstipgexten              5
-#define   O_XGMAC_CONFIG_2__hstmipgext               0
-#define   W_XGMAC_CONFIG_2__hstmipgext               5
-#define R_XGMAC_CONFIG_3    0x03
-#define   O_XGMAC_CONFIG_3__hstfltrfrm              31
-#define   W_XGMAC_CONFIG_3__hstfltrfrm              16
-#define   O_XGMAC_CONFIG_3__hstfltrfrmdc            15
-#define   W_XGMAC_CONFIG_3__hstfltrfrmdc            16
-#define R_XGMAC_STATION_ADDRESS_LS      0x04
-#define   O_XGMAC_STATION_ADDRESS_LS__hstmacadr0    0
-#define   W_XGMAC_STATION_ADDRESS_LS__hstmacadr0    32
-#define R_XGMAC_STATION_ADDRESS_MS      0x05
-#define R_XGMAC_MAX_FRAME_LEN           0x08
-#define   O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx       16
-#define   W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx       14
-#define   O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx        0
-#define   W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx       16
-#define R_XGMAC_REV_LEVEL               0x0B
-#define   O_XGMAC_REV_LEVEL__revlvl                  0
-#define   W_XGMAC_REV_LEVEL__revlvl                 15
-#define R_XGMAC_MIIM_COMMAND            0x10
-#define   O_XGMAC_MIIM_COMMAND__hstldcmd             3
-#define   O_XGMAC_MIIM_COMMAND__hstmiimcmd           0
-#define   W_XGMAC_MIIM_COMMAND__hstmiimcmd           3
-#define R_XGMAC_MIIM_FILED              0x11
-#define   O_XGMAC_MIIM_FILED__hststfield            30
-#define   W_XGMAC_MIIM_FILED__hststfield             2
-#define   O_XGMAC_MIIM_FILED__hstopfield            28
-#define   W_XGMAC_MIIM_FILED__hstopfield             2
-#define   O_XGMAC_MIIM_FILED__hstphyadx             23
-#define   W_XGMAC_MIIM_FILED__hstphyadx              5
-#define   O_XGMAC_MIIM_FILED__hstregadx             18
-#define   W_XGMAC_MIIM_FILED__hstregadx              5
-#define   O_XGMAC_MIIM_FILED__hsttafield            16
-#define   W_XGMAC_MIIM_FILED__hsttafield             2
-#define   O_XGMAC_MIIM_FILED__miimrddat              0
-#define   W_XGMAC_MIIM_FILED__miimrddat             16
-#define R_XGMAC_MIIM_CONFIG             0x12
-#define   O_XGMAC_MIIM_CONFIG__hstnopram             7
-#define   O_XGMAC_MIIM_CONFIG__hstclkdiv             0
-#define   W_XGMAC_MIIM_CONFIG__hstclkdiv             7
-#define R_XGMAC_MIIM_LINK_FAIL_VECTOR   0x13
-#define   O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec   0
-#define   W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec  32
-#define R_XGMAC_MIIM_INDICATOR          0x14
-#define   O_XGMAC_MIIM_INDICATOR__miimphylf          4
-#define   O_XGMAC_MIIM_INDICATOR__miimmoncplt        3
-#define   O_XGMAC_MIIM_INDICATOR__miimmonvld         2
-#define   O_XGMAC_MIIM_INDICATOR__miimmon            1
-#define   O_XGMAC_MIIM_INDICATOR__miimbusy           0
-
-/* GMAC stats registers */
-#define R_RBYT                                                     0x27
-#define R_RPKT                                                     0x28
-#define R_RFCS                                                     0x29
-#define R_RMCA                                                     0x2A
-#define R_RBCA                                                     0x2B
-#define R_RXCF                                                     0x2C
-#define R_RXPF                                                     0x2D
-#define R_RXUO                                                     0x2E
-#define R_RALN                                                     0x2F
-#define R_RFLR                                                     0x30
-#define R_RCDE                                                     0x31
-#define R_RCSE                                                     0x32
-#define R_RUND                                                     0x33
-#define R_ROVR                                                     0x34
-#define R_TBYT                                                     0x38
-#define R_TPKT                                                     0x39
-#define R_TMCA                                                     0x3A
-#define R_TBCA                                                     0x3B
-#define R_TXPF                                                     0x3C
-#define R_TDFR                                                     0x3D
-#define R_TEDF                                                     0x3E
-#define R_TSCL                                                     0x3F
-#define R_TMCL                                                     0x40
-#define R_TLCL                                                     0x41
-#define R_TXCL                                                     0x42
-#define R_TNCL                                                     0x43
-#define R_TJBR                                                     0x46
-#define R_TFCS                                                     0x47
-#define R_TXCF                                                     0x48
-#define R_TOVR                                                     0x49
-#define R_TUND                                                     0x4A
-#define R_TFRG                                                     0x4B
-
-/* Glue logic register and bit field definitions */
-#define R_MAC_ADDR0                                                 0x50
-#define R_MAC_ADDR1                                                 0x52
-#define R_MAC_ADDR2                                                 0x54
-#define R_MAC_ADDR3                                                 0x56
-#define R_MAC_ADDR_MASK2                                            0x58
-#define R_MAC_ADDR_MASK3                                            0x5A
-#define R_MAC_FILTER_CONFIG                                         0x5C
-#define   O_MAC_FILTER_CONFIG__BROADCAST_EN                         10
-#define   O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN                       9
-#define   O_MAC_FILTER_CONFIG__ALL_MCAST_EN                         8
-#define   O_MAC_FILTER_CONFIG__ALL_UCAST_EN                         7
-#define   O_MAC_FILTER_CONFIG__HASH_MCAST_EN                        6
-#define   O_MAC_FILTER_CONFIG__HASH_UCAST_EN                        5
-#define   O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC                      4
-#define   O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID                      3
-#define   O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID                      2
-#define   O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID                      1
-#define   O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID                      0
-#define R_HASH_TABLE_VECTOR                                         0x30
-#define R_TX_CONTROL                                                 0x0A0
-#define   O_TX_CONTROL__TX15HALT                                     31
-#define   O_TX_CONTROL__TX14HALT                                     30
-#define   O_TX_CONTROL__TX13HALT                                     29
-#define   O_TX_CONTROL__TX12HALT                                     28
-#define   O_TX_CONTROL__TX11HALT                                     27
-#define   O_TX_CONTROL__TX10HALT                                     26
-#define   O_TX_CONTROL__TX9HALT                                      25
-#define   O_TX_CONTROL__TX8HALT                                      24
-#define   O_TX_CONTROL__TX7HALT                                      23
-#define   O_TX_CONTROL__TX6HALT                                      22
-#define   O_TX_CONTROL__TX5HALT                                      21
-#define   O_TX_CONTROL__TX4HALT                                      20
-#define   O_TX_CONTROL__TX3HALT                                      19
-#define   O_TX_CONTROL__TX2HALT                                      18
-#define   O_TX_CONTROL__TX1HALT                                      17
-#define   O_TX_CONTROL__TX0HALT                                      16
-#define   O_TX_CONTROL__TXIDLE                                       15
-#define   O_TX_CONTROL__TXENABLE                                     14
-#define   O_TX_CONTROL__TXTHRESHOLD                                  0
-#define   W_TX_CONTROL__TXTHRESHOLD                                  14
-#define R_RX_CONTROL                                                 0x0A1
-#define   O_RX_CONTROL__RGMII                                        10
-#define   O_RX_CONTROL__SOFTRESET                                   2
-#define   O_RX_CONTROL__RXHALT                                       1
-#define   O_RX_CONTROL__RXENABLE                                     0
-#define R_DESC_PACK_CTRL                                            0x0A2
-#define   O_DESC_PACK_CTRL__BYTEOFFSET                              17
-#define   W_DESC_PACK_CTRL__BYTEOFFSET                              3
-#define   O_DESC_PACK_CTRL__PREPADENABLE                            16
-#define   O_DESC_PACK_CTRL__MAXENTRY                                14
-#define   W_DESC_PACK_CTRL__MAXENTRY                                2
-#define   O_DESC_PACK_CTRL__REGULARSIZE                             0
-#define   W_DESC_PACK_CTRL__REGULARSIZE                             14
-#define R_STATCTRL                                                  0x0A3
-#define   O_STATCTRL__OVERFLOWEN                                    4
-#define   O_STATCTRL__GIG                                           3
-#define   O_STATCTRL__STEN                                          2
-#define   O_STATCTRL__CLRCNT                                        1
-#define   O_STATCTRL__AUTOZ                                         0
-#define R_L2ALLOCCTRL                                               0x0A4
-#define   O_L2ALLOCCTRL__TXL2ALLOCATE                               9
-#define   W_L2ALLOCCTRL__TXL2ALLOCATE                               9
-#define   O_L2ALLOCCTRL__RXL2ALLOCATE                               0
-#define   W_L2ALLOCCTRL__RXL2ALLOCATE                               9
-#define R_INTMASK                                                   0x0A5
-#define   O_INTMASK__SPI4TXERROR                                     28
-#define   O_INTMASK__SPI4RXERROR                                     27
-#define   O_INTMASK__RGMIIHALFDUPCOLLISION                           27
-#define   O_INTMASK__ABORT                                           26
-#define   O_INTMASK__UNDERRUN                                        25
-#define   O_INTMASK__DISCARDPACKET                                   24
-#define   O_INTMASK__ASYNCFIFOFULL                                   23
-#define   O_INTMASK__TAGFULL                                         22
-#define   O_INTMASK__CLASS3FULL                                      21
-#define   O_INTMASK__C3EARLYFULL                                     20
-#define   O_INTMASK__CLASS2FULL                                      19
-#define   O_INTMASK__C2EARLYFULL                                     18
-#define   O_INTMASK__CLASS1FULL                                      17
-#define   O_INTMASK__C1EARLYFULL                                     16
-#define   O_INTMASK__CLASS0FULL                                      15
-#define   O_INTMASK__C0EARLYFULL                                     14
-#define   O_INTMASK__RXDATAFULL                                      13
-#define   O_INTMASK__RXEARLYFULL                                     12
-#define   O_INTMASK__RFREEEMPTY                                      9
-#define   O_INTMASK__RFEARLYEMPTY                                    8
-#define   O_INTMASK__P2PSPILLECC                                     7
-#define   O_INTMASK__FREEDESCFULL                                    5
-#define   O_INTMASK__FREEEARLYFULL                                   4
-#define   O_INTMASK__TXFETCHERROR                                    3
-#define   O_INTMASK__STATCARRY                                       2
-#define   O_INTMASK__MDINT                                           1
-#define   O_INTMASK__TXILLEGAL                                       0
-#define R_INTREG                                                    0x0A6
-#define   O_INTREG__SPI4TXERROR                                     28
-#define   O_INTREG__SPI4RXERROR                                     27
-#define   O_INTREG__RGMIIHALFDUPCOLLISION                           27
-#define   O_INTREG__ABORT                                           26
-#define   O_INTREG__UNDERRUN                                        25
-#define   O_INTREG__DISCARDPACKET                                   24
-#define   O_INTREG__ASYNCFIFOFULL                                   23
-#define   O_INTREG__TAGFULL                                         22
-#define   O_INTREG__CLASS3FULL                                      21
-#define   O_INTREG__C3EARLYFULL                                     20
-#define   O_INTREG__CLASS2FULL                                      19
-#define   O_INTREG__C2EARLYFULL                                     18
-#define   O_INTREG__CLASS1FULL                                      17
-#define   O_INTREG__C1EARLYFULL                                     16
-#define   O_INTREG__CLASS0FULL                                      15
-#define   O_INTREG__C0EARLYFULL                                     14
-#define   O_INTREG__RXDATAFULL                                      13
-#define   O_INTREG__RXEARLYFULL                                     12
-#define   O_INTREG__RFREEEMPTY                                      9
-#define   O_INTREG__RFEARLYEMPTY                                    8
-#define   O_INTREG__P2PSPILLECC                                     7
-#define   O_INTREG__FREEDESCFULL                                    5
-#define   O_INTREG__FREEEARLYFULL                                   4
-#define   O_INTREG__TXFETCHERROR                                    3
-#define   O_INTREG__STATCARRY                                       2
-#define   O_INTREG__MDINT                                           1
-#define   O_INTREG__TXILLEGAL                                       0
-#define R_TXRETRY                                                   0x0A7
-#define   O_TXRETRY__COLLISIONRETRY                                 6
-#define   O_TXRETRY__BUSERRORRETRY                                  5
-#define   O_TXRETRY__UNDERRUNRETRY                                  4
-#define   O_TXRETRY__RETRIES                                        0
-#define   W_TXRETRY__RETRIES                                        4
-#define R_CORECONTROL                                               0x0A8
-#define   O_CORECONTROL__ERRORTHREAD                                4
-#define   W_CORECONTROL__ERRORTHREAD                                7
-#define   O_CORECONTROL__SHUTDOWN                                   2
-#define   O_CORECONTROL__SPEED                                      0
-#define   W_CORECONTROL__SPEED                                      2
-#define R_BYTEOFFSET0                                               0x0A9
-#define R_BYTEOFFSET1                                               0x0AA
-#define R_L2TYPE_0                                                  0x0F0
-#define   O_L2TYPE__EXTRAHDRPROTOSIZE                               26
-#define   W_L2TYPE__EXTRAHDRPROTOSIZE                               5
-#define   O_L2TYPE__EXTRAHDRPROTOOFFSET                             20
-#define   W_L2TYPE__EXTRAHDRPROTOOFFSET                             6
-#define   O_L2TYPE__EXTRAHEADERSIZE                                 14
-#define   W_L2TYPE__EXTRAHEADERSIZE                                 6
-#define   O_L2TYPE__PROTOOFFSET                                     8
-#define   W_L2TYPE__PROTOOFFSET                                     6
-#define   O_L2TYPE__L2HDROFFSET                                     2
-#define   W_L2TYPE__L2HDROFFSET                                     6
-#define   O_L2TYPE__L2PROTO                                         0
-#define   W_L2TYPE__L2PROTO                                         2
-#define R_L2TYPE_1                                                  0xF0
-#define R_L2TYPE_2                                                  0xF0
-#define R_L2TYPE_3                                                  0xF0
-#define R_PARSERCONFIGREG                                           0x100
-#define   O_PARSERCONFIGREG__CRCHASHPOLY                            8
-#define   W_PARSERCONFIGREG__CRCHASHPOLY                            7
-#define   O_PARSERCONFIGREG__PREPADOFFSET                           4
-#define   W_PARSERCONFIGREG__PREPADOFFSET                           4
-#define   O_PARSERCONFIGREG__USECAM                                 2
-#define   O_PARSERCONFIGREG__USEHASH                                1
-#define   O_PARSERCONFIGREG__USEPROTO                               0
-#define R_L3CTABLE                                                  0x140
-#define   O_L3CTABLE__OFFSET0                                       25
-#define   W_L3CTABLE__OFFSET0                                       7
-#define   O_L3CTABLE__LEN0                                          21
-#define   W_L3CTABLE__LEN0                                          4
-#define   O_L3CTABLE__OFFSET1                                       14
-#define   W_L3CTABLE__OFFSET1                                       7
-#define   O_L3CTABLE__LEN1                                          10
-#define   W_L3CTABLE__LEN1                                          4
-#define   O_L3CTABLE__OFFSET2                                       4
-#define   W_L3CTABLE__OFFSET2                                       6
-#define   O_L3CTABLE__LEN2                                          0
-#define   W_L3CTABLE__LEN2                                          4
-#define   O_L3CTABLE__L3HDROFFSET                                   26
-#define   W_L3CTABLE__L3HDROFFSET                                   6
-#define   O_L3CTABLE__L4PROTOOFFSET                                 20
-#define   W_L3CTABLE__L4PROTOOFFSET                                 6
-#define   O_L3CTABLE__IPCHKSUMCOMPUTE                               19
-#define   O_L3CTABLE__L4CLASSIFY                                    18
-#define   O_L3CTABLE__L2PROTO                                       16
-#define   W_L3CTABLE__L2PROTO                                       2
-#define   O_L3CTABLE__L3PROTOKEY                                    0
-#define   W_L3CTABLE__L3PROTOKEY                                    16
-#define R_L4CTABLE                                                  0x160
-#define   O_L4CTABLE__OFFSET0                                       21
-#define   W_L4CTABLE__OFFSET0                                       6
-#define   O_L4CTABLE__LEN0                                          17
-#define   W_L4CTABLE__LEN0                                          4
-#define   O_L4CTABLE__OFFSET1                                       11
-#define   W_L4CTABLE__OFFSET1                                       6
-#define   O_L4CTABLE__LEN1                                          7
-#define   W_L4CTABLE__LEN1                                          4
-#define   O_L4CTABLE__TCPCHKSUMENABLE                               0
-#define R_CAM4X128TABLE                                             0x172
-#define   O_CAM4X128TABLE__CLASSID                                  7
-#define   W_CAM4X128TABLE__CLASSID                                  2
-#define   O_CAM4X128TABLE__BUCKETID                                 1
-#define   W_CAM4X128TABLE__BUCKETID                                 6
-#define   O_CAM4X128TABLE__USEBUCKET                                0
-#define R_CAM4X128KEY                                               0x180
-#define R_TRANSLATETABLE                                            0x1A0
-#define R_DMACR0                                                    0x200
-#define   O_DMACR0__DATA0WRMAXCR                                    27
-#define   W_DMACR0__DATA0WRMAXCR                                    3
-#define   O_DMACR0__DATA0RDMAXCR                                    24
-#define   W_DMACR0__DATA0RDMAXCR                                    3
-#define   O_DMACR0__DATA1WRMAXCR                                    21
-#define   W_DMACR0__DATA1WRMAXCR                                    3
-#define   O_DMACR0__DATA1RDMAXCR                                    18
-#define   W_DMACR0__DATA1RDMAXCR                                    3
-#define   O_DMACR0__DATA2WRMAXCR                                    15
-#define   W_DMACR0__DATA2WRMAXCR                                    3
-#define   O_DMACR0__DATA2RDMAXCR                                    12
-#define   W_DMACR0__DATA2RDMAXCR                                    3
-#define   O_DMACR0__DATA3WRMAXCR                                    9
-#define   W_DMACR0__DATA3WRMAXCR                                    3
-#define   O_DMACR0__DATA3RDMAXCR                                    6
-#define   W_DMACR0__DATA3RDMAXCR                                    3
-#define   O_DMACR0__DATA4WRMAXCR                                    3
-#define   W_DMACR0__DATA4WRMAXCR                                    3
-#define   O_DMACR0__DATA4RDMAXCR                                    0
-#define   W_DMACR0__DATA4RDMAXCR                                    3
-#define R_DMACR1                                                    0x201
-#define   O_DMACR1__DATA5WRMAXCR                                    27
-#define   W_DMACR1__DATA5WRMAXCR                                    3
-#define   O_DMACR1__DATA5RDMAXCR                                    24
-#define   W_DMACR1__DATA5RDMAXCR                                    3
-#define   O_DMACR1__DATA6WRMAXCR                                    21
-#define   W_DMACR1__DATA6WRMAXCR                                    3
-#define   O_DMACR1__DATA6RDMAXCR                                    18
-#define   W_DMACR1__DATA6RDMAXCR                                    3
-#define   O_DMACR1__DATA7WRMAXCR                                    15
-#define   W_DMACR1__DATA7WRMAXCR                                    3
-#define   O_DMACR1__DATA7RDMAXCR                                    12
-#define   W_DMACR1__DATA7RDMAXCR                                    3
-#define   O_DMACR1__DATA8WRMAXCR                                    9
-#define   W_DMACR1__DATA8WRMAXCR                                    3
-#define   O_DMACR1__DATA8RDMAXCR                                    6
-#define   W_DMACR1__DATA8RDMAXCR                                    3
-#define   O_DMACR1__DATA9WRMAXCR                                    3
-#define   W_DMACR1__DATA9WRMAXCR                                    3
-#define   O_DMACR1__DATA9RDMAXCR                                    0
-#define   W_DMACR1__DATA9RDMAXCR                                    3
-#define R_DMACR2                                                    0x202
-#define   O_DMACR2__DATA10WRMAXCR                                   27
-#define   W_DMACR2__DATA10WRMAXCR                                   3
-#define   O_DMACR2__DATA10RDMAXCR                                   24
-#define   W_DMACR2__DATA10RDMAXCR                                   3
-#define   O_DMACR2__DATA11WRMAXCR                                   21
-#define   W_DMACR2__DATA11WRMAXCR                                   3
-#define   O_DMACR2__DATA11RDMAXCR                                   18
-#define   W_DMACR2__DATA11RDMAXCR                                   3
-#define   O_DMACR2__DATA12WRMAXCR                                   15
-#define   W_DMACR2__DATA12WRMAXCR                                   3
-#define   O_DMACR2__DATA12RDMAXCR                                   12
-#define   W_DMACR2__DATA12RDMAXCR                                   3
-#define   O_DMACR2__DATA13WRMAXCR                                   9
-#define   W_DMACR2__DATA13WRMAXCR                                   3
-#define   O_DMACR2__DATA13RDMAXCR                                   6
-#define   W_DMACR2__DATA13RDMAXCR                                   3
-#define   O_DMACR2__DATA14WRMAXCR                                   3
-#define   W_DMACR2__DATA14WRMAXCR                                   3
-#define   O_DMACR2__DATA14RDMAXCR                                   0
-#define   W_DMACR2__DATA14RDMAXCR                                   3
-#define R_DMACR3                                                    0x203
-#define   O_DMACR3__DATA15WRMAXCR                                   27
-#define   W_DMACR3__DATA15WRMAXCR                                   3
-#define   O_DMACR3__DATA15RDMAXCR                                   24
-#define   W_DMACR3__DATA15RDMAXCR                                   3
-#define   O_DMACR3__SPCLASSWRMAXCR                                  21
-#define   W_DMACR3__SPCLASSWRMAXCR                                  3
-#define   O_DMACR3__SPCLASSRDMAXCR                                  18
-#define   W_DMACR3__SPCLASSRDMAXCR                                  3
-#define   O_DMACR3__JUMFRINWRMAXCR                                  15
-#define   W_DMACR3__JUMFRINWRMAXCR                                  3
-#define   O_DMACR3__JUMFRINRDMAXCR                                  12
-#define   W_DMACR3__JUMFRINRDMAXCR                                  3
-#define   O_DMACR3__REGFRINWRMAXCR                                  9
-#define   W_DMACR3__REGFRINWRMAXCR                                  3
-#define   O_DMACR3__REGFRINRDMAXCR                                  6
-#define   W_DMACR3__REGFRINRDMAXCR                                  3
-#define   O_DMACR3__FROUTWRMAXCR                                    3
-#define   W_DMACR3__FROUTWRMAXCR                                    3
-#define   O_DMACR3__FROUTRDMAXCR                                    0
-#define   W_DMACR3__FROUTRDMAXCR                                    3
-#define R_REG_FRIN_SPILL_MEM_START_0                                0x204
-#define   O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0        0
-#define   W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0       32
-#define R_REG_FRIN_SPILL_MEM_START_1                                0x205
-#define   O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1        0
-#define   W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1        3
-#define R_REG_FRIN_SPILL_MEM_SIZE                                   0x206
-#define   O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE             0
-#define   W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE            32
-#define R_FROUT_SPILL_MEM_START_0                                   0x207
-#define   O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0             0
-#define   W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0            32
-#define R_FROUT_SPILL_MEM_START_1                                   0x208
-#define   O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1             0
-#define   W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1             3
-#define R_FROUT_SPILL_MEM_SIZE                                      0x209
-#define   O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE                  0
-#define   W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE                 32
-#define R_CLASS0_SPILL_MEM_START_0                                  0x20A
-#define   O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0           0
-#define   W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0          32
-#define R_CLASS0_SPILL_MEM_START_1                                  0x20B
-#define   O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1           0
-#define   W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1           3
-#define R_CLASS0_SPILL_MEM_SIZE                                     0x20C
-#define   O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE                0
-#define   W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE               32
-#define R_JUMFRIN_SPILL_MEM_START_0                                 0x20D
-#define   O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0          0
-#define   W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0         32
-#define R_JUMFRIN_SPILL_MEM_START_1                                 0x20E
-#define   O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1         0
-#define   W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1         3
-#define R_JUMFRIN_SPILL_MEM_SIZE                                    0x20F
-#define   O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE              0
-#define   W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE             32
-#define R_CLASS1_SPILL_MEM_START_0                                  0x210
-#define   O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0           0
-#define   W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0          32
-#define R_CLASS1_SPILL_MEM_START_1                                  0x211
-#define   O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1           0
-#define   W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1           3
-#define R_CLASS1_SPILL_MEM_SIZE                                     0x212
-#define   O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE                0
-#define   W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE               32
-#define R_CLASS2_SPILL_MEM_START_0                                  0x213
-#define   O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0           0
-#define   W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0          32
-#define R_CLASS2_SPILL_MEM_START_1                                  0x214
-#define   O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1           0
-#define   W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1           3
-#define R_CLASS2_SPILL_MEM_SIZE                                     0x215
-#define   O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE                0
-#define   W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE               32
-#define R_CLASS3_SPILL_MEM_START_0                                  0x216
-#define   O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0           0
-#define   W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0          32
-#define R_CLASS3_SPILL_MEM_START_1                                  0x217
-#define   O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1           0
-#define   W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1           3
-#define R_CLASS3_SPILL_MEM_SIZE                                     0x218
-#define   O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE                0
-#define   W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE               32
-#define R_REG_FRIN1_SPILL_MEM_START_0                               0x219
-#define R_REG_FRIN1_SPILL_MEM_START_1                               0x21a
-#define R_REG_FRIN1_SPILL_MEM_SIZE                                  0x21b
-#define R_SPIHNGY0                                                  0x219
-#define   O_SPIHNGY0__EG_HNGY_THRESH_0                              24
-#define   W_SPIHNGY0__EG_HNGY_THRESH_0                              7
-#define   O_SPIHNGY0__EG_HNGY_THRESH_1                              16
-#define   W_SPIHNGY0__EG_HNGY_THRESH_1                              7
-#define   O_SPIHNGY0__EG_HNGY_THRESH_2                              8
-#define   W_SPIHNGY0__EG_HNGY_THRESH_2                              7
-#define   O_SPIHNGY0__EG_HNGY_THRESH_3                              0
-#define   W_SPIHNGY0__EG_HNGY_THRESH_3                              7
-#define R_SPIHNGY1                                                  0x21A
-#define   O_SPIHNGY1__EG_HNGY_THRESH_4                              24
-#define   W_SPIHNGY1__EG_HNGY_THRESH_4                              7
-#define   O_SPIHNGY1__EG_HNGY_THRESH_5                              16
-#define   W_SPIHNGY1__EG_HNGY_THRESH_5                              7
-#define   O_SPIHNGY1__EG_HNGY_THRESH_6                              8
-#define   W_SPIHNGY1__EG_HNGY_THRESH_6                              7
-#define   O_SPIHNGY1__EG_HNGY_THRESH_7                              0
-#define   W_SPIHNGY1__EG_HNGY_THRESH_7                              7
-#define R_SPIHNGY2                                                  0x21B
-#define   O_SPIHNGY2__EG_HNGY_THRESH_8                              24
-#define   W_SPIHNGY2__EG_HNGY_THRESH_8                              7
-#define   O_SPIHNGY2__EG_HNGY_THRESH_9                              16
-#define   W_SPIHNGY2__EG_HNGY_THRESH_9                              7
-#define   O_SPIHNGY2__EG_HNGY_THRESH_10                             8
-#define   W_SPIHNGY2__EG_HNGY_THRESH_10                             7
-#define   O_SPIHNGY2__EG_HNGY_THRESH_11                             0
-#define   W_SPIHNGY2__EG_HNGY_THRESH_11                             7
-#define R_SPIHNGY3                                                  0x21C
-#define   O_SPIHNGY3__EG_HNGY_THRESH_12                             24
-#define   W_SPIHNGY3__EG_HNGY_THRESH_12                             7
-#define   O_SPIHNGY3__EG_HNGY_THRESH_13                             16
-#define   W_SPIHNGY3__EG_HNGY_THRESH_13                             7
-#define   O_SPIHNGY3__EG_HNGY_THRESH_14                             8
-#define   W_SPIHNGY3__EG_HNGY_THRESH_14                             7
-#define   O_SPIHNGY3__EG_HNGY_THRESH_15                             0
-#define   W_SPIHNGY3__EG_HNGY_THRESH_15                             7
-#define R_SPISTRV0                                                  0x21D
-#define   O_SPISTRV0__EG_STRV_THRESH_0                              24
-#define   W_SPISTRV0__EG_STRV_THRESH_0                              7
-#define   O_SPISTRV0__EG_STRV_THRESH_1                              16
-#define   W_SPISTRV0__EG_STRV_THRESH_1                              7
-#define   O_SPISTRV0__EG_STRV_THRESH_2                              8
-#define   W_SPISTRV0__EG_STRV_THRESH_2                              7
-#define   O_SPISTRV0__EG_STRV_THRESH_3                              0
-#define   W_SPISTRV0__EG_STRV_THRESH_3                              7
-#define R_SPISTRV1                                                  0x21E
-#define   O_SPISTRV1__EG_STRV_THRESH_4                              24
-#define   W_SPISTRV1__EG_STRV_THRESH_4                              7
-#define   O_SPISTRV1__EG_STRV_THRESH_5                              16
-#define   W_SPISTRV1__EG_STRV_THRESH_5                              7
-#define   O_SPISTRV1__EG_STRV_THRESH_6                              8
-#define   W_SPISTRV1__EG_STRV_THRESH_6                              7
-#define   O_SPISTRV1__EG_STRV_THRESH_7                              0
-#define   W_SPISTRV1__EG_STRV_THRESH_7                              7
-#define R_SPISTRV2                                                  0x21F
-#define   O_SPISTRV2__EG_STRV_THRESH_8                              24
-#define   W_SPISTRV2__EG_STRV_THRESH_8                              7
-#define   O_SPISTRV2__EG_STRV_THRESH_9                              16
-#define   W_SPISTRV2__EG_STRV_THRESH_9                              7
-#define   O_SPISTRV2__EG_STRV_THRESH_10                             8
-#define   W_SPISTRV2__EG_STRV_THRESH_10                             7
-#define   O_SPISTRV2__EG_STRV_THRESH_11                             0
-#define   W_SPISTRV2__EG_STRV_THRESH_11                             7
-#define R_SPISTRV3                                                  0x220
-#define   O_SPISTRV3__EG_STRV_THRESH_12                             24
-#define   W_SPISTRV3__EG_STRV_THRESH_12                             7
-#define   O_SPISTRV3__EG_STRV_THRESH_13                             16
-#define   W_SPISTRV3__EG_STRV_THRESH_13                             7
-#define   O_SPISTRV3__EG_STRV_THRESH_14                             8
-#define   W_SPISTRV3__EG_STRV_THRESH_14                             7
-#define   O_SPISTRV3__EG_STRV_THRESH_15                             0
-#define   W_SPISTRV3__EG_STRV_THRESH_15                             7
-#define R_TXDATAFIFO0                                               0x221
-#define   O_TXDATAFIFO0__TX0DATAFIFOSTART                           24
-#define   W_TXDATAFIFO0__TX0DATAFIFOSTART                           7
-#define   O_TXDATAFIFO0__TX0DATAFIFOSIZE                            16
-#define   W_TXDATAFIFO0__TX0DATAFIFOSIZE                            7
-#define   O_TXDATAFIFO0__TX1DATAFIFOSTART                           8
-#define   W_TXDATAFIFO0__TX1DATAFIFOSTART                           7
-#define   O_TXDATAFIFO0__TX1DATAFIFOSIZE                            0
-#define   W_TXDATAFIFO0__TX1DATAFIFOSIZE                            7
-#define R_TXDATAFIFO1                                               0x222
-#define   O_TXDATAFIFO1__TX2DATAFIFOSTART                           24
-#define   W_TXDATAFIFO1__TX2DATAFIFOSTART                           7
-#define   O_TXDATAFIFO1__TX2DATAFIFOSIZE                            16
-#define   W_TXDATAFIFO1__TX2DATAFIFOSIZE                            7
-#define   O_TXDATAFIFO1__TX3DATAFIFOSTART                           8
-#define   W_TXDATAFIFO1__TX3DATAFIFOSTART                           7
-#define   O_TXDATAFIFO1__TX3DATAFIFOSIZE                            0
-#define   W_TXDATAFIFO1__TX3DATAFIFOSIZE                            7
-#define R_TXDATAFIFO2                                               0x223
-#define   O_TXDATAFIFO2__TX4DATAFIFOSTART                           24
-#define   W_TXDATAFIFO2__TX4DATAFIFOSTART                           7
-#define   O_TXDATAFIFO2__TX4DATAFIFOSIZE                            16
-#define   W_TXDATAFIFO2__TX4DATAFIFOSIZE                            7
-#define   O_TXDATAFIFO2__TX5DATAFIFOSTART                           8
-#define   W_TXDATAFIFO2__TX5DATAFIFOSTART                           7
-#define   O_TXDATAFIFO2__TX5DATAFIFOSIZE                            0
-#define   W_TXDATAFIFO2__TX5DATAFIFOSIZE                            7
-#define R_TXDATAFIFO3                                               0x224
-#define   O_TXDATAFIFO3__TX6DATAFIFOSTART                           24
-#define   W_TXDATAFIFO3__TX6DATAFIFOSTART                           7
-#define   O_TXDATAFIFO3__TX6DATAFIFOSIZE                            16
-#define   W_TXDATAFIFO3__TX6DATAFIFOSIZE                            7
-#define   O_TXDATAFIFO3__TX7DATAFIFOSTART                           8
-#define   W_TXDATAFIFO3__TX7DATAFIFOSTART                           7
-#define   O_TXDATAFIFO3__TX7DATAFIFOSIZE                            0
-#define   W_TXDATAFIFO3__TX7DATAFIFOSIZE                            7
-#define R_TXDATAFIFO4                                               0x225
-#define   O_TXDATAFIFO4__TX8DATAFIFOSTART                           24
-#define   W_TXDATAFIFO4__TX8DATAFIFOSTART                           7
-#define   O_TXDATAFIFO4__TX8DATAFIFOSIZE                            16
-#define   W_TXDATAFIFO4__TX8DATAFIFOSIZE                            7
-#define   O_TXDATAFIFO4__TX9DATAFIFOSTART                           8
-#define   W_TXDATAFIFO4__TX9DATAFIFOSTART                           7
-#define   O_TXDATAFIFO4__TX9DATAFIFOSIZE                            0
-#define   W_TXDATAFIFO4__TX9DATAFIFOSIZE                            7
-#define R_TXDATAFIFO5                                               0x226
-#define   O_TXDATAFIFO5__TX10DATAFIFOSTART                          24
-#define   W_TXDATAFIFO5__TX10DATAFIFOSTART                          7
-#define   O_TXDATAFIFO5__TX10DATAFIFOSIZE                           16
-#define   W_TXDATAFIFO5__TX10DATAFIFOSIZE                           7
-#define   O_TXDATAFIFO5__TX11DATAFIFOSTART                          8
-#define   W_TXDATAFIFO5__TX11DATAFIFOSTART                          7
-#define   O_TXDATAFIFO5__TX11DATAFIFOSIZE                           0
-#define   W_TXDATAFIFO5__TX11DATAFIFOSIZE                           7
-#define R_TXDATAFIFO6                                               0x227
-#define   O_TXDATAFIFO6__TX12DATAFIFOSTART                          24
-#define   W_TXDATAFIFO6__TX12DATAFIFOSTART                          7
-#define   O_TXDATAFIFO6__TX12DATAFIFOSIZE                           16
-#define   W_TXDATAFIFO6__TX12DATAFIFOSIZE                           7
-#define   O_TXDATAFIFO6__TX13DATAFIFOSTART                          8
-#define   W_TXDATAFIFO6__TX13DATAFIFOSTART                          7
-#define   O_TXDATAFIFO6__TX13DATAFIFOSIZE                           0
-#define   W_TXDATAFIFO6__TX13DATAFIFOSIZE                           7
-#define R_TXDATAFIFO7                                               0x228
-#define   O_TXDATAFIFO7__TX14DATAFIFOSTART                          24
-#define   W_TXDATAFIFO7__TX14DATAFIFOSTART                          7
-#define   O_TXDATAFIFO7__TX14DATAFIFOSIZE                           16
-#define   W_TXDATAFIFO7__TX14DATAFIFOSIZE                           7
-#define   O_TXDATAFIFO7__TX15DATAFIFOSTART                          8
-#define   W_TXDATAFIFO7__TX15DATAFIFOSTART                          7
-#define   O_TXDATAFIFO7__TX15DATAFIFOSIZE                           0
-#define   W_TXDATAFIFO7__TX15DATAFIFOSIZE                           7
-#define R_RXDATAFIFO0                                               0x229
-#define   O_RXDATAFIFO0__RX0DATAFIFOSTART                           24
-#define   W_RXDATAFIFO0__RX0DATAFIFOSTART                           7
-#define   O_RXDATAFIFO0__RX0DATAFIFOSIZE                            16
-#define   W_RXDATAFIFO0__RX0DATAFIFOSIZE                            7
-#define   O_RXDATAFIFO0__RX1DATAFIFOSTART                           8
-#define   W_RXDATAFIFO0__RX1DATAFIFOSTART                           7
-#define   O_RXDATAFIFO0__RX1DATAFIFOSIZE                            0
-#define   W_RXDATAFIFO0__RX1DATAFIFOSIZE                            7
-#define R_RXDATAFIFO1                                               0x22A
-#define   O_RXDATAFIFO1__RX2DATAFIFOSTART                           24
-#define   W_RXDATAFIFO1__RX2DATAFIFOSTART                           7
-#define   O_RXDATAFIFO1__RX2DATAFIFOSIZE                            16
-#define   W_RXDATAFIFO1__RX2DATAFIFOSIZE                            7
-#define   O_RXDATAFIFO1__RX3DATAFIFOSTART                           8
-#define   W_RXDATAFIFO1__RX3DATAFIFOSTART                           7
-#define   O_RXDATAFIFO1__RX3DATAFIFOSIZE                            0
-#define   W_RXDATAFIFO1__RX3DATAFIFOSIZE                            7
-#define R_RXDATAFIFO2                                               0x22B
-#define   O_RXDATAFIFO2__RX4DATAFIFOSTART                           24
-#define   W_RXDATAFIFO2__RX4DATAFIFOSTART                           7
-#define   O_RXDATAFIFO2__RX4DATAFIFOSIZE                            16
-#define   W_RXDATAFIFO2__RX4DATAFIFOSIZE                            7
-#define   O_RXDATAFIFO2__RX5DATAFIFOSTART                           8
-#define   W_RXDATAFIFO2__RX5DATAFIFOSTART                           7
-#define   O_RXDATAFIFO2__RX5DATAFIFOSIZE                            0
-#define   W_RXDATAFIFO2__RX5DATAFIFOSIZE                            7
-#define R_RXDATAFIFO3                                               0x22C
-#define   O_RXDATAFIFO3__RX6DATAFIFOSTART                           24
-#define   W_RXDATAFIFO3__RX6DATAFIFOSTART                           7
-#define   O_RXDATAFIFO3__RX6DATAFIFOSIZE                            16
-#define   W_RXDATAFIFO3__RX6DATAFIFOSIZE                            7
-#define   O_RXDATAFIFO3__RX7DATAFIFOSTART                           8
-#define   W_RXDATAFIFO3__RX7DATAFIFOSTART                           7
-#define   O_RXDATAFIFO3__RX7DATAFIFOSIZE                            0
-#define   W_RXDATAFIFO3__RX7DATAFIFOSIZE                            7
-#define R_RXDATAFIFO4                                               0x22D
-#define   O_RXDATAFIFO4__RX8DATAFIFOSTART                           24
-#define   W_RXDATAFIFO4__RX8DATAFIFOSTART                           7
-#define   O_RXDATAFIFO4__RX8DATAFIFOSIZE                            16
-#define   W_RXDATAFIFO4__RX8DATAFIFOSIZE                            7
-#define   O_RXDATAFIFO4__RX9DATAFIFOSTART                           8
-#define   W_RXDATAFIFO4__RX9DATAFIFOSTART                           7
-#define   O_RXDATAFIFO4__RX9DATAFIFOSIZE                            0
-#define   W_RXDATAFIFO4__RX9DATAFIFOSIZE                            7
-#define R_RXDATAFIFO5                                               0x22E
-#define   O_RXDATAFIFO5__RX10DATAFIFOSTART                          24
-#define   W_RXDATAFIFO5__RX10DATAFIFOSTART                          7
-#define   O_RXDATAFIFO5__RX10DATAFIFOSIZE                           16
-#define   W_RXDATAFIFO5__RX10DATAFIFOSIZE                           7
-#define   O_RXDATAFIFO5__RX11DATAFIFOSTART                          8
-#define   W_RXDATAFIFO5__RX11DATAFIFOSTART                          7
-#define   O_RXDATAFIFO5__RX11DATAFIFOSIZE                           0
-#define   W_RXDATAFIFO5__RX11DATAFIFOSIZE                           7
-#define R_RXDATAFIFO6                                               0x22F
-#define   O_RXDATAFIFO6__RX12DATAFIFOSTART                          24
-#define   W_RXDATAFIFO6__RX12DATAFIFOSTART                          7
-#define   O_RXDATAFIFO6__RX12DATAFIFOSIZE                           16
-#define   W_RXDATAFIFO6__RX12DATAFIFOSIZE                           7
-#define   O_RXDATAFIFO6__RX13DATAFIFOSTART                          8
-#define   W_RXDATAFIFO6__RX13DATAFIFOSTART                          7
-#define   O_RXDATAFIFO6__RX13DATAFIFOSIZE                           0
-#define   W_RXDATAFIFO6__RX13DATAFIFOSIZE                           7
-#define R_RXDATAFIFO7                                               0x230
-#define   O_RXDATAFIFO7__RX14DATAFIFOSTART                          24
-#define   W_RXDATAFIFO7__RX14DATAFIFOSTART                          7
-#define   O_RXDATAFIFO7__RX14DATAFIFOSIZE                           16
-#define   W_RXDATAFIFO7__RX14DATAFIFOSIZE                           7
-#define   O_RXDATAFIFO7__RX15DATAFIFOSTART                          8
-#define   W_RXDATAFIFO7__RX15DATAFIFOSTART                          7
-#define   O_RXDATAFIFO7__RX15DATAFIFOSIZE                           0
-#define   W_RXDATAFIFO7__RX15DATAFIFOSIZE                           7
-#define R_XGMACPADCALIBRATION                                       0x231
-#define R_FREEQCARVE                                                0x233
-#define R_SPI4STATICDELAY0                                          0x240
-#define   O_SPI4STATICDELAY0__DATALINE7                             28
-#define   W_SPI4STATICDELAY0__DATALINE7                             4
-#define   O_SPI4STATICDELAY0__DATALINE6                             24
-#define   W_SPI4STATICDELAY0__DATALINE6                             4
-#define   O_SPI4STATICDELAY0__DATALINE5                             20
-#define   W_SPI4STATICDELAY0__DATALINE5                             4
-#define   O_SPI4STATICDELAY0__DATALINE4                             16
-#define   W_SPI4STATICDELAY0__DATALINE4                             4
-#define   O_SPI4STATICDELAY0__DATALINE3                             12
-#define   W_SPI4STATICDELAY0__DATALINE3                             4
-#define   O_SPI4STATICDELAY0__DATALINE2                             8
-#define   W_SPI4STATICDELAY0__DATALINE2                             4
-#define   O_SPI4STATICDELAY0__DATALINE1                             4
-#define   W_SPI4STATICDELAY0__DATALINE1                             4
-#define   O_SPI4STATICDELAY0__DATALINE0                             0
-#define   W_SPI4STATICDELAY0__DATALINE0                             4
-#define R_SPI4STATICDELAY1                                          0x241
-#define   O_SPI4STATICDELAY1__DATALINE15                            28
-#define   W_SPI4STATICDELAY1__DATALINE15                            4
-#define   O_SPI4STATICDELAY1__DATALINE14                            24
-#define   W_SPI4STATICDELAY1__DATALINE14                            4
-#define   O_SPI4STATICDELAY1__DATALINE13                            20
-#define   W_SPI4STATICDELAY1__DATALINE13                            4
-#define   O_SPI4STATICDELAY1__DATALINE12                            16
-#define   W_SPI4STATICDELAY1__DATALINE12                            4
-#define   O_SPI4STATICDELAY1__DATALINE11                            12
-#define   W_SPI4STATICDELAY1__DATALINE11                            4
-#define   O_SPI4STATICDELAY1__DATALINE10                            8
-#define   W_SPI4STATICDELAY1__DATALINE10                            4
-#define   O_SPI4STATICDELAY1__DATALINE9                             4
-#define   W_SPI4STATICDELAY1__DATALINE9                             4
-#define   O_SPI4STATICDELAY1__DATALINE8                             0
-#define   W_SPI4STATICDELAY1__DATALINE8                             4
-#define R_SPI4STATICDELAY2                                          0x242
-#define   O_SPI4STATICDELAY0__TXSTAT1                               8
-#define   W_SPI4STATICDELAY0__TXSTAT1                               4
-#define   O_SPI4STATICDELAY0__TXSTAT0                               4
-#define   W_SPI4STATICDELAY0__TXSTAT0                               4
-#define   O_SPI4STATICDELAY0__RXCONTROL                             0
-#define   W_SPI4STATICDELAY0__RXCONTROL                             4
-#define R_SPI4CONTROL                                               0x243
-#define   O_SPI4CONTROL__STATICDELAY                                2
-#define   O_SPI4CONTROL__LVDS_LVTTL                                 1
-#define   O_SPI4CONTROL__SPI4ENABLE                                 0
-#define R_CLASSWATERMARKS                                           0x244
-#define   O_CLASSWATERMARKS__CLASS0WATERMARK                        24
-#define   W_CLASSWATERMARKS__CLASS0WATERMARK                        5
-#define   O_CLASSWATERMARKS__CLASS1WATERMARK                        16
-#define   W_CLASSWATERMARKS__CLASS1WATERMARK                        5
-#define   O_CLASSWATERMARKS__CLASS3WATERMARK                        0
-#define   W_CLASSWATERMARKS__CLASS3WATERMARK                        5
-#define R_RXWATERMARKS1                                              0x245
-#define   O_RXWATERMARKS__RX0DATAWATERMARK                          24
-#define   W_RXWATERMARKS__RX0DATAWATERMARK                          7
-#define   O_RXWATERMARKS__RX1DATAWATERMARK                          16
-#define   W_RXWATERMARKS__RX1DATAWATERMARK                          7
-#define   O_RXWATERMARKS__RX3DATAWATERMARK                          0
-#define   W_RXWATERMARKS__RX3DATAWATERMARK                          7
-#define R_RXWATERMARKS2                                              0x246
-#define   O_RXWATERMARKS__RX4DATAWATERMARK                          24
-#define   W_RXWATERMARKS__RX4DATAWATERMARK                          7
-#define   O_RXWATERMARKS__RX5DATAWATERMARK                          16
-#define   W_RXWATERMARKS__RX5DATAWATERMARK                          7
-#define   O_RXWATERMARKS__RX6DATAWATERMARK                          8
-#define   W_RXWATERMARKS__RX6DATAWATERMARK                          7
-#define   O_RXWATERMARKS__RX7DATAWATERMARK                          0
-#define   W_RXWATERMARKS__RX7DATAWATERMARK                          7
-#define R_RXWATERMARKS3                                              0x247
-#define   O_RXWATERMARKS__RX8DATAWATERMARK                          24
-#define   W_RXWATERMARKS__RX8DATAWATERMARK                          7
-#define   O_RXWATERMARKS__RX9DATAWATERMARK                          16
-#define   W_RXWATERMARKS__RX9DATAWATERMARK                          7
-#define   O_RXWATERMARKS__RX10DATAWATERMARK                         8
-#define   W_RXWATERMARKS__RX10DATAWATERMARK                         7
-#define   O_RXWATERMARKS__RX11DATAWATERMARK                         0
-#define   W_RXWATERMARKS__RX11DATAWATERMARK                         7
-#define R_RXWATERMARKS4                                              0x248
-#define   O_RXWATERMARKS__RX12DATAWATERMARK                         24
-#define   W_RXWATERMARKS__RX12DATAWATERMARK                         7
-#define   O_RXWATERMARKS__RX13DATAWATERMARK                         16
-#define   W_RXWATERMARKS__RX13DATAWATERMARK                         7
-#define   O_RXWATERMARKS__RX14DATAWATERMARK                         8
-#define   W_RXWATERMARKS__RX14DATAWATERMARK                         7
-#define   O_RXWATERMARKS__RX15DATAWATERMARK                         0
-#define   W_RXWATERMARKS__RX15DATAWATERMARK                         7
-#define R_FREEWATERMARKS                                            0x249
-#define   O_FREEWATERMARKS__FREEOUTWATERMARK                        16
-#define   W_FREEWATERMARKS__FREEOUTWATERMARK                        16
-#define   O_FREEWATERMARKS__JUMFRWATERMARK                          8
-#define   W_FREEWATERMARKS__JUMFRWATERMARK                          7
-#define   O_FREEWATERMARKS__REGFRWATERMARK                          0
-#define   W_FREEWATERMARKS__REGFRWATERMARK                          7
-#define R_EGRESSFIFOCARVINGSLOTS                                    0x24a
-
-#define CTRL_RES0           0
-#define CTRL_RES1           1
-#define CTRL_REG_FREE       2
-#define CTRL_JUMBO_FREE     3
-#define CTRL_CONT           4
-#define CTRL_EOP            5
-#define CTRL_START          6
-#define CTRL_SNGL           7
-
-#define CTRL_B0_NOT_EOP     0
-#define CTRL_B0_EOP         1
-
-#define R_ROUND_ROBIN_TABLE                 0
-#define R_PDE_CLASS_0                       0x300
-#define R_PDE_CLASS_1                       0x302
-#define R_PDE_CLASS_2                       0x304
-#define R_PDE_CLASS_3                       0x306
-
-#define R_MSG_TX_THRESHOLD                  0x308
-
-#define R_GMAC_JFR0_BUCKET_SIZE              0x320
-#define R_GMAC_RFR0_BUCKET_SIZE              0x321
-#define R_GMAC_TX0_BUCKET_SIZE              0x322
-#define R_GMAC_TX1_BUCKET_SIZE              0x323
-#define R_GMAC_TX2_BUCKET_SIZE              0x324
-#define R_GMAC_TX3_BUCKET_SIZE              0x325
-#define R_GMAC_JFR1_BUCKET_SIZE              0x326
-#define R_GMAC_RFR1_BUCKET_SIZE              0x327
-
-#define R_XGS_TX0_BUCKET_SIZE               0x320
-#define R_XGS_TX1_BUCKET_SIZE               0x321
-#define R_XGS_TX2_BUCKET_SIZE               0x322
-#define R_XGS_TX3_BUCKET_SIZE               0x323
-#define R_XGS_TX4_BUCKET_SIZE               0x324
-#define R_XGS_TX5_BUCKET_SIZE               0x325
-#define R_XGS_TX6_BUCKET_SIZE               0x326
-#define R_XGS_TX7_BUCKET_SIZE               0x327
-#define R_XGS_TX8_BUCKET_SIZE               0x328
-#define R_XGS_TX9_BUCKET_SIZE               0x329
-#define R_XGS_TX10_BUCKET_SIZE              0x32A
-#define R_XGS_TX11_BUCKET_SIZE              0x32B
-#define R_XGS_TX12_BUCKET_SIZE              0x32C
-#define R_XGS_TX13_BUCKET_SIZE              0x32D
-#define R_XGS_TX14_BUCKET_SIZE              0x32E
-#define R_XGS_TX15_BUCKET_SIZE              0x32F
-#define R_XGS_JFR_BUCKET_SIZE               0x330
-#define R_XGS_RFR_BUCKET_SIZE               0x331
-
-#define R_CC_CPU0_0                         0x380
-#define R_CC_CPU1_0                         0x388
-#define R_CC_CPU2_0                         0x390
-#define R_CC_CPU3_0                         0x398
-#define R_CC_CPU4_0                         0x3a0
-#define R_CC_CPU5_0                         0x3a8
-#define R_CC_CPU6_0                         0x3b0
-#define R_CC_CPU7_0                         0x3b8
-
-#define XLR_GMAC_BLK_SZ                            (XLR_IO_GMAC_1_OFFSET - \
-               XLR_IO_GMAC_0_OFFSET)
-
-/* Constants used for configuring the devices */
-
-#define XLR_FB_STN                     6 /* Bucket used for Tx freeback */
-
-#define MAC_B2B_IPG                     88
-
-#define        XLR_NET_PREPAD_LEN              32
-
-/* frame sizes need to be cacheline aligned */
-#define MAX_FRAME_SIZE                  (1536 + XLR_NET_PREPAD_LEN)
-#define MAX_FRAME_SIZE_JUMBO            9216
-
-#define MAC_SKB_BACK_PTR_SIZE           SMP_CACHE_BYTES
-#define MAC_PREPAD                      0
-#define BYTE_OFFSET                     2
-#define XLR_RX_BUF_SIZE                 (MAX_FRAME_SIZE + BYTE_OFFSET + \
-               MAC_PREPAD + MAC_SKB_BACK_PTR_SIZE + SMP_CACHE_BYTES)
-#define MAC_CRC_LEN                     4
-#define MAX_NUM_MSGRNG_STN_CC           128
-#define MAX_MSG_SND_ATTEMPTS           100     /* 13 stns x 4 entry msg/stn +
-                                                * headroom
-                                                */
-
-#define MAC_FRIN_TO_BE_SENT_THRESHOLD   16
-
-#define MAX_NUM_DESC_SPILL             1024
-#define MAX_FRIN_SPILL                  (MAX_NUM_DESC_SPILL << 2)
-#define MAX_FROUT_SPILL                 (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_0_SPILL               (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_1_SPILL               (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_2_SPILL               (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_3_SPILL               (MAX_NUM_DESC_SPILL << 2)
-
-enum {
-       SGMII_SPEED_10 = 0x00000000,
-       SGMII_SPEED_100 = 0x02000000,
-       SGMII_SPEED_1000 = 0x04000000,
-};
-
-enum tsv_rsv_reg {
-       TX_RX_64_BYTE_FRAME = 0x20,
-       TX_RX_64_127_BYTE_FRAME,
-       TX_RX_128_255_BYTE_FRAME,
-       TX_RX_256_511_BYTE_FRAME,
-       TX_RX_512_1023_BYTE_FRAME,
-       TX_RX_1024_1518_BYTE_FRAME,
-       TX_RX_1519_1522_VLAN_BYTE_FRAME,
-
-       RX_BYTE_COUNTER = 0x27,
-       RX_PACKET_COUNTER,
-       RX_FCS_ERROR_COUNTER,
-       RX_MULTICAST_PACKET_COUNTER,
-       RX_BROADCAST_PACKET_COUNTER,
-       RX_CONTROL_FRAME_PACKET_COUNTER,
-       RX_PAUSE_FRAME_PACKET_COUNTER,
-       RX_UNKNOWN_OP_CODE_COUNTER,
-       RX_ALIGNMENT_ERROR_COUNTER,
-       RX_FRAME_LENGTH_ERROR_COUNTER,
-       RX_CODE_ERROR_COUNTER,
-       RX_CARRIER_SENSE_ERROR_COUNTER,
-       RX_UNDERSIZE_PACKET_COUNTER,
-       RX_OVERSIZE_PACKET_COUNTER,
-       RX_FRAGMENTS_COUNTER,
-       RX_JABBER_COUNTER,
-       RX_DROP_PACKET_COUNTER,
-
-       TX_BYTE_COUNTER   = 0x38,
-       TX_PACKET_COUNTER,
-       TX_MULTICAST_PACKET_COUNTER,
-       TX_BROADCAST_PACKET_COUNTER,
-       TX_PAUSE_CONTROL_FRAME_COUNTER,
-       TX_DEFERRAL_PACKET_COUNTER,
-       TX_EXCESSIVE_DEFERRAL_PACKET_COUNTER,
-       TX_SINGLE_COLLISION_PACKET_COUNTER,
-       TX_MULTI_COLLISION_PACKET_COUNTER,
-       TX_LATE_COLLISION_PACKET_COUNTER,
-       TX_EXCESSIVE_COLLISION_PACKET_COUNTER,
-       TX_TOTAL_COLLISION_COUNTER,
-       TX_PAUSE_FRAME_HONERED_COUNTER,
-       TX_DROP_FRAME_COUNTER,
-       TX_JABBER_FRAME_COUNTER,
-       TX_FCS_ERROR_COUNTER,
-       TX_CONTROL_FRAME_COUNTER,
-       TX_OVERSIZE_FRAME_COUNTER,
-       TX_UNDERSIZE_FRAME_COUNTER,
-       TX_FRAGMENT_FRAME_COUNTER,
-
-       CARRY_REG_1 = 0x4c,
-       CARRY_REG_2 = 0x4d,
-};
-
-struct xlr_adapter {
-       struct net_device *netdev[4];
-};
-
-struct xlr_net_priv {
-       u32 __iomem *base_addr;
-       struct net_device *ndev;
-       struct xlr_adapter *adapter;
-       struct mii_bus *mii_bus;
-       int num_rx_desc;
-       int phy_addr;   /* PHY addr on MDIO bus */
-       int pcs_id;     /* PCS id on MDIO bus */
-       int port_id;    /* Port(gmac/xgmac) number, i.e 0-7 */
-       int tx_stnid;
-       u32 __iomem *mii_addr;
-       u32 __iomem *serdes_addr;
-       u32 __iomem *pcs_addr;
-       u32 __iomem *gpio_addr;
-       int phy_speed;
-       int port_type;
-       struct timer_list queue_timer;
-       int wakeup_q;
-       struct platform_device *pdev;
-       struct xlr_net_data *nd;
-
-       u64 *frin_spill;
-       u64 *frout_spill;
-       u64 *class_0_spill;
-       u64 *class_1_spill;
-       u64 *class_2_spill;
-       u64 *class_3_spill;
-};
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv);
index 55c3d4a6faeb8fa7a60a758fd8eac36fa8e8631d..b4820ad2cee735f2bf16bada6e7d143de90b6f76 100644 (file)
@@ -107,6 +107,7 @@ static struct rt_channel_plan_map   RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
        {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
        {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
        {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+       {0x00}, /* 0x13 */
        {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
        {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
        {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
@@ -118,6 +119,7 @@ static struct rt_channel_plan_map   RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
        {0x00}, /* 0x1C, */
        {0x00}, /* 0x1D, */
        {0x00}, /* 0x1E, */
+       {0x00}, /* 0x1F, */
        /*  0x20 ~ 0x7F , New Define ===== */
        {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
        {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
@@ -6845,12 +6847,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
        struct mlme_ext_priv            *pmlmeext = &padapter->mlmeextpriv;
        struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
-       pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+       pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
        if (!pcmd_obj)
                return;
 
        cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
-       pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+       pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
        if (!pevtcmd) {
                kfree(pcmd_obj);
                return;
index 52d42e57644332c303caa12e8c06e22cb4e6cb3e..9404355726d0ae709c5e6cd0a5d04eafa8f29d98 100644 (file)
@@ -1980,6 +1980,7 @@ static int rtw_wx_read32(struct net_device *dev,
        u32 data32;
        u32 bytes;
        u8 *ptmp;
+       int ret;
 
        padapter = (struct adapter *)rtw_netdev_priv(dev);
        p = &wrqu->data;
@@ -2007,12 +2008,17 @@ static int rtw_wx_read32(struct net_device *dev,
                break;
        default:
                DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_free_ptmp;
        }
        DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
 
        kfree(ptmp);
        return 0;
+
+err_free_ptmp:
+       kfree(ptmp);
+       return ret;
 }
 
 static int rtw_wx_write32(struct net_device *dev,
index a9b6ffdbf31ab94a5c10feb0b67438518197de6d..f7ce724ebf87ed5f3a0c298aec7fc690cb3a989a 100644 (file)
@@ -112,7 +112,7 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
 
        buff = NULL;
        if (authmode == _WPA_IE_ID_) {
-               buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL);
+               buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
                if (!buff)
                        return;
                p = buff;
index d2e9df60e9baff9c96d5505e1dd7390787ea4ab6..b9ce71848023a3e3fa27d6cfedbe28f3dfe954da 100644 (file)
@@ -2549,13 +2549,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
                        free_irq(dev->irq, dev);
                        priv->irq = 0;
                }
-               free_rtllib(dev);
 
                if (dev->mem_start != 0) {
                        iounmap((void __iomem *)dev->mem_start);
                        release_mem_region(pci_resource_start(pdev, 1),
                                        pci_resource_len(pdev, 1));
                }
+
+               free_rtllib(dev);
        }
 
        pci_disable_device(pdev);
index 0b65de9f2df18c5b6e0d05e7362b9c43242a4e35..95a88f6224cd969f5ab3ae4d719669b9c5e4e3f4 100644 (file)
@@ -520,7 +520,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item,
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_show_tg_pt_gp_info(lun, page);
@@ -531,7 +531,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item,
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_store_tg_pt_gp_info(lun, page, count);
@@ -542,7 +542,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_show(
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_show_offline_bit(lun, page);
@@ -553,7 +553,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_store(
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_store_offline_bit(lun, page, count);
@@ -564,7 +564,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_show(
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_show_secondary_status(lun, page);
@@ -575,7 +575,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_store(
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_store_secondary_status(lun, page, count);
@@ -586,7 +586,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_show(
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_show_secondary_write_metadata(lun, page);
@@ -597,7 +597,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_store(
 {
        struct se_lun *lun = item_to_lun(item);
 
-       if (!lun || !lun->lun_se_dev)
+       if (!lun->lun_se_dev)
                return -ENODEV;
 
        return core_alua_store_secondary_write_metadata(lun, page, count);
index 22703a0dbd072ece6d5ca0a91d1d1c898f84ccc3..4c76498d3fb06ec80bbb7bb2a745153680f886dc 100644 (file)
@@ -40,11 +40,11 @@ static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
         *
         * See spc4r17 section 6.4.2 Table 135
         */
-       spin_lock(&lun->lun_tg_pt_gp_lock);
-       tg_pt_gp = lun->lun_tg_pt_gp;
+       rcu_read_lock();
+       tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
        if (tg_pt_gp)
                buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
-       spin_unlock(&lun->lun_tg_pt_gp_lock);
+       rcu_read_unlock();
 }
 
 static u16
@@ -325,14 +325,14 @@ check_t10_vend_desc:
                 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
                 * section 7.5.1 Table 362
                 */
-               spin_lock(&lun->lun_tg_pt_gp_lock);
-               tg_pt_gp = lun->lun_tg_pt_gp;
+               rcu_read_lock();
+               tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
                if (!tg_pt_gp) {
-                       spin_unlock(&lun->lun_tg_pt_gp_lock);
+                       rcu_read_unlock();
                        goto check_lu_gp;
                }
                tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
-               spin_unlock(&lun->lun_tg_pt_gp_lock);
+               rcu_read_unlock();
 
                buf[off] = tpg->proto_id << 4;
                buf[off++] |= 0x1; /* CODE SET == Binary */
index 45424824e0f9fc9a56727801ff94cde574ec4c72..d8c8683863aa06bb9bb7edc83ea8e2cae2ecd5ee 100644 (file)
@@ -810,10 +810,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
                return -EINVAL;
 
        optee = kzalloc(sizeof(*optee), GFP_KERNEL);
-       if (!optee) {
-               rc = -ENOMEM;
-               goto err;
-       }
+       if (!optee)
+               return -ENOMEM;
+
        optee->pool = optee_ffa_config_dyn_shm();
        if (IS_ERR(optee->pool)) {
                rc = PTR_ERR(optee->pool);
index 45c31f3d6054b7eec9c70fa80070ade258ad525e..5d046de96a5d030ea846a362a7262afb7900af22 100644 (file)
@@ -5,12 +5,12 @@
 
 config INT340X_THERMAL
        tristate "ACPI INT340X thermal drivers"
-       depends on X86 && ACPI && PCI
+       depends on X86_64 && ACPI && PCI
        select THERMAL_GOV_USER_SPACE
        select ACPI_THERMAL_REL
        select ACPI_FAN
        select INTEL_SOC_DTS_IOSF_CORE
-       select PROC_THERMAL_MMIO_RAPL if X86_64 && POWERCAP
+       select PROC_THERMAL_MMIO_RAPL if POWERCAP
        help
          Newer laptops and tablets that use ACPI may have thermal sensors and
          other devices with thermal control capabilities outside the core
index b25b54d4bac1ad0acb3e295b4c4acabb379d1322..e693ec8234fbcea7572accb8c733abab90945227 100644 (file)
@@ -29,7 +29,7 @@ static const char * const fivr_strings[] = {
 };
 
 static const struct mmio_reg tgl_fivr_mmio_regs[] = {
-       { 0, 0x5A18, 3, 0x7, 12}, /* vco_ref_code_lo */
+       { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */
        { 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */
        { 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */
        { 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */
index 648829ab79ff71d76b0cc84cdaa00cb487e5dbb1..82654dc8382b85e5a7a17733fa112757d61fccf9 100644 (file)
@@ -421,6 +421,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
 {
        struct thermal_instance *pos;
        tz->temperature = THERMAL_TEMP_INVALID;
+       tz->prev_low_trip = -INT_MAX;
+       tz->prev_high_trip = INT_MAX;
        list_for_each_entry(pos, &tz->thermal_instances, tz_node)
                pos->initialized = false;
 }
index f0bf01ea069ae0a217728987dd5237f8b4b6dd38..71e0dd2c0ce5b76717c96232bb9fcedf635a8091 100644 (file)
@@ -522,6 +522,7 @@ static struct xenbus_driver xencons_driver = {
        .remove = xencons_remove,
        .resume = xencons_resume,
        .otherend_changed = xencons_backend_changed,
+       .not_essential = true,
 };
 #endif /* CONFIG_HVC_XEN_FRONTEND */
 
index 7f656fac503fef0dd15521b85ab02feec0d70750..5163d60756b7332845cdb2b1b518b7a592b8bed2 100644 (file)
@@ -237,6 +237,7 @@ struct brcmuart_priv {
        u32             rx_err;
        u32             rx_timeout;
        u32             rx_abort;
+       u32             saved_mctrl;
 };
 
 static struct dentry *brcmuart_debugfs_root;
@@ -1133,16 +1134,27 @@ static int brcmuart_remove(struct platform_device *pdev)
 static int __maybe_unused brcmuart_suspend(struct device *dev)
 {
        struct brcmuart_priv *priv = dev_get_drvdata(dev);
+       struct uart_8250_port *up = serial8250_get_port(priv->line);
+       struct uart_port *port = &up->port;
 
        serial8250_suspend_port(priv->line);
        clk_disable_unprepare(priv->baud_mux_clk);
 
+       /*
+        * This will prevent resume from enabling RTS before the
+        *  baud rate has been resored.
+        */
+       priv->saved_mctrl = port->mctrl;
+       port->mctrl = 0;
+
        return 0;
 }
 
 static int __maybe_unused brcmuart_resume(struct device *dev)
 {
        struct brcmuart_priv *priv = dev_get_drvdata(dev);
+       struct uart_8250_port *up = serial8250_get_port(priv->line);
+       struct uart_port *port = &up->port;
        int ret;
 
        ret = clk_prepare_enable(priv->baud_mux_clk);
@@ -1165,6 +1177,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
                start_rx_dma(serial8250_get_port(priv->line));
        }
        serial8250_resume_port(priv->line);
+       port->mctrl = priv->saved_mctrl;
        return 0;
 }
 
index 5d43de143f3399459e2d20a2d70bd45c7a7d9a41..60f8fffdfd7765ada0d9a310fa211a088d746ce6 100644 (file)
@@ -1324,29 +1324,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
 {
        int scr;
        int lcr;
-       int actual_baud;
-       int tolerance;
 
-       for (scr = 5 ; scr <= 15 ; scr++) {
-               actual_baud = 921600 * 16 / scr;
-               tolerance = actual_baud / 50;
+       for (scr = 16; scr > 4; scr--) {
+               unsigned int maxrate = port->uartclk / scr;
+               unsigned int divisor = max(maxrate / baud, 1U);
+               int delta = maxrate / divisor - baud;
 
-               if ((baud < actual_baud + tolerance) &&
-                       (baud > actual_baud - tolerance)) {
+               if (baud > maxrate + baud / 50)
+                       continue;
 
+               if (delta > baud / 50)
+                       divisor++;
+
+               if (divisor > 0xffff)
+                       continue;
+
+               /* Update delta due to possible divisor change */
+               delta = maxrate / divisor - baud;
+               if (abs(delta) < baud / 50) {
                        lcr = serial_port_in(port, UART_LCR);
                        serial_port_out(port, UART_LCR, lcr | 0x80);
-
-                       serial_port_out(port, UART_DLL, 1);
-                       serial_port_out(port, UART_DLM, 0);
+                       serial_port_out(port, UART_DLL, divisor & 0xff);
+                       serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
                        serial_port_out(port, 2, 16 - scr);
                        serial_port_out(port, UART_LCR, lcr);
                        return;
-               } else if (baud > actual_baud) {
-                       break;
                }
        }
-       serial8250_do_set_divisor(port, baud, quot, quot_frac);
 }
 static int pci_pericom_setup(struct serial_private *priv,
                  const struct pciserial_board *board,
@@ -2291,12 +2295,19 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .setup      = pci_pericom_setup_four_at_eight,
        },
        {
-               .vendor     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+               .vendor     = PCI_VENDOR_ID_ACCESIO,
                .device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
                .subvendor  = PCI_ANY_ID,
                .subdevice  = PCI_ANY_ID,
                .setup      = pci_pericom_setup_four_at_eight,
        },
+       {
+               .vendor     = PCI_VENDOR_ID_ACCESIO,
+               .device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+               .subvendor  = PCI_ANY_ID,
+               .subdevice  = PCI_ANY_ID,
+               .setup      = pci_pericom_setup_four_at_eight,
+       },
        {
                .vendor     = PCI_VENDOR_ID_ACCESIO,
                .device     = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
index 5775cbff8f6ebf5916b61087384901b0ed684b4e..46e2079ad1aa2021d8a11ab3181a2ec9ca580ad4 100644 (file)
@@ -2024,13 +2024,6 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
        struct uart_8250_port *up = up_to_u8250p(port);
        unsigned char mcr;
 
-       if (port->rs485.flags & SER_RS485_ENABLED) {
-               if (serial8250_in_MCR(up) & UART_MCR_RTS)
-                       mctrl |= TIOCM_RTS;
-               else
-                       mctrl &= ~TIOCM_RTS;
-       }
-
        mcr = serial8250_TIOCM_to_MCR(mctrl);
 
        mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
index 6ff94cfcd9dbd3f339c4d880c86b55407ffdbf1c..fc543ac97c13193b13218e71bb8dba88892622d1 100644 (file)
@@ -1533,7 +1533,7 @@ config SERIAL_LITEUART
        tristate "LiteUART serial port support"
        depends on HAS_IOMEM
        depends on OF || COMPILE_TEST
-       depends on LITEX
+       depends on LITEX || COMPILE_TEST
        select SERIAL_CORE
        help
          This driver is for the FPGA-based LiteUART serial controller from LiteX
index d361cd84ff8cfecf95b4d0b6b433796bdf700c24..52518a606c06a2c597a47c1b9c3d4735b2072a9c 100644 (file)
@@ -2947,6 +2947,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
 
 static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
        { "ARMH0011", 0 },
+       { "ARMHB000", 0 },
        {},
 };
 MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
index b1e7190ae4836e4e34ffc05bc048de18f110c0c5..ac5112def40d1df35d93ef8eb453fe9d3884d95c 100644 (file)
@@ -2625,6 +2625,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
 EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
 EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
 
index dbc0559a9157546c6377a433a7071d0b0dd74251..2941659e52747f9d6b1833789b36dcdfc3122532 100644 (file)
@@ -270,8 +270,10 @@ static int liteuart_probe(struct platform_device *pdev)
 
        /* get membase */
        port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
-       if (IS_ERR(port->membase))
-               return PTR_ERR(port->membase);
+       if (IS_ERR(port->membase)) {
+               ret = PTR_ERR(port->membase);
+               goto err_erase_id;
+       }
 
        /* values not from device tree */
        port->dev = &pdev->dev;
@@ -285,7 +287,18 @@ static int liteuart_probe(struct platform_device *pdev)
        port->line = dev_id;
        spin_lock_init(&port->lock);
 
-       return uart_add_one_port(&liteuart_driver, &uart->port);
+       platform_set_drvdata(pdev, port);
+
+       ret = uart_add_one_port(&liteuart_driver, &uart->port);
+       if (ret)
+               goto err_erase_id;
+
+       return 0;
+
+err_erase_id:
+       xa_erase(&liteuart_array, uart->id);
+
+       return ret;
 }
 
 static int liteuart_remove(struct platform_device *pdev)
@@ -293,6 +306,7 @@ static int liteuart_remove(struct platform_device *pdev)
        struct uart_port *port = platform_get_drvdata(pdev);
        struct liteuart_port *uart = to_liteuart_port(port);
 
+       uart_remove_one_port(&liteuart_driver, port);
        xa_erase(&liteuart_array, uart->id);
 
        return 0;
index fcef7a961430b37aa5a4b0f7940912f575f52fb6..489d19274f9ade45497d86b8da52623b4ccd062a 100644 (file)
@@ -598,6 +598,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
        u32 val;
        int ret;
 
+       if (IS_ENABLED(CONFIG_CONSOLE_POLL))
+               return;
+
        if (!dma->chan)
                return;
 
index 45e2e4109acd0d8a3557b379674bb2726a95310c..b6223fab0687da64f947e70f20b72b6b419590ff 100644 (file)
@@ -1506,7 +1506,7 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = {
        .fifo_mode_enable_status        = false,
        .uart_max_port                  = 5,
        .max_dma_burst_bytes            = 4,
-       .error_tolerance_low_range      = 0,
+       .error_tolerance_low_range      = -4,
        .error_tolerance_high_range     = 4,
 };
 
@@ -1517,7 +1517,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = {
        .fifo_mode_enable_status        = false,
        .uart_max_port                  = 5,
        .max_dma_burst_bytes            = 4,
-       .error_tolerance_low_range      = 0,
+       .error_tolerance_low_range      = -4,
        .error_tolerance_high_range     = 4,
 };
 
index 1e738f265eeaa210626df63cb124145f34d90751..61e3dd0222af141bb9f568299165889cb3ce25b5 100644 (file)
@@ -1075,6 +1075,11 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
                goto out;
 
        if (!tty_io_error(tty)) {
+               if (uport->rs485.flags & SER_RS485_ENABLED) {
+                       set &= ~TIOCM_RTS;
+                       clear &= ~TIOCM_RTS;
+               }
+
                uart_update_mctrl(uport, set, clear);
                ret = 0;
        }
@@ -1549,6 +1554,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
 {
        struct uart_state *state = container_of(port, struct uart_state, port);
        struct uart_port *uport = uart_port_check(state);
+       char *buf;
 
        /*
         * At this point, we stop accepting input.  To do this, we
@@ -1570,8 +1576,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
         */
        tty_port_set_suspended(port, 0);
 
-       uart_change_pm(state, UART_PM_STATE_OFF);
+       /*
+        * Free the transmit buffer.
+        */
+       spin_lock_irq(&uport->lock);
+       buf = state->xmit.buf;
+       state->xmit.buf = NULL;
+       spin_unlock_irq(&uport->lock);
 
+       if (buf)
+               free_page((unsigned long)buf);
+
+       uart_change_pm(state, UART_PM_STATE_OFF);
 }
 
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
index 1f3b4a1422126bff26011c4ff7fd48bfa1f38e8c..f9af7ebe003d7b917c24cc83f4b8a47ee4a67263 100644 (file)
@@ -337,19 +337,6 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
        cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
 }
 
-static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
-{
-       struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
-       int current_trb = priv_req->start_trb;
-
-       while (current_trb != priv_req->end_trb) {
-               cdns3_ep_inc_deq(priv_ep);
-               current_trb = priv_ep->dequeue;
-       }
-
-       cdns3_ep_inc_deq(priv_ep);
-}
-
 /**
  * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
  * @priv_dev: Extended gadget object
@@ -1517,10 +1504,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
 
                trb = priv_ep->trb_pool + priv_ep->dequeue;
 
-               /* Request was dequeued and TRB was changed to TRB_LINK. */
-               if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+               /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
+               while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
                        trace_cdns3_complete_trb(priv_ep, trb);
-                       cdns3_move_deq_to_next_trb(priv_req);
+                       cdns3_ep_inc_deq(priv_ep);
+                       trb = priv_ep->trb_pool + priv_ep->dequeue;
                }
 
                if (!request->stream_id) {
index ad9aee3f1e3982b1c07023772c088b3c394e07df..97866bfb2da9d941c9aa915f78f8c79b26060fe8 100644 (file)
@@ -987,6 +987,9 @@ int cdnsp_endpoint_init(struct cdnsp_device *pdev,
 
        /* Set up the endpoint ring. */
        pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
+       if (!pep->ring)
+               return -ENOMEM;
+
        pep->skip = false;
 
        /* Fill the endpoint context */
index 84dadfa726aa6a816283b610ff258e0d2b53990a..9643b905e2d8b38da465ff48028d23274be0a0c5 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include "core.h"
 #include "drd.h"
 #include "host-export.h"
index f1d100671ee6a174ec3637c2d5f7bc7baec01d36..097142ffb184203735f084ff3c823c67ba7d5e59 100644 (file)
@@ -420,15 +420,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
        data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
        if (IS_ERR(data->phy)) {
                ret = PTR_ERR(data->phy);
-               if (ret == -ENODEV) {
-                       data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
-                       if (IS_ERR(data->phy)) {
-                               ret = PTR_ERR(data->phy);
-                               if (ret == -ENODEV)
-                                       data->phy = NULL;
-                               else
-                                       goto err_clk;
-                       }
+               if (ret != -ENODEV)
+                       goto err_clk;
+               data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+               if (IS_ERR(data->phy)) {
+                       ret = PTR_ERR(data->phy);
+                       if (ret == -ENODEV)
+                               data->phy = NULL;
+                       else
+                               goto err_clk;
                }
        }
 
index 16b1fd9dc60c959703368f330f75984763dddf93..48bc8a4814ac4e985e118242eecf73202bc7148f 100644 (file)
@@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
         * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
         * (see the end of section 5.6.3), so don't warn about them.
         */
-       maxp = usb_endpoint_maxp(&endpoint->desc);
+       maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
        if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
                dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
                    cfgno, inum, asnum, d->bEndpointAddress);
@@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
                maxpacket_maxes = full_speed_maxpacket_maxes;
                break;
        case USB_SPEED_HIGH:
-               /* Bits 12..11 are allowed only for HS periodic endpoints */
+               /* Multiple-transactions bits are allowed only for HS periodic endpoints */
                if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
-                       i = maxp & (BIT(12) | BIT(11));
+                       i = maxp & USB_EP_MAXP_MULT_MASK;
                        maxp &= ~i;
                }
                fallthrough;
index 86658a81d28445181198ea9f2876a8a2b44fdee9..00070a8a65079206648ca39f83f35cdef9038601 100644 (file)
@@ -4700,8 +4700,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
        if (oldspeed == USB_SPEED_LOW)
                delay = HUB_LONG_RESET_TIME;
 
-       mutex_lock(hcd->address0_mutex);
-
        /* Reset the device; full speed may morph to high speed */
        /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
        retval = hub_port_reset(hub, port1, udev, delay, false);
@@ -5016,7 +5014,6 @@ fail:
                hub_port_disable(hub, port1, 0);
                update_devnum(udev, devnum);    /* for disconnect processing */
        }
-       mutex_unlock(hcd->address0_mutex);
        return retval;
 }
 
@@ -5191,6 +5188,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
        struct usb_port *port_dev = hub->ports[port1 - 1];
        struct usb_device *udev = port_dev->child;
        static int unreliable_port = -1;
+       bool retry_locked;
 
        /* Disconnect any existing devices under this port */
        if (udev) {
@@ -5246,8 +5244,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                unit_load = 100;
 
        status = 0;
-       for (i = 0; i < PORT_INIT_TRIES; i++) {
 
+       for (i = 0; i < PORT_INIT_TRIES; i++) {
+               usb_lock_port(port_dev);
+               mutex_lock(hcd->address0_mutex);
+               retry_locked = true;
                /* reallocate for each attempt, since references
                 * to the previous one can escape in various ways
                 */
@@ -5255,6 +5256,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                if (!udev) {
                        dev_err(&port_dev->dev,
                                        "couldn't allocate usb_device\n");
+                       mutex_unlock(hcd->address0_mutex);
+                       usb_unlock_port(port_dev);
                        goto done;
                }
 
@@ -5276,12 +5279,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                }
 
                /* reset (non-USB 3.0 devices) and get descriptor */
-               usb_lock_port(port_dev);
                status = hub_port_init(hub, udev, port1, i);
-               usb_unlock_port(port_dev);
                if (status < 0)
                        goto loop;
 
+               mutex_unlock(hcd->address0_mutex);
+               usb_unlock_port(port_dev);
+               retry_locked = false;
+
                if (udev->quirks & USB_QUIRK_DELAY_INIT)
                        msleep(2000);
 
@@ -5374,6 +5379,10 @@ loop:
                usb_ep0_reinit(udev);
                release_devnum(udev);
                hub_free_dev(udev);
+               if (retry_locked) {
+                       mutex_unlock(hcd->address0_mutex);
+                       usb_unlock_port(port_dev);
+               }
                usb_put_dev(udev);
                if ((status == -ENOTCONN) || (status == -ENOTSUPP))
                        break;
@@ -5915,6 +5924,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
        bos = udev->bos;
        udev->bos = NULL;
 
+       mutex_lock(hcd->address0_mutex);
+
        for (i = 0; i < PORT_INIT_TRIES; ++i) {
 
                /* ep0 maxpacket size may change; let the HCD know about it.
@@ -5924,6 +5935,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
                if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
                        break;
        }
+       mutex_unlock(hcd->address0_mutex);
 
        if (ret < 0)
                goto re_enumerate;
index 8239fe7129dd7a3b241813e215c04dd69df20d36..019351c0b52cfa250b4325fe9434e416fb4cd342 100644 (file)
@@ -434,6 +434,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1532, 0x0116), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
+       { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
        { USB_DEVICE(0x17ef, 0xa012), .driver_info =
                        USB_QUIRK_DISCONNECT_SUSPEND },
index 4ab4a1d5062b2a0ebeecf5c0b77ef3537d1f25e6..ab8d7dad9f567093743a5a17be4fa39e4f92e6be 100644 (file)
@@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
                        }
                        ctrl |= DXEPCTL_CNAK;
                } else {
+                       hs_req->req.frame_number = hs_ep->target_frame;
+                       hs_req->req.actual = 0;
                        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
                        return;
                }
@@ -2857,9 +2859,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
 
        do {
                hs_req = get_ep_head(hs_ep);
-               if (hs_req)
+               if (hs_req) {
+                       hs_req->req.frame_number = hs_ep->target_frame;
+                       hs_req->req.actual = 0;
                        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
                                                    -ENODATA);
+               }
                dwc2_gadget_incr_frame_num(hs_ep);
                /* Update current frame number value. */
                hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
@@ -2912,8 +2917,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
 
        while (dwc2_gadget_target_frame_elapsed(ep)) {
                hs_req = get_ep_head(ep);
-               if (hs_req)
+               if (hs_req) {
+                       hs_req->req.frame_number = ep->target_frame;
+                       hs_req->req.actual = 0;
                        dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
+               }
 
                dwc2_gadget_incr_frame_num(ep);
                /* Update current frame number value. */
@@ -3002,8 +3010,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
        while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
                hs_req = get_ep_head(hs_ep);
-               if (hs_req)
+               if (hs_req) {
+                       hs_req->req.frame_number = hs_ep->target_frame;
+                       hs_req->req.actual = 0;
                        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+               }
 
                dwc2_gadget_incr_frame_num(hs_ep);
                /* Update current frame number value. */
index 89a788326c5620e65686493a8683ead0b5c2fcdf..24beff610cf2c65b057f8f2b12f1a04bf80b0685 100644 (file)
@@ -59,7 +59,7 @@
 #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
 
 /* If we get a NAK, wait this long before retrying */
-#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L)
+#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
 
 /**
  * dwc2_periodic_channel_available() - Checks that a channel is available for a
index 643239d7d370078d9288d295ee67256b7cf830b1..f4c09951b517ebcc89eb774cd11c4631dcd50f55 100644 (file)
@@ -1594,9 +1594,11 @@ static int dwc3_probe(struct platform_device *pdev)
 
        dwc3_get_properties(dwc);
 
-       ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
-       if (ret)
-               return ret;
+       if (!dwc->sysdev_is_parent) {
+               ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+               if (ret)
+                       return ret;
+       }
 
        dwc->reset = devm_reset_control_array_get_optional_shared(dev);
        if (IS_ERR(dwc->reset))
index 620c8d3914d7ccbe90dce6f5b9cfa02f48ebc23a..5c491d0a19d7b90d5d2b441d8a8266b01aa06acd 100644 (file)
 #define DWC3_GHWPARAMS8                0xc600
 #define DWC3_GUCTL3            0xc60c
 #define DWC3_GFLADJ            0xc630
-#define DWC3_GHWPARAMS9                0xc680
+#define DWC3_GHWPARAMS9                0xc6e0
 
 /* Device Registers */
 #define DWC3_DCFG              0xc700
index 9abbd01028c5ffcc3508f37e1776908f9648284d..3cb01cdd02c29870a540a06a0539b1d3adba7448 100644 (file)
@@ -649,7 +649,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
        struct dwc3_qcom        *qcom = platform_get_drvdata(pdev);
        struct device_node      *np = pdev->dev.of_node, *dwc3_np;
        struct device           *dev = &pdev->dev;
-       struct property         *prop;
        int                     ret;
 
        dwc3_np = of_get_compatible_child(np, "snps,dwc3");
@@ -658,20 +657,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
-       if (!prop) {
-               ret = -ENOMEM;
-               dev_err(dev, "unable to allocate memory for property\n");
-               goto node_put;
-       }
-
-       prop->name = "tx-fifo-resize";
-       ret = of_add_property(dwc3_np, prop);
-       if (ret) {
-               dev_err(dev, "unable to add property\n");
-               goto node_put;
-       }
-
        ret = of_platform_populate(np, NULL, NULL, dev);
        if (ret) {
                dev_err(dev, "failed to register dwc3 core - %d\n", ret);
index 23de2a5a40d6efceb45c2fb7f49bf0bf7d6eb3fc..7e3db00e9759529579af1c11dd12c9f4ab821937 100644 (file)
@@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
        if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
                int link_state;
 
+               /*
+                * Initiate remote wakeup if the link state is in U3 when
+                * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
+                * link state is in U1/U2, no remote wakeup is needed. The Start
+                * Transfer command will initiate the link recovery.
+                */
                link_state = dwc3_gadget_get_link_state(dwc);
-               if (link_state == DWC3_LINK_STATE_U1 ||
-                   link_state == DWC3_LINK_STATE_U2 ||
-                   link_state == DWC3_LINK_STATE_U3) {
+               switch (link_state) {
+               case DWC3_LINK_STATE_U2:
+                       if (dwc->gadget->speed >= USB_SPEED_SUPER)
+                               break;
+
+                       fallthrough;
+               case DWC3_LINK_STATE_U3:
                        ret = __dwc3_gadget_wakeup(dwc);
                        dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
                                        ret);
+                       break;
                }
        }
 
@@ -3252,6 +3263,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
        struct dwc3             *dwc = dep->dwc;
        bool                    no_started_trb = true;
 
+       if (!dep->endpoint.desc)
+               return no_started_trb;
+
        dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
 
        if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
@@ -3299,6 +3313,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
 {
        int status = 0;
 
+       if (!dep->endpoint.desc)
+               return;
+
        if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
                dwc3_gadget_endpoint_frame_from_event(dep, event);
 
@@ -3352,6 +3369,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
        if (cmd != DWC3_DEPCMD_ENDTRANSFER)
                return;
 
+       /*
+        * The END_TRANSFER command will cause the controller to generate a
+        * NoStream Event, and it's not due to the host DP NoStream rejection.
+        * Ignore the next NoStream event.
+        */
+       if (dep->stream_capable)
+               dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
+
        dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
        dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
        dwc3_gadget_ep_cleanup_cancelled_requests(dep);
@@ -3574,14 +3599,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
        WARN_ON_ONCE(ret);
        dep->resource_index = 0;
 
-       /*
-        * The END_TRANSFER command will cause the controller to generate a
-        * NoStream Event, and it's not due to the host DP NoStream rejection.
-        * Ignore the next NoStream event.
-        */
-       if (dep->stream_capable)
-               dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
-
        if (!interrupt)
                dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
        else
index 504c1cbc255d14a9ba7fefbc1d4f1e10a67f264c..284eea9f6e4d8dbfc603e7ad77a8674904bb3a3f 100644 (file)
@@ -1679,6 +1679,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
        struct usb_function             *f = NULL;
        u8                              endp;
 
+       if (w_length > USB_COMP_EP0_BUFSIZ) {
+               if (ctrl->bRequestType == USB_DIR_OUT) {
+                       goto done;
+               } else {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
+                       w_length = USB_COMP_EP0_BUFSIZ;
+               }
+       }
+
        /* partial re-init of the response message; the function or the
         * gadget might need to intercept e.g. a control-OUT completion
         * when we delegate to it.
@@ -2209,7 +2221,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
        if (!cdev->req)
                return -ENOMEM;
 
-       cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+       cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
        if (!cdev->req->buf)
                goto fail;
 
index e1d566c9918ae576d55fc322f317d2992193b7f1..355bc7dab9d5f623fbef3b92dd2530d2dc47e443 100644 (file)
@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
                goto fail_1;
        }
 
-       req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+       req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
        if (!req->buf) {
                err = -ENOMEM;
                stp = 2;
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
        void *data = NULL;
        u16 len = 0;
 
+       if (length > DBGP_REQ_LEN) {
+               if (ctrl->bRequestType == USB_DIR_OUT) {
+                       return err;
+               } else {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(DBGP_REQ_LEN);
+                       length = DBGP_REQ_LEN;
+               }
+       }
+
+
        if (request == USB_REQ_GET_DESCRIPTOR) {
                switch (value>>8) {
                case USB_DT_DEVICE:
index 78be947502329b2b7c041f7ff32bd179d6ac6cd5..63150e3889efb12787f4440416ef761df71bb7fa 100644 (file)
@@ -110,6 +110,8 @@ enum ep0_state {
 /* enough for the whole queue: most events invalidate others */
 #define        N_EVENT                 5
 
+#define RBUF_SIZE              256
+
 struct dev_data {
        spinlock_t                      lock;
        refcount_t                      count;
@@ -144,7 +146,7 @@ struct dev_data {
        struct dentry                   *dentry;
 
        /* except this scratch i/o buffer for ep0 */
-       u8                              rbuf [256];
+       u8                              rbuf[RBUF_SIZE];
 };
 
 static inline void get_dev (struct dev_data *data)
@@ -1331,6 +1333,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
        u16                             w_value = le16_to_cpu(ctrl->wValue);
        u16                             w_length = le16_to_cpu(ctrl->wLength);
 
+       if (w_length > RBUF_SIZE) {
+               if (ctrl->bRequestType == USB_DIR_OUT) {
+                       return value;
+               } else {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(RBUF_SIZE);
+                       w_length = RBUF_SIZE;
+               }
+       }
+
        spin_lock (&dev->lock);
        dev->setup_abort = 0;
        if (dev->state == STATE_DEV_UNCONNECTED) {
index f5ca670776a370f10fd64e8358df849463afcd26..857159dd5ae05f3e00c7324fd9b696ee38bc356e 100644 (file)
@@ -2136,7 +2136,7 @@ static int xudc_probe(struct platform_device *pdev)
 
        ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
        if (ret)
-               goto fail;
+               goto err_disable_unprepare_clk;
 
        udc->dev = &udc->gadget.dev;
 
@@ -2155,6 +2155,9 @@ static int xudc_probe(struct platform_device *pdev)
                 udc->dma_enabled ? "with DMA" : "without DMA");
 
        return 0;
+
+err_disable_unprepare_clk:
+       clk_disable_unprepare(udc->clk);
 fail:
        dev_err(&pdev->dev, "probe failed, %d\n", ret);
        return ret;
index af946c42b6f0a0be25b948a0d0f6b5daf7e1a97b..df3522dab31b5eaac4ab8c24759fc1e4e963e882 100644 (file)
@@ -717,6 +717,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
                        continue;
 
                retval = xhci_disable_slot(xhci, i);
+               xhci_free_virt_device(xhci, i);
                if (retval)
                        xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
                                 i, retval);
index 311597bba80e2a4d469277042cbde721e93af335..d0b6806275e01a92e21fa79e575fc4eb3fea72a7 100644 (file)
@@ -366,7 +366,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 /* Must be called with xhci->lock held, releases and aquires lock back */
 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
-       u32 temp_32;
+       struct xhci_segment *new_seg    = xhci->cmd_ring->deq_seg;
+       union xhci_trb *new_deq         = xhci->cmd_ring->dequeue;
+       u64 crcr;
        int ret;
 
        xhci_dbg(xhci, "Abort command ring\n");
@@ -375,13 +377,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 
        /*
         * The control bits like command stop, abort are located in lower
-        * dword of the command ring control register. Limit the write
-        * to the lower dword to avoid corrupting the command ring pointer
-        * in case if the command ring is stopped by the time upper dword
-        * is written.
+        * dword of the command ring control register.
+        * Some controllers require all 64 bits to be written to abort the ring.
+        * Make sure the upper dword is valid, pointing to the next command,
+        * avoiding corrupting the command ring pointer in case the command ring
+        * is stopped by the time the upper dword is written.
         */
-       temp_32 = readl(&xhci->op_regs->cmd_ring);
-       writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+       next_trb(xhci, NULL, &new_seg, &new_deq);
+       if (trb_is_link(new_deq))
+               next_trb(xhci, NULL, &new_seg, &new_deq);
+
+       crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
+       xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
 
        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
         * completion of the Command Abort operation. If CRR is not negated in 5
@@ -1518,7 +1525,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                /* Delete default control endpoint resources */
                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
-       xhci_free_virt_device(xhci, slot_id);
 }
 
 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
index 1bf494b649bd24c834c26acaf984704e91203974..c8af2cd2216d6012baa82cdcaa547851294c288c 100644 (file)
@@ -1400,6 +1400,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
 
 static int tegra_xusb_probe(struct platform_device *pdev)
 {
+       struct of_phandle_args args;
        struct tegra_xusb *tegra;
        struct device_node *np;
        struct resource *regs;
@@ -1454,10 +1455,17 @@ static int tegra_xusb_probe(struct platform_device *pdev)
                goto put_padctl;
        }
 
-       tegra->padctl_irq = of_irq_get(np, 0);
-       if (tegra->padctl_irq <= 0) {
-               err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
-               goto put_padctl;
+       /* Older device-trees don't have padctrl interrupt */
+       err = of_irq_parse_one(np, 0, &args);
+       if (!err) {
+               tegra->padctl_irq = of_irq_get(np, 0);
+               if (tegra->padctl_irq <= 0) {
+                       err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
+                       goto put_padctl;
+               }
+       } else {
+               dev_dbg(&pdev->dev,
+                       "%pOF is missing an interrupt, disabling PM support\n", np);
        }
 
        tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
@@ -1696,11 +1704,15 @@ static int tegra_xusb_probe(struct platform_device *pdev)
                goto remove_usb3;
        }
 
-       err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq,
-                                       IRQF_ONESHOT, dev_name(&pdev->dev), tegra);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
-               goto remove_usb3;
+       if (tegra->padctl_irq) {
+               err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq,
+                                               NULL, tegra_xusb_padctl_irq,
+                                               IRQF_ONESHOT, dev_name(&pdev->dev),
+                                               tegra);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
+                       goto remove_usb3;
+               }
        }
 
        err = tegra_xusb_enable_firmware_messages(tegra);
@@ -1718,13 +1730,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
        /* Enable wake for both USB 2.0 and USB 3.0 roothubs */
        device_init_wakeup(&tegra->hcd->self.root_hub->dev, true);
        device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true);
-       device_init_wakeup(tegra->dev, true);
 
        pm_runtime_use_autosuspend(tegra->dev);
        pm_runtime_set_autosuspend_delay(tegra->dev, 2000);
        pm_runtime_mark_last_busy(tegra->dev);
        pm_runtime_set_active(tegra->dev);
-       pm_runtime_enable(tegra->dev);
+
+       if (tegra->padctl_irq) {
+               device_init_wakeup(tegra->dev, true);
+               pm_runtime_enable(tegra->dev);
+       }
 
        return 0;
 
@@ -1772,7 +1787,9 @@ static int tegra_xusb_remove(struct platform_device *pdev)
        dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
                          tegra->fw.phys);
 
-       pm_runtime_disable(&pdev->dev);
+       if (tegra->padctl_irq)
+               pm_runtime_disable(&pdev->dev);
+
        pm_runtime_put(&pdev->dev);
 
        tegra_xusb_powergate_partitions(tegra);
index 902f410874e8eac93552b3821dc02dc0dad3f41f..f5b1bcc875dedce9834820ae9d7ea492019707e6 100644 (file)
@@ -3934,7 +3934,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_slot_ctx *slot_ctx;
        int i, ret;
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
         * We called pm_runtime_get_noresume when the device was attached.
         * Decrement the counter here to allow controller to runtime suspend
@@ -3942,7 +3941,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
         */
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                pm_runtime_put_noidle(hcd->self.controller);
-#endif
 
        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
        /* If the host is halted due to driver unload, we still need to free the
@@ -3961,9 +3959,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
        }
        virt_dev->udev = NULL;
-       ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret)
-               xhci_free_virt_device(xhci, udev->slot_id);
+       xhci_disable_slot(xhci, udev->slot_id);
+       xhci_free_virt_device(xhci, udev->slot_id);
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -3973,7 +3970,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
        u32 state;
        int ret = 0;
 
-       command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+       command = xhci_alloc_command(xhci, true, GFP_KERNEL);
        if (!command)
                return -ENOMEM;
 
@@ -3998,6 +3995,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
+
+       wait_for_completion(command->completion);
+
+       if (command->status != COMP_SUCCESS)
+               xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
+                         slot_id, command->status);
+
+       xhci_free_command(xhci, command);
+
        return ret;
 }
 
@@ -4094,23 +4100,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 
        xhci_debugfs_create_slot(xhci, slot_id);
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
         * If resetting upon resume, we can't put the controller into runtime
         * suspend if there is a device attached.
         */
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                pm_runtime_get_noresume(hcd->self.controller);
-#endif
 
        /* Is this a LS or FS device under a HS hub? */
        /* Hub or peripherial? */
        return 1;
 
 disable_slot:
-       ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret)
-               xhci_free_virt_device(xhci, udev->slot_id);
+       xhci_disable_slot(xhci, udev->slot_id);
+       xhci_free_virt_device(xhci, udev->slot_id);
 
        return 0;
 }
@@ -4240,6 +4243,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
                mutex_unlock(&xhci->mutex);
                ret = xhci_disable_slot(xhci, udev->slot_id);
+               xhci_free_virt_device(xhci, udev->slot_id);
                if (!ret)
                        xhci_alloc_dev(hcd, udev);
                kfree(command->completion);
index a484ff5e4ebf83761964213eeb33d79f0ef28f6a..546fce4617a8548fb755a152132a36d3ba344893 100644 (file)
@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),                          /* Telit SBL FN980 flashing device */
          .driver_info = NCTRL(0) | ZLP },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x9200),                          /* Telit LE910S1 flashing device */
+         .driver_info = NCTRL(0) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
        { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),                     /* Fibocom FM101-GL (laptop MBIM) */
+         .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
index f45ca7ddf78eac62db2948aaa83c665e5fadfe5e..a70fd86f735ca866b58a44ce4884ad8f6e71e94a 100644 (file)
@@ -432,6 +432,7 @@ static int pl2303_detect_type(struct usb_serial *serial)
        case 0x200:
                switch (bcdDevice) {
                case 0x100:
+               case 0x105:
                case 0x305:
                case 0x405:
                        /*
index 7a2a17866a823b36d5e11bce7e31517fd19c1284..72f9001b07921c96c4f3a91937d1e622fb0edafa 100644 (file)
@@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
                ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
                                             FUSB_REG_MASK_BC_LVL |
                                             FUSB_REG_MASK_COMP_CHNG,
-                                            FUSB_REG_MASK_COMP_CHNG);
+                                            FUSB_REG_MASK_BC_LVL);
                if (ret < 0) {
                        fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
                                    ret);
                        goto done;
                }
                chip->intr_comp_chng = true;
+               chip->intr_bc_lvl = false;
                break;
        case TYPEC_CC_RD:
                ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
                                             FUSB_REG_MASK_BC_LVL |
                                             FUSB_REG_MASK_COMP_CHNG,
-                                            FUSB_REG_MASK_BC_LVL);
+                                            FUSB_REG_MASK_COMP_CHNG);
                if (ret < 0) {
                        fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
                                    ret);
                        goto done;
                }
                chip->intr_bc_lvl = true;
+               chip->intr_comp_chng = false;
                break;
        default:
                break;
index 7f2f3ff1b39112e72b683873c2a031fdb155c644..6010b99011261b5127c76bec8f959997dd08a00b 100644 (file)
@@ -4110,11 +4110,7 @@ static void run_state_machine(struct tcpm_port *port)
                                       tcpm_try_src(port) ? SRC_TRY
                                                          : SNK_ATTACHED,
                                       0);
-               else
-                       /* Wait for VBUS, but not forever */
-                       tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
                break;
-
        case SRC_TRY:
                port->try_src_count++;
                tcpm_set_cc(port, tcpm_rp_cc(port));
index fb8ef12bbe9c85de3e0ea4bd69cbe02b39f3c68d..6d27a5b5e3cac6beb8df1edce9e59f14e5abb8be 100644 (file)
@@ -653,7 +653,7 @@ static int cd321x_switch_power_state(struct tps6598x *tps, u8 target_state)
        if (state == target_state)
                return 0;
 
-       ret = tps6598x_exec_cmd(tps, "SPSS", sizeof(u8), &target_state, 0, NULL);
+       ret = tps6598x_exec_cmd(tps, "SSPS", sizeof(u8), &target_state, 0, NULL);
        if (ret)
                return ret;
 
@@ -707,6 +707,7 @@ static int tps6598x_probe(struct i2c_client *client)
        u32 conf;
        u32 vid;
        int ret;
+       u64 mask1;
 
        tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
        if (!tps)
@@ -730,11 +731,6 @@ static int tps6598x_probe(struct i2c_client *client)
        if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
                tps->i2c_protocol = true;
 
-       /* Make sure the controller has application firmware running */
-       ret = tps6598x_check_mode(tps);
-       if (ret)
-               return ret;
-
        if (np && of_device_is_compatible(np, "apple,cd321x")) {
                /* Switch CD321X chips to the correct system power state */
                ret = cd321x_switch_power_state(tps, TPS_SYSTEM_POWER_STATE_S0);
@@ -742,24 +738,27 @@ static int tps6598x_probe(struct i2c_client *client)
                        return ret;
 
                /* CD321X chips have all interrupts masked initially */
-               ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
-                                       APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
-                                       APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
-                                       APPLE_CD_REG_INT_PLUG_EVENT);
-               if (ret)
-                       return ret;
+               mask1 = APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
+                       APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
+                       APPLE_CD_REG_INT_PLUG_EVENT;
 
                irq_handler = cd321x_interrupt;
        } else {
                /* Enable power status, data status and plug event interrupts */
-               ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
-                                      TPS_REG_INT_POWER_STATUS_UPDATE |
-                                      TPS_REG_INT_DATA_STATUS_UPDATE |
-                                      TPS_REG_INT_PLUG_EVENT);
-               if (ret)
-                       return ret;
+               mask1 = TPS_REG_INT_POWER_STATUS_UPDATE |
+                       TPS_REG_INT_DATA_STATUS_UPDATE |
+                       TPS_REG_INT_PLUG_EVENT;
        }
 
+       /* Make sure the controller has application firmware running */
+       ret = tps6598x_check_mode(tps);
+       if (ret)
+               return ret;
+
+       ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, mask1);
+       if (ret)
+               return ret;
+
        ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
        if (ret < 0)
                return ret;
index 5f484fff8dbecbdf7e5515db61720e1efb0050c1..41b0cd17fcbac16dfbaf2b35f0e8121320d2ecc6 100644 (file)
@@ -591,8 +591,11 @@ static void vdpasim_free(struct vdpa_device *vdpa)
                vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
        }
 
-       put_iova_domain(&vdpasim->iova);
-       iova_cache_put();
+       if (vdpa_get_dma_dev(vdpa)) {
+               put_iova_domain(&vdpasim->iova);
+               iova_cache_put();
+       }
+
        kvfree(vdpasim->buffer);
        if (vdpasim->iommu)
                vhost_iotlb_free(vdpasim->iommu);
index 56cd551e0e04dfb5840cddc5a227cffdee1b904f..362f91ec884585c929b0f1cfb22913a6b9c94793 100644 (file)
@@ -98,7 +98,8 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
                        version = cpu_to_le16(0x0201);
 
                if (igd_opregion_shift_copy(buf, &off,
-                                           &version + (pos - OPREGION_VERSION),
+                                           (u8 *)&version +
+                                           (pos - OPREGION_VERSION),
                                            &pos, &remaining, bytes))
                        return -EFAULT;
        }
@@ -121,7 +122,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
                                          OPREGION_SIZE : 0);
 
                if (igd_opregion_shift_copy(buf, &off,
-                                           &rvda + (pos - OPREGION_RVDA),
+                                           (u8 *)&rvda + (pos - OPREGION_RVDA),
                                            &pos, &remaining, bytes))
                        return -EFAULT;
        }
index 82fb75464f923d47a225f8262595af0e51b51753..735d1d344af9d48277508e565221182c695858f0 100644 (file)
@@ -232,7 +232,7 @@ static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
 }
 #endif /* CONFIG_VFIO_NOIOMMU */
 
-/**
+/*
  * IOMMU driver registration
  */
 int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
@@ -285,7 +285,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data);
 static void vfio_group_get(struct vfio_group *group);
 
-/**
+/*
  * Container objects - containers are created when /dev/vfio/vfio is
  * opened, but their lifecycle extends until the last user is done, so
  * it's freed via kref.  Must support container/group/device being
@@ -309,7 +309,7 @@ static void vfio_container_put(struct vfio_container *container)
        kref_put(&container->kref, vfio_container_release);
 }
 
-/**
+/*
  * Group objects - create, release, get, put, search
  */
 static struct vfio_group *
@@ -488,7 +488,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
        return group;
 }
 
-/**
+/*
  * Device objects - create, release, get, put, search
  */
 /* Device reference always implies a group reference */
@@ -595,7 +595,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
        return ret;
 }
 
-/**
+/*
  * Async device support
  */
 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
@@ -689,7 +689,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-/**
+/*
  * VFIO driver API
  */
 void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -831,7 +831,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
 
-/**
+/*
  * Get a reference to the vfio_device for a device.  Even if the
  * caller thinks they own the device, they could be racing with a
  * release call path, so we can't trust drvdata for the shortcut.
@@ -965,7 +965,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
 
-/**
+/*
  * VFIO base fd, /dev/vfio/vfio
  */
 static long vfio_ioctl_check_extension(struct vfio_container *container,
@@ -1183,7 +1183,7 @@ static const struct file_operations vfio_fops = {
        .compat_ioctl   = compat_ptr_ioctl,
 };
 
-/**
+/*
  * VFIO Group fd, /dev/vfio/$GROUP
  */
 static void __vfio_group_unset_container(struct vfio_group *group)
@@ -1536,7 +1536,7 @@ static const struct file_operations vfio_group_fops = {
        .release        = vfio_group_fops_release,
 };
 
-/**
+/*
  * VFIO Device fd
  */
 static int vfio_device_fops_release(struct inode *inode, struct file *filep)
@@ -1611,7 +1611,7 @@ static const struct file_operations vfio_device_fops = {
        .mmap           = vfio_device_fops_mmap,
 };
 
-/**
+/*
  * External user API, exported by symbols to be linked dynamically.
  *
  * The protocol includes:
@@ -1659,7 +1659,7 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
 }
 EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
 
-/**
+/*
  * External user API, exported by symbols to be linked dynamically.
  * The external user passes in a device pointer
  * to verify that:
@@ -1725,7 +1725,7 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
 }
 EXPORT_SYMBOL_GPL(vfio_external_check_extension);
 
-/**
+/*
  * Sub-module support
  */
 /*
@@ -2272,7 +2272,7 @@ struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
 }
 EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
 
-/**
+/*
  * Module/class support
  */
 static char *vfio_devnode(struct device *dev, umode_t *mode)
index 01c59ce7e2508008e3b1de849a13a0253c55a9df..29cced1cd27784a708df783b2f81e2340c86f145 100644 (file)
@@ -1014,12 +1014,12 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 
        mutex_lock(&d->mutex);
        filep->private_data = NULL;
+       vhost_vdpa_clean_irq(v);
        vhost_vdpa_reset(v);
        vhost_dev_stop(&v->vdev);
        vhost_vdpa_iotlb_free(v);
        vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
-       vhost_vdpa_clean_irq(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
        mutex_unlock(&d->mutex);
index 938aefbc75ecc24cae2637ebf9091d275e98b98c..d6ca1c7ad513ff102fe7b6df8f9caafbd40efdc8 100644 (file)
@@ -511,8 +511,6 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 
        vhost_disable_notify(&vsock->dev, vq);
        do {
-               u32 len;
-
                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
@@ -540,7 +538,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
                        continue;
                }
 
-               len = pkt->len;
+               total_len += sizeof(pkt->hdr) + pkt->len;
 
                /* Deliver to monitoring devices all received packets */
                virtio_transport_deliver_tap_pkt(pkt);
@@ -553,9 +551,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
                else
                        virtio_transport_free_pkt(pkt);
 
-               len += sizeof(pkt->hdr);
-               vhost_add_used(vq, head, len);
-               total_len += len;
+               vhost_add_used(vq, head, 0);
                added = true;
        } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 
index 1b451165311c942dff5304d92302d57b80f21c67..40496e9e9b4381fe81da1d3f60ce722dde4c16f3 100644 (file)
@@ -332,13 +332,13 @@ static u8 sticon_build_attr(struct vc_data *conp, u8 color,
                            bool blink, bool underline, bool reverse,
                            bool italic)
 {
-    u8 attr = ((color & 0x70) >> 1) | ((color & 7));
+       u8 fg = color & 7;
+       u8 bg = (color & 0x70) >> 4;
 
-    if (reverse) {
-       color = ((color >> 3) & 0x7) | ((color & 0x7) << 3);
-    }
-
-    return attr;
+       if (reverse)
+               return (fg << 3) | bg;
+       else
+               return (bg << 3) | fg;
 }
 
 static void sticon_invert_region(struct vc_data *conp, u16 *p, int count)
index ef9c57ce090663d5f35c0f80dd425edcaa9a4c44..9a49ea6b5112fb49621f1aa792cd68f6a0f2eda4 100644 (file)
@@ -366,11 +366,17 @@ static void vgacon_init(struct vc_data *c, int init)
        struct uni_pagedir *p;
 
        /*
-        * We cannot be loaded as a module, therefore init is always 1,
-        * but vgacon_init can be called more than once, and init will
-        * not be 1.
+        * We cannot be loaded as a module, therefore init will be 1
+        * if we are the default console, however if we are a fallback
+        * console, for example if fbcon has failed registration, then
+        * init will be 0, so we need to make sure our boot parameters
+        * have been copied to the console structure for vgacon_resize
+        * ultimately called by vc_resize.  Any subsequent calls to
+        * vgacon_init init will have init set to 0 too.
         */
        c->vc_can_do_color = vga_can_do_color;
+       c->vc_scan_lines = vga_scan_lines;
+       c->vc_font.height = c->vc_cell_height = vga_video_font_height;
 
        /* set dimensions manually if init != 0 since vc_resize() will fail */
        if (init) {
@@ -379,8 +385,6 @@ static void vgacon_init(struct vc_data *c, int init)
        } else
                vc_resize(c, vga_video_num_columns, vga_video_num_lines);
 
-       c->vc_scan_lines = vga_scan_lines;
-       c->vc_font.height = c->vc_cell_height = vga_video_font_height;
        c->vc_complement_mask = 0x7700;
        if (vga_512_chars)
                c->vc_hi_font_mask = 0x0800;
index edca3703b9640ccf905cc627140535852806738c..ea42ba6445b2ddaad7cd1a699ff793dec1bceb4c 100644 (file)
@@ -351,6 +351,17 @@ static int efifb_probe(struct platform_device *dev)
        char *option = NULL;
        efi_memory_desc_t md;
 
+       /*
+        * Generic drivers must not be registered if a framebuffer exists.
+        * If a native driver was probed, the display hardware was already
+        * taken and attempting to use the system framebuffer is dangerous.
+        */
+       if (num_registered_fb > 0) {
+               dev_err(&dev->dev,
+                       "efifb: a framebuffer is already registered\n");
+               return -EINVAL;
+       }
+
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
                return -ENODEV;
 
index 62f0ded706815dd6889608252c7ff702ba708ca0..b63074fd892e507b59bf1094d884e0c42a43c193 100644 (file)
@@ -407,6 +407,17 @@ static int simplefb_probe(struct platform_device *pdev)
        struct simplefb_par *par;
        struct resource *mem;
 
+       /*
+        * Generic drivers must not be registered if a framebuffer exists.
+        * If a native driver was probed, the display hardware was already
+        * taken and attempting to use the system framebuffer is dangerous.
+        */
+       if (num_registered_fb > 0) {
+               dev_err(&pdev->dev,
+                       "simplefb: a framebuffer is already registered\n");
+               return -EINVAL;
+       }
+
        if (fb_get_options("simplefb", NULL))
                return -ENODEV;
 
index 5ec51445bee882beb88cefb66dd2f63483a14e8b..6826f986da436859c3e97379c24cda9d645dab93 100644 (file)
@@ -695,6 +695,7 @@ static struct xenbus_driver xenfb_driver = {
        .remove = xenfb_remove,
        .resume = xenfb_resume,
        .otherend_changed = xenfb_backend_changed,
+       .not_essential = true,
 };
 
 static int __init xenfb_init(void)
index 00f64f2f8b72b45fcdb230223aa689adff620bcd..6d2614e34470f463bb553e7e9e80f7bb0a0cef4a 100644 (file)
@@ -14,9 +14,6 @@
 #include <linux/spinlock.h>
 #include <xen/xen.h>
 
-static bool force_used_validation = false;
-module_param(force_used_validation, bool, 0444);
-
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
 #define BAD_RING(_vq, fmt, args...)                            \
@@ -185,9 +182,6 @@ struct vring_virtqueue {
                } packed;
        };
 
-       /* Per-descriptor in buffer length */
-       u32 *buflen;
-
        /* How to notify other side. FIXME: commonalize hcalls! */
        bool (*notify)(struct virtqueue *vq);
 
@@ -496,7 +490,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        unsigned int i, n, avail, descs_used, prev, err_idx;
        int head;
        bool indirect;
-       u32 buflen = 0;
 
        START_USE(vq);
 
@@ -578,7 +571,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
                                                     VRING_DESC_F_NEXT |
                                                     VRING_DESC_F_WRITE,
                                                     indirect);
-                       buflen += sg->length;
                }
        }
        /* Last one doesn't continue. */
@@ -618,10 +610,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
        else
                vq->split.desc_state[head].indir_desc = ctx;
 
-       /* Store in buffer length if necessary */
-       if (vq->buflen)
-               vq->buflen[head] = buflen;
-
        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
@@ -796,11 +784,6 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }
-       if (vq->buflen && unlikely(*len > vq->buflen[i])) {
-               BAD_RING(vq, "used len %d is larger than in buflen %u\n",
-                       *len, vq->buflen[i]);
-               return NULL;
-       }
 
        /* detach_buf_split clears data, so grab it now. */
        ret = vq->split.desc_state[i].data;
@@ -1079,7 +1062,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
        unsigned int i, n, err_idx;
        u16 head, id;
        dma_addr_t addr;
-       u32 buflen = 0;
 
        head = vq->packed.next_avail_idx;
        desc = alloc_indirect_packed(total_sg, gfp);
@@ -1109,8 +1091,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
                        desc[i].addr = cpu_to_le64(addr);
                        desc[i].len = cpu_to_le32(sg->length);
                        i++;
-                       if (n >= out_sgs)
-                               buflen += sg->length;
                }
        }
 
@@ -1164,10 +1144,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
        vq->packed.desc_state[id].indir_desc = desc;
        vq->packed.desc_state[id].last = id;
 
-       /* Store in buffer length if necessary */
-       if (vq->buflen)
-               vq->buflen[id] = buflen;
-
        vq->num_added += 1;
 
        pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -1203,7 +1179,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
        __le16 head_flags, flags;
        u16 head, id, prev, curr, avail_used_flags;
        int err;
-       u32 buflen = 0;
 
        START_USE(vq);
 
@@ -1283,8 +1258,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
                                        1 << VRING_PACKED_DESC_F_AVAIL |
                                        1 << VRING_PACKED_DESC_F_USED;
                        }
-                       if (n >= out_sgs)
-                               buflen += sg->length;
                }
        }
 
@@ -1304,10 +1277,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
        vq->packed.desc_state[id].indir_desc = ctx;
        vq->packed.desc_state[id].last = prev;
 
-       /* Store in buffer length if necessary */
-       if (vq->buflen)
-               vq->buflen[id] = buflen;
-
        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
@@ -1494,11 +1463,6 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
                BAD_RING(vq, "id %u is not a head!\n", id);
                return NULL;
        }
-       if (vq->buflen && unlikely(*len > vq->buflen[id])) {
-               BAD_RING(vq, "used len %d is larger than in buflen %u\n",
-                       *len, vq->buflen[id]);
-               return NULL;
-       }
 
        /* detach_buf_packed clears data, so grab it now. */
        ret = vq->packed.desc_state[id].data;
@@ -1704,7 +1668,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
        struct vring_virtqueue *vq;
        struct vring_packed_desc *ring;
        struct vring_packed_desc_event *driver, *device;
-       struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
        dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
        size_t ring_size_in_bytes, event_size_in_bytes;
 
@@ -1794,15 +1757,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
        if (!vq->packed.desc_extra)
                goto err_desc_extra;
 
-       if (!drv->suppress_used_validation || force_used_validation) {
-               vq->buflen = kmalloc_array(num, sizeof(*vq->buflen),
-                                          GFP_KERNEL);
-               if (!vq->buflen)
-                       goto err_buflen;
-       } else {
-               vq->buflen = NULL;
-       }
-
        /* No callback?  Tell other side not to bother us. */
        if (!callback) {
                vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
@@ -1815,8 +1769,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
        spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
-err_buflen:
-       kfree(vq->packed.desc_extra);
 err_desc_extra:
        kfree(vq->packed.desc_state);
 err_desc_state:
@@ -2224,7 +2176,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                        void (*callback)(struct virtqueue *),
                                        const char *name)
 {
-       struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
        struct vring_virtqueue *vq;
 
        if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
@@ -2284,15 +2235,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        if (!vq->split.desc_extra)
                goto err_extra;
 
-       if (!drv->suppress_used_validation || force_used_validation) {
-               vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen),
-                                          GFP_KERNEL);
-               if (!vq->buflen)
-                       goto err_buflen;
-       } else {
-               vq->buflen = NULL;
-       }
-
        /* Put everything in free lists. */
        vq->free_head = 0;
        memset(vq->split.desc_state, 0, vring.num *
@@ -2303,8 +2245,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
-err_buflen:
-       kfree(vq->split.desc_extra);
 err_extra:
        kfree(vq->split.desc_state);
 err_state:
index a1b11c62da9e360dc79f151eb49bf3f2a1757ba3..33e941e40082d98562ba2e60c25d5f9e095550fd 100644 (file)
@@ -259,9 +259,15 @@ config XEN_SCSI_BACKEND
          if guests need generic access to SCSI devices.
 
 config XEN_PRIVCMD
-       tristate
+       tristate "Xen hypercall passthrough driver"
        depends on XEN
        default m
+       help
+         The hypercall passthrough driver allows privileged user programs to
+         perform Xen hypercalls. This driver is normally required for systems
+         running as Dom0 to perform privileged operations, but in some
+         disaggregated Xen setups this driver might be needed for other
+         domains, too.
 
 config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
index 7984645b59563b7fc974ec4ecfcc10fb2dbf3e39..3c9ae156b597fa4506af71a8751f336e3ee2d374 100644 (file)
@@ -1275,6 +1275,7 @@ static struct xenbus_driver pvcalls_front_driver = {
        .probe = pvcalls_front_probe,
        .remove = pvcalls_front_remove,
        .otherend_changed = pvcalls_front_changed,
+       .not_essential = true,
 };
 
 static int __init pvcalls_frontend_init(void)
index bd003ca8acbe92bc7a28141ec1cec9783126e228..fe360c33ce717fcedd4c8494d903888059005919 100644 (file)
@@ -909,7 +909,7 @@ static struct notifier_block xenbus_resume_nb = {
 
 static int __init xenbus_init(void)
 {
-       int err = 0;
+       int err;
        uint64_t v = 0;
        xen_store_domain_type = XS_UNKNOWN;
 
@@ -949,6 +949,29 @@ static int __init xenbus_init(void)
                err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
                if (err)
                        goto out_error;
+               /*
+                * Uninitialized hvm_params are zero and return no error.
+                * Although it is theoretically possible to have
+                * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
+                * not zero when valid. If zero, it means that Xenstore hasn't
+                * been properly initialized. Instead of attempting to map a
+                * wrong guest physical address return error.
+                *
+                * Also recognize all bits set as an invalid value.
+                */
+               if (!v || !~v) {
+                       err = -ENOENT;
+                       goto out_error;
+               }
+               /* Avoid truncation on 32-bit. */
+#if BITS_PER_LONG == 32
+               if (v > ULONG_MAX) {
+                       pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
+                              __func__, v);
+                       err = -EINVAL;
+                       goto out_error;
+               }
+#endif
                xen_store_gfn = (unsigned long)v;
                xen_store_interface =
                        xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
@@ -983,8 +1006,10 @@ static int __init xenbus_init(void)
         */
        proc_create_mount_point("xen");
 #endif
+       return 0;
 
 out_error:
+       xen_store_domain_type = XS_UNKNOWN;
        return err;
 }
 
index 480944606a3c9910ea8df4a13d3905a51ac11dff..07b010a68fcf9cd5e6bca1e4ec47d8487013e227 100644 (file)
@@ -211,19 +211,11 @@ static int is_device_connecting(struct device *dev, void *data, bool ignore_none
        if (drv && (dev->driver != drv))
                return 0;
 
-       if (ignore_nonessential) {
-               /* With older QEMU, for PVonHVM guests the guest config files
-                * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
-                * which is nonsensical as there is no PV FB (there can be
-                * a PVKB) running as HVM guest. */
+       xendrv = to_xenbus_driver(dev->driver);
 
-               if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
-                       return 0;
+       if (ignore_nonessential && xendrv->not_essential)
+               return 0;
 
-               if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
-                       return 0;
-       }
-       xendrv = to_xenbus_driver(dev->driver);
        return (xendev->state < XenbusStateConnected ||
                (xendev->state == XenbusStateConnected &&
                 xendrv->is_ready && !xendrv->is_ready(xendev)));
index 9c81cf611d659fcbdb61c1307c1b260836dfdba6..f6f1cbffef9e8c8cc302c2b2c5b8d55a6172fe3b 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,8 +181,9 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    done;
        bool                    cancelled;
+       bool                    work_scheduled;
+       bool                    work_need_resched;
        struct wait_queue_entry wait;
        struct work_struct      work;
 };
@@ -1619,6 +1620,51 @@ static void aio_poll_put_work(struct work_struct *work)
        iocb_put(iocb);
 }
 
+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken.  Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+       wait_queue_head_t *head;
+
+       /*
+        * While we hold the waitqueue lock and the waitqueue is nonempty,
+        * wake_up_pollfree() will wait for us.  However, taking the waitqueue
+        * lock in the first place can race with the waitqueue being freed.
+        *
+        * We solve this as eventpoll does: by taking advantage of the fact that
+        * all users of wake_up_pollfree() will RCU-delay the actual free.  If
+        * we enter rcu_read_lock() and see that the pointer to the queue is
+        * non-NULL, we can then lock it without the memory being freed out from
+        * under us, then check whether the request is still on the queue.
+        *
+        * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+        * case the caller deletes the entry from the queue, leaving it empty.
+        * In that case, only RCU prevents the queue memory from being freed.
+        */
+       rcu_read_lock();
+       head = smp_load_acquire(&req->head);
+       if (head) {
+               spin_lock(&head->lock);
+               if (!list_empty(&req->wait.entry))
+                       return true;
+               spin_unlock(&head->lock);
+       }
+       rcu_read_unlock();
+       return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+       spin_unlock(&req->head->lock);
+       rcu_read_unlock();
+}
+
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,14 +1684,27 @@ static void aio_poll_complete_work(struct work_struct *work)
         * avoid further branches in the fast path.
         */
        spin_lock_irq(&ctx->ctx_lock);
-       if (!mask && !READ_ONCE(req->cancelled)) {
-               add_wait_queue(req->head, &req->wait);
-               spin_unlock_irq(&ctx->ctx_lock);
-               return;
-       }
+       if (poll_iocb_lock_wq(req)) {
+               if (!mask && !READ_ONCE(req->cancelled)) {
+                       /*
+                        * The request isn't actually ready to be completed yet.
+                        * Reschedule completion if another wakeup came in.
+                        */
+                       if (req->work_need_resched) {
+                               schedule_work(&req->work);
+                               req->work_need_resched = false;
+                       } else {
+                               req->work_scheduled = false;
+                       }
+                       poll_iocb_unlock_wq(req);
+                       spin_unlock_irq(&ctx->ctx_lock);
+                       return;
+               }
+               list_del_init(&req->wait.entry);
+               poll_iocb_unlock_wq(req);
+       } /* else, POLLFREE has freed the waitqueue, so we must complete */
        list_del_init(&iocb->ki_list);
        iocb->ki_res.res = mangle_poll(mask);
-       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
        iocb_put(iocb);
@@ -1657,13 +1716,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
        struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
        struct poll_iocb *req = &aiocb->poll;
 
-       spin_lock(&req->head->lock);
-       WRITE_ONCE(req->cancelled, true);
-       if (!list_empty(&req->wait.entry)) {
-               list_del_init(&req->wait.entry);
-               schedule_work(&aiocb->poll.work);
-       }
-       spin_unlock(&req->head->lock);
+       if (poll_iocb_lock_wq(req)) {
+               WRITE_ONCE(req->cancelled, true);
+               if (!req->work_scheduled) {
+                       schedule_work(&aiocb->poll.work);
+                       req->work_scheduled = true;
+               }
+               poll_iocb_unlock_wq(req);
+       } /* else, the request was force-cancelled by POLLFREE already */
 
        return 0;
 }
@@ -1680,21 +1740,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        if (mask && !(mask & req->events))
                return 0;
 
-       list_del_init(&req->wait.entry);
-
-       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+       /*
+        * Complete the request inline if possible.  This requires that three
+        * conditions be met:
+        *   1. An event mask must have been passed.  If a plain wakeup was done
+        *      instead, then mask == 0 and we have to call vfs_poll() to get
+        *      the events, so inline completion isn't possible.
+        *   2. The completion work must not have already been scheduled.
+        *   3. ctx_lock must not be busy.  We have to use trylock because we
+        *      already hold the waitqueue lock, so this inverts the normal
+        *      locking order.  Use irqsave/irqrestore because not all
+        *      filesystems (e.g. fuse) call this function with IRQs disabled,
+        *      yet IRQs have to be disabled before ctx_lock is obtained.
+        */
+       if (mask && !req->work_scheduled &&
+           spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                struct kioctx *ctx = iocb->ki_ctx;
 
-               /*
-                * Try to complete the iocb inline if we can. Use
-                * irqsave/irqrestore because not all filesystems (e.g. fuse)
-                * call this function with IRQs disabled and because IRQs
-                * have to be disabled before ctx_lock is obtained.
-                */
+               list_del_init(&req->wait.entry);
                list_del(&iocb->ki_list);
                iocb->ki_res.res = mangle_poll(mask);
-               req->done = true;
-               if (iocb->ki_eventfd && eventfd_signal_allowed()) {
+               if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
                        iocb = NULL;
                        INIT_WORK(&req->work, aio_poll_put_work);
                        schedule_work(&req->work);
@@ -1703,7 +1769,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                if (iocb)
                        iocb_put(iocb);
        } else {
-               schedule_work(&req->work);
+               /*
+                * Schedule the completion work if needed.  If it was already
+                * scheduled, record that another wakeup came in.
+                *
+                * Don't remove the request from the waitqueue here, as it might
+                * not actually be complete yet (we won't know until vfs_poll()
+                * is called), and we must not miss any wakeups.  POLLFREE is an
+                * exception to this; see below.
+                */
+               if (req->work_scheduled) {
+                       req->work_need_resched = true;
+               } else {
+                       schedule_work(&req->work);
+                       req->work_scheduled = true;
+               }
+
+               /*
+                * If the waitqueue is being freed early but we can't complete
+                * the request inline, we have to tear down the request as best
+                * we can.  That means immediately removing the request from its
+                * waitqueue and preventing all further accesses to the
+                * waitqueue via the request.  We also need to schedule the
+                * completion work (done above).  Also mark the request as
+                * cancelled, to potentially skip an unneeded call to ->poll().
+                */
+               if (mask & POLLFREE) {
+                       WRITE_ONCE(req->cancelled, true);
+                       list_del_init(&req->wait.entry);
+
+                       /*
+                        * Careful: this *must* be the last step, since as soon
+                        * as req->head is NULL'ed out, the request can be
+                        * completed and freed, since aio_poll_complete_work()
+                        * will no longer need to take the waitqueue lock.
+                        */
+                       smp_store_release(&req->head, NULL);
+               }
        }
        return 1;
 }
@@ -1711,6 +1813,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 struct aio_poll_table {
        struct poll_table_struct        pt;
        struct aio_kiocb                *iocb;
+       bool                            queued;
        int                             error;
 };
 
@@ -1721,11 +1824,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
        struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
 
        /* multiple wait queues per file are not supported */
-       if (unlikely(pt->iocb->poll.head)) {
+       if (unlikely(pt->queued)) {
                pt->error = -EINVAL;
                return;
        }
 
+       pt->queued = true;
        pt->error = 0;
        pt->iocb->poll.head = head;
        add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1750,12 +1854,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->done = false;
        req->cancelled = false;
+       req->work_scheduled = false;
+       req->work_need_resched = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
        apt.pt._key = req->events;
        apt.iocb = aiocb;
+       apt.queued = false;
        apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 
        /* initialized the list so that we can do list_empty checks */
@@ -1764,23 +1870,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 
        mask = vfs_poll(req->file, &apt.pt) & req->events;
        spin_lock_irq(&ctx->ctx_lock);
-       if (likely(req->head)) {
-               spin_lock(&req->head->lock);
-               if (unlikely(list_empty(&req->wait.entry))) {
-                       if (apt.error)
+       if (likely(apt.queued)) {
+               bool on_queue = poll_iocb_lock_wq(req);
+
+               if (!on_queue || req->work_scheduled) {
+                       /*
+                        * aio_poll_wake() already either scheduled the async
+                        * completion work, or completed the request inline.
+                        */
+                       if (apt.error) /* unsupported case: multiple queues */
                                cancel = true;
                        apt.error = 0;
                        mask = 0;
                }
                if (mask || apt.error) {
+                       /* Steal to complete synchronously. */
                        list_del_init(&req->wait.entry);
                } else if (cancel) {
+                       /* Cancel if possible (may be too late though). */
                        WRITE_ONCE(req->cancelled, true);
-               } else if (!req->done) { /* actually waiting for an event */
+               } else if (on_queue) {
+                       /*
+                        * Actually waiting for an event, so add the request to
+                        * active_reqs so that it can be cancelled if needed.
+                        */
                        list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
                        aiocb->ki_cancel = aio_poll_cancel;
                }
-               spin_unlock(&req->head->lock);
+               if (on_queue)
+                       poll_iocb_unlock_wq(req);
        }
        if (mask) { /* no async, we'd stolen it */
                aiocb->ki_res.res = mangle_poll(mask);
index 473d21b3a86de46755955f42500cf6cd9e7f057c..66899b6e9bd86117f4330a3add56399d888dfa05 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -35,7 +35,7 @@ static bool chown_ok(struct user_namespace *mnt_userns,
                     kuid_t uid)
 {
        kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);
-       if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, kuid))
+       if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, inode->i_uid))
                return true;
        if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
                return true;
@@ -62,7 +62,7 @@ static bool chgrp_ok(struct user_namespace *mnt_userns,
 {
        kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
        if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) &&
-           (in_group_p(gid) || gid_eq(gid, kgid)))
+           (in_group_p(gid) || gid_eq(gid, inode->i_gid)))
                return true;
        if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
                return true;
index 309516e6a96820cd760e378f7110dc887e316855..43c89952b7d25746f1a96cf1fba527d4624782b9 100644 (file)
@@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
                                  ordered_list);
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;
+               /*
+                * Orders all subsequent loads after reading WORK_DONE_BIT,
+                * paired with the smp_mb__before_atomic in btrfs_work_helper
+                * this guarantees that the ordered function will see all
+                * updates from ordinary work function.
+                */
+               smp_rmb();
 
                /*
                 * we are going to call the ordered done function, but
@@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work)
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
+               /*
+                * Ensures all memory accesses done in the work function are
+                * ordered before setting the WORK_DONE_BIT. Ensuring the thread
+                * which is going to execute the ordered work sees them.
+                * Pairs with the smp_rmb in run_ordered_work.
+                */
+               smp_mb__before_atomic();
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq, work);
        } else {
index 2059d1504149a6326cfc2fc2bc34d9b9f1d8eb67..40c4d6ba3fb9a79d639f5b8d8de2cb018c8904be 100644 (file)
@@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
 
        /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
        ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
-       if (ret < 0)
+       if (ret < 0) {
                btrfs_free_reserved_data_space_noquota(fs_info, len);
-       else
+               extent_changeset_free(*reserved);
+               *reserved = NULL;
+       } else {
                ret = 0;
+       }
        return ret;
 }
 
@@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
        if (ret < 0)
                return ret;
        ret = btrfs_delalloc_reserve_metadata(inode, len);
-       if (ret < 0)
+       if (ret < 0) {
                btrfs_free_reserved_data_space(inode, *reserved, start, len);
+               extent_changeset_free(*reserved);
+               *reserved = NULL;
+       }
        return ret;
 }
 
index 59c3be8c1f4c667b718ba1f81ce0cd684cc65106..514ead6e93b6f2fc5dd687e365cedd1de4f8997f 100644 (file)
@@ -3978,11 +3978,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  */
 static void write_dev_flush(struct btrfs_device *device)
 {
-       struct request_queue *q = bdev_get_queue(device->bdev);
        struct bio *bio = device->flush_bio;
 
+#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       /*
+        * When a disk has write caching disabled, we skip submission of a bio
+        * with flush and sync requests before writing the superblock, since
+        * it's not needed. However when the integrity checker is enabled, this
+        * results in reports that there are metadata blocks referred by a
+        * superblock that were not properly flushed. So don't skip the bio
+        * submission only when the integrity checker is enabled for the sake
+        * of simplicity, since this is a debug tool and not meant for use in
+        * non-debug builds.
+        */
+       struct request_queue *q = bdev_get_queue(device->bdev);
        if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return;
+#endif
 
        bio_reset(bio);
        bio->bi_end_io = btrfs_end_empty_barrier;
index 3fd736a02c1e6d4ad5d2e26edda031416f33c8f3..fc4895e6a62cd18ca6110b3c87d1874d47d237f0 100644 (file)
@@ -6051,6 +6051,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
        int dev_ret = 0;
        int ret = 0;
 
+       if (range->start == U64_MAX)
+               return -EINVAL;
+
        /*
         * Check range overflow if range->len is set.
         * The default range->len is U64_MAX.
index 4e03a6d3aa324be2d0ff259e256e5d9956b35fbf..3258b6f01e85b3601ad8b93553e0811e05e71190 100644 (file)
@@ -4313,6 +4313,20 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
        if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
                return;
 
+       /*
+        * A read may stumble upon this buffer later, make sure that it gets an
+        * error and knows there was an error.
+        */
+       clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+
+       /*
+        * We need to set the mapping with the io error as well because a write
+        * error will flip the file system readonly, and then syncfs() will
+        * return a 0 because we are readonly if we don't modify the err seq for
+        * the superblock.
+        */
+       mapping_set_error(page->mapping, -EIO);
+
        /*
         * If we error out, we should add back the dirty_metadata_bytes
         * to make it consistent.
index fb8cc9642ac40ad2fda24feb2cb6c787e88378b4..2b84846ed9343f193485d7639b9cc9417fc31b2a 100644 (file)
@@ -3187,10 +3187,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
                return -EPERM;
 
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args)) {
-               ret = PTR_ERR(vol_args);
-               goto out;
-       }
+       if (IS_ERR(vol_args))
+               return PTR_ERR(vol_args);
 
        if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
                ret = -EOPNOTSUPP;
@@ -3985,6 +3983,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        bool need_unlock; /* for mut. excl. ops lock */
        int ret;
 
+       if (!arg)
+               btrfs_warn(fs_info,
+       "IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18");
+
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
index 65cb0766e62d648dbaf5d487cada8e1b6495692f..0fb90cbe76697c0efeac27f54a0a5fa005db508f 100644 (file)
@@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf)
 static int copy_compressed_data_to_page(char *compressed_data,
                                        size_t compressed_size,
                                        struct page **out_pages,
+                                       unsigned long max_nr_page,
                                        u32 *cur_out,
                                        const u32 sectorsize)
 {
@@ -133,6 +134,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
        struct page *cur_page;
        char *kaddr;
 
+       if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+               return -E2BIG;
+
        /*
         * We never allow a segment header crossing sector boundary, previous
         * run should ensure we have enough space left inside the sector.
@@ -161,6 +165,10 @@ static int copy_compressed_data_to_page(char *compressed_data,
                                     orig_out + compressed_size - *cur_out);
 
                kunmap(cur_page);
+
+               if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+                       return -E2BIG;
+
                cur_page = out_pages[*cur_out / PAGE_SIZE];
                /* Allocate a new page */
                if (!cur_page) {
@@ -203,6 +211,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
        const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
        struct page *page_in = NULL;
        char *sizes_ptr;
+       const unsigned long max_nr_page = *out_pages;
        int ret = 0;
        /* Points to the file offset of input data */
        u64 cur_in = start;
@@ -210,6 +219,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
        u32 cur_out = 0;
        u32 len = *total_out;
 
+       ASSERT(max_nr_page > 0);
        *out_pages = 0;
        *total_out = 0;
        *total_in = 0;
@@ -248,7 +258,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                }
 
                ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
-                                                  pages, &cur_out, sectorsize);
+                                                  pages, max_nr_page,
+                                                  &cur_out, sectorsize);
                if (ret < 0)
                        goto out;
 
@@ -279,6 +290,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
        *total_out = cur_out;
        *total_in = cur_in - start;
 out:
+       if (page_in)
+               put_page(page_in);
        *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
        return ret;
 }
index 12ceb14a114168946166c3be3f2ee7cdc96e5a43..d20166336557697f7b3d48716d63727a37aa15fa 100644 (file)
@@ -334,7 +334,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
        key.offset = ref_id;
 again:
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
-       BUG_ON(ret < 0);
+       if (ret < 0)
+               goto out;
        if (ret == 0) {
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
index cf82ea6f54fb418be744dde63ddf7963583715a4..8f6ceea33969038679debba49eeea7a3f4657939 100644 (file)
@@ -73,8 +73,8 @@ struct scrub_page {
        u64                     physical_for_dev_replace;
        atomic_t                refs;
        u8                      mirror_num;
-       int                     have_csum:1;
-       int                     io_error:1;
+       unsigned int            have_csum:1;
+       unsigned int            io_error:1;
        u8                      csum[BTRFS_CSUM_SIZE];
 
        struct scrub_recover    *recover;
index 8ab33caf016f315129565ac66568e858e52dde56..3e6f14e13918b9485d5d427a1cf17f340a7d4c84 100644 (file)
@@ -2908,6 +2908,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
                                                     path->nodes[*level]->len);
                                        if (ret)
                                                return ret;
+                                       btrfs_redirty_list_add(trans->transaction,
+                                                              next);
                                } else {
                                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
                                                clear_extent_buffer_dirty(next);
@@ -2988,6 +2990,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
                                                next->start, next->len);
                                if (ret)
                                        goto out;
+                               btrfs_redirty_list_add(trans->transaction, next);
                        } else {
                                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
                                        clear_extent_buffer_dirty(next);
@@ -3438,8 +3441,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
                          EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
        extent_io_tree_release(&log->log_csum_range);
 
-       if (trans && log->node)
-               btrfs_redirty_list_add(trans->transaction, log->node);
        btrfs_put_root(log);
 }
 
index 61ac57bcbf1ae4116a58863f920479572528828c..0997e3cd74e915c3f056eeff302a7cc1de7dba96 100644 (file)
@@ -7558,6 +7558,19 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
         */
        fs_info->fs_devices->total_rw_bytes = 0;
 
+       /*
+        * Lockdep complains about possible circular locking dependency between
+        * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
+        * used for freeze protection of a fs (struct super_block.s_writers),
+        * which we take when starting a transaction, and extent buffers of the
+        * chunk tree if we call read_one_dev() while holding a lock on an
+        * extent buffer of the chunk tree. Since we are mounting the filesystem
+        * and at this point there can't be any concurrent task modifying the
+        * chunk tree, to keep it simple, just skip locking on the chunk tree.
+        */
+       ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
+       path->skip_locking = 1;
+
        /*
         * Read all device items, and then all the chunk items. All
         * device items are found before any chunk item (their object id
@@ -7583,10 +7596,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
                                goto error;
                        break;
                }
-               /*
-                * The nodes on level 1 are not locked but we don't need to do
-                * that during mount time as nothing else can access the tree
-                */
                node = path->nodes[1];
                if (node) {
                        if (last_ra_node != node->start) {
@@ -7614,7 +7623,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
                         * requirement for chunk allocation, see the comment on
                         * top of btrfs_chunk_alloc() for details.
                         */
-                       ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(&found_key, leaf, chunk);
                        if (ret)
index 67d932d707984cffd3f6d502fed36c307ad4c113..678a2946951196028da3473dd36471640f481488 100644 (file)
@@ -1860,6 +1860,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
        block_group->alloc_offset = block_group->zone_capacity;
        block_group->free_space_ctl->free_space = 0;
        btrfs_clear_treelog_bg(block_group);
+       btrfs_clear_data_reloc_bg(block_group);
        spin_unlock(&block_group->lock);
 
        ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
@@ -1942,6 +1943,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
        ASSERT(block_group->alloc_offset == block_group->zone_capacity);
        ASSERT(block_group->free_space_ctl->free_space == 0);
        btrfs_clear_treelog_bg(block_group);
+       btrfs_clear_data_reloc_bg(block_group);
        spin_unlock(&block_group->lock);
 
        map = block_group->physical_map;
index 12bde7bfda86be84b0f7cf32f5a1803251f38407..23a1ed2fb7699ebaaebe89f56f29584079603420 100644 (file)
@@ -393,26 +393,14 @@ static void cifs_put_swn_reg(struct cifs_swn_reg *swnreg)
 
 static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const char *name, int state)
 {
-       int i;
-
        switch (state) {
        case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
                cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
-               for (i = 0; i < swnreg->tcon->ses->chan_count; i++) {
-                       spin_lock(&GlobalMid_Lock);
-                       if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-                               swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-                       spin_unlock(&GlobalMid_Lock);
-               }
+               cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
                break;
        case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
                cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
-               for (i = 0; i < swnreg->tcon->ses->chan_count; i++) {
-                       spin_lock(&GlobalMid_Lock);
-                       if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-                               swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-                       spin_unlock(&GlobalMid_Lock);
-               }
+               cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
                break;
        case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
                cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
index b50da1901ebd2ddaaa507c171fab8f773d7db849..9e5d9e192ef042202c079f204e157c6203c2f025 100644 (file)
@@ -152,5 +152,5 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.33"
+#define CIFS_VERSION   "2.34"
 #endif                         /* _CIFSFS_H */
index f3073a62ce574e379e054163d664dd97114b1623..4f5a3e857df4aea68fbcd56aff0e76f58b823c15 100644 (file)
@@ -599,6 +599,7 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
 bool is_server_using_iface(struct TCP_Server_Info *server,
                           struct cifs_server_iface *iface);
 bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
+void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
 
 void extract_unc_hostname(const char *unc, const char **h, size_t *len);
 int copy_path_name(char *dst, const char *src);
index 82577a7a5bb147101d248c334de5d93220ce4ad3..18448dbd762a89ce92b6c70e0b55eb54c1d0f74a 100644 (file)
@@ -1271,10 +1271,8 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
 {
        struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
 
-       if (ctx->nosharesock) {
-               server->nosharesock = true;
+       if (ctx->nosharesock)
                return 0;
-       }
 
        /* this server does not share socket */
        if (server->nosharesock)
@@ -1438,6 +1436,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
                goto out_err;
        }
 
+       if (ctx->nosharesock)
+               tcp_ses->nosharesock = true;
+
        tcp_ses->ops = ctx->ops;
        tcp_ses->vals = ctx->vals;
        cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
@@ -1452,8 +1453,10 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
        tcp_ses->max_in_flight = 0;
        tcp_ses->credits = 1;
        if (primary_server) {
+               spin_lock(&cifs_tcp_ses_lock);
                ++primary_server->srv_count;
                tcp_ses->primary_server = primary_server;
+               spin_unlock(&cifs_tcp_ses_lock);
        }
        init_waitqueue_head(&tcp_ses->response_q);
        init_waitqueue_head(&tcp_ses->request_q);
@@ -1559,6 +1562,10 @@ smbd_connected:
        /* fscache server cookies are based on primary channel only */
        if (!CIFS_SERVER_IS_CHAN(tcp_ses))
                cifs_fscache_get_client_cookie(tcp_ses);
+#ifdef CONFIG_CIFS_FSCACHE
+       else
+               tcp_ses->fscache = tcp_ses->primary_server->fscache;
+#endif /* CONFIG_CIFS_FSCACHE */
 
        /* queue echo request delayed work */
        queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -3043,12 +3050,6 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
                                cifs_dbg(VFS, "read only mount of RW share\n");
                        /* no need to log a RW mount of a typical RW share */
                }
-               /*
-                * The cookie is initialized from volume info returned above.
-                * Inside cifs_fscache_get_super_cookie it checks
-                * that we do not get super cookie twice.
-                */
-               cifs_fscache_get_super_cookie(tcon);
        }
 
        /*
@@ -3423,6 +3424,7 @@ static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list
         */
        mount_put_conns(mnt_ctx);
        mount_get_dfs_conns(mnt_ctx);
+       set_root_ses(mnt_ctx);
 
        full_path = build_unc_path_to_root(ctx, cifs_sb, true);
        if (IS_ERR(full_path))
@@ -4111,18 +4113,6 @@ cifs_prune_tlinks(struct work_struct *work)
 }
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
-static void mark_tcon_tcp_ses_for_reconnect(struct cifs_tcon *tcon)
-{
-       int i;
-
-       for (i = 0; i < tcon->ses->chan_count; i++) {
-               spin_lock(&GlobalMid_Lock);
-               if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-                       tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-               spin_unlock(&GlobalMid_Lock);
-       }
-}
-
 /* Update dfs referral path of superblock */
 static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
                                  const char *target)
@@ -4299,7 +4289,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco
         */
        if (rc && server->current_fullpath != server->origin_fullpath) {
                server->current_fullpath = server->origin_fullpath;
-               mark_tcon_tcp_ses_for_reconnect(tcon);
+               cifs_ses_mark_for_reconnect(tcon->ses);
        }
 
        dfs_cache_free_tgts(tl);
index 5c1259d2eeac21c1a330bbfa1f94d00421ad3805..e9b0fa2a9614a4e63ba767456d868f8b276192a9 100644 (file)
@@ -1355,12 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
        }
 
        cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-       for (i = 0; i < tcon->ses->chan_count; i++) {
-               spin_lock(&GlobalMid_Lock);
-               if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
-                       tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
-               spin_unlock(&GlobalMid_Lock);
-       }
+       cifs_ses_mark_for_reconnect(tcon->ses);
 }
 
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
index 7e409a38a2d7c184abf3ab0c1afbea2ccbfe9f6f..003c5f1f4dfb1a058afee9a917818f6c43891d75 100644 (file)
  * Key layout of CIFS server cache index object
  */
 struct cifs_server_key {
-       struct {
-               uint16_t        family;         /* address family */
-               __be16          port;           /* IP port */
-       } hdr;
-       union {
-               struct in_addr  ipv4_addr;
-               struct in6_addr ipv6_addr;
-       };
+       __u64 conn_id;
 } __packed;
 
 /*
@@ -31,42 +24,23 @@ struct cifs_server_key {
  */
 void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
 {
-       const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
-       const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
-       const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
        struct cifs_server_key key;
-       uint16_t key_len = sizeof(key.hdr);
-
-       memset(&key, 0, sizeof(key));
 
        /*
-        * Should not be a problem as sin_family/sin6_family overlays
-        * sa_family field
+        * Check if cookie was already initialized so don't reinitialize it.
+        * In the future, as we integrate with newer fscache features,
+        * we may want to instead add a check if cookie has changed
         */
-       key.hdr.family = sa->sa_family;
-       switch (sa->sa_family) {
-       case AF_INET:
-               key.hdr.port = addr->sin_port;
-               key.ipv4_addr = addr->sin_addr;
-               key_len += sizeof(key.ipv4_addr);
-               break;
-
-       case AF_INET6:
-               key.hdr.port = addr6->sin6_port;
-               key.ipv6_addr = addr6->sin6_addr;
-               key_len += sizeof(key.ipv6_addr);
-               break;
-
-       default:
-               cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
-               server->fscache = NULL;
+       if (server->fscache)
                return;
-       }
+
+       memset(&key, 0, sizeof(key));
+       key.conn_id = server->conn_id;
 
        server->fscache =
                fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
                                       &cifs_fscache_server_index_def,
-                                      &key, key_len,
+                                      &key, sizeof(key),
                                       NULL, 0,
                                       server, 0, true);
        cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
@@ -92,7 +66,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
         * In the future, as we integrate with newer fscache features,
         * we may want to instead add a check if cookie has changed
         */
-       if (tcon->fscache == NULL)
+       if (tcon->fscache)
                return;
 
        sharename = extract_sharename(tcon->treeName);
index 82848412ad85208f08d1fad12bc2871297c8f252..96d083db173724901e3193b51ce4e1dda3bde64d 100644 (file)
@@ -1376,6 +1376,13 @@ iget_no_retry:
                inode = ERR_PTR(rc);
        }
 
+       /*
+        * The cookie is initialized from volume info returned above.
+        * Inside cifs_fscache_get_super_cookie it checks
+        * that we do not get super cookie twice.
+        */
+       cifs_fscache_get_super_cookie(tcon);
+
 out:
        kfree(path);
        free_xid(xid);
index 2c10b186ed6ee6444c43c852500fb4714bf291b5..035dc3e245dca9f569afeceed54f8d0212f62968 100644 (file)
@@ -95,9 +95,9 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
        }
 
        if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
-               cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
                ses->chan_max = 1;
                spin_unlock(&ses->chan_lock);
+               cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
                return 0;
        }
        spin_unlock(&ses->chan_lock);
@@ -222,6 +222,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
        /* Auth */
        ctx.domainauto = ses->domainAuto;
        ctx.domainname = ses->domainName;
+       ctx.server_hostname = ses->server->hostname;
        ctx.username = ses->user_name;
        ctx.password = ses->password;
        ctx.sectype = ses->sectype;
@@ -318,6 +319,19 @@ out:
        return rc;
 }
 
+/* Mark all session channels for reconnect */
+void cifs_ses_mark_for_reconnect(struct cifs_ses *ses)
+{
+       int i;
+
+       for (i = 0; i < ses->chan_count; i++) {
+               spin_lock(&GlobalMid_Lock);
+               if (ses->chans[i].server->tcpStatus != CifsExiting)
+                       ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+               spin_unlock(&GlobalMid_Lock);
+       }
+}
+
 static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
 {
        __u32 capabilities = 0;
@@ -576,8 +590,8 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
 {
        unsigned int tioffset; /* challenge message target info area */
        unsigned int tilen; /* challenge message target info area length  */
-
        CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
+       __u32 server_flags;
 
        if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
                cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
@@ -595,12 +609,37 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                return -EINVAL;
        }
 
+       server_flags = le32_to_cpu(pblob->NegotiateFlags);
+       cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
+                ses->ntlmssp->client_flags, server_flags);
+
+       if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
+           (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
+               cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
+                        __func__);
+               return -EINVAL;
+       }
+       if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
+               cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
+               return -EINVAL;
+       }
+       if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
+               cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
+                        __func__);
+               return -EOPNOTSUPP;
+       }
+       if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+           !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
+               pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
+                            __func__);
+
+       ses->ntlmssp->server_flags = server_flags;
+
        memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
-       /* BB we could decode pblob->NegotiateFlags; some may be useful */
        /* In particular we can examine sign flags */
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
-       ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
+
        tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
        tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
        if (tioffset > blob_len || tioffset + tilen > blob_len) {
@@ -707,13 +746,13 @@ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
                NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-               NTLMSSP_NEGOTIATE_SEAL;
-       if (server->sign)
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
+               NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+               NTLMSSP_NEGOTIATE_SIGN;
        if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
                flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
 
        tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
+       ses->ntlmssp->client_flags = flags;
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
 
        /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
@@ -765,15 +804,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
 
-       flags = NTLMSSP_NEGOTIATE_56 |
-               NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
-               NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-               NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-               NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
-       if (ses->server->sign)
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
-               flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+       flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+               NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
 
        tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -820,9 +852,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
                                      *pbuffer, &tmp,
                                      nls_cp);
 
-       if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
-               (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
-                       && !calc_seckey(ses)) {
+       if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+           (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
+           !calc_seckey(ses)) {
                memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
                sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
index 2f5f2c4c6183c9461bf819b21781989046f6c338..8b3670388cdaf682e23635b4336d4d5cdfce446c 100644 (file)
@@ -142,7 +142,7 @@ static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
               struct TCP_Server_Info *server)
 {
-       int rc;
+       int rc = 0;
        struct nls_table *nls_codepage;
        struct cifs_ses *ses;
        int retries;
index 84da2c28001298848bbf3eca237f5ff4349422c5..ec9a1d780dc143c0bd8ab3d6b42b8532f022fe48 100644 (file)
@@ -150,7 +150,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
         * however in order to avoid some race conditions, add a
         * DBG_BUGON to observe this in advance.
         */
-       DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);
+       DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
 
        /* last refcount should be connected with its managed pslot.  */
        erofs_workgroup_unfreeze(grp, 0);
@@ -165,15 +165,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
        unsigned int freed = 0;
        unsigned long index;
 
+       xa_lock(&sbi->managed_pslots);
        xa_for_each(&sbi->managed_pslots, index, grp) {
                /* try to shrink each valid workgroup */
                if (!erofs_try_to_release_workgroup(sbi, grp))
                        continue;
+               xa_unlock(&sbi->managed_pslots);
 
                ++freed;
                if (!--nr_shrink)
-                       break;
+                       return freed;
+               xa_lock(&sbi->managed_pslots);
        }
+       xa_unlock(&sbi->managed_pslots);
        return freed;
 }
 
index 8627dacfc4246fb975ed38f2b2dc8090f7c7202c..ad4a8bf3cf109fd984449eedef5de6bf9fa342e8 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -858,6 +858,10 @@ loop:
                        file = NULL;
                else if (!get_file_rcu_many(file, refs))
                        goto loop;
+               else if (files_lookup_fd_raw(files, fd) != file) {
+                       fput_many(file, refs);
+                       goto loop;
+               }
        }
        rcu_read_unlock();
 
index 79f7eda49e06cb83fb43f0f8961b75e2ce28a1ca..cd54a529460da9b98ef56838b3fd04cb517ae3b0 100644 (file)
@@ -847,17 +847,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 
        replace_page_cache_page(oldpage, newpage);
 
+       get_page(newpage);
+
+       if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+               lru_cache_add(newpage);
+
        /*
         * Release while we have extra ref on stolen page.  Otherwise
         * anon_pipe_buf_release() might think the page can be reused.
         */
        pipe_buf_release(cs->pipe, buf);
 
-       get_page(newpage);
-
-       if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-               lru_cache_add(newpage);
-
        err = 0;
        spin_lock(&cs->req->waitq.lock);
        if (test_bit(FR_ABORTED, &cs->req->flags))
index 7235d539e96950574d3655e6f3b1b61ef64f5984..d67108489148eef281128c6841c52135abf74050 100644 (file)
@@ -940,7 +940,7 @@ do_alloc:
                else if (height == ip->i_height)
                        ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
                else
-                       iomap->length = size - pos;
+                       iomap->length = size - iomap->offset;
        } else if (flags & IOMAP_WRITE) {
                u64 alloc_size;
 
index adafaaf7d24dee43b793ea3a2a5ec5bda7a8887a..3e718cfc19a793eb29225eb685cc17b00f60d6c0 100644 (file)
@@ -773,8 +773,8 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
                                         size_t *prev_count,
                                         size_t *window_size)
 {
-       char __user *p = i->iov[0].iov_base + i->iov_offset;
        size_t count = iov_iter_count(i);
+       char __user *p;
        int pages = 1;
 
        if (likely(!count))
@@ -787,14 +787,14 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
        if (*prev_count != count || !*window_size) {
                int pages, nr_dirtied;
 
-               pages = min_t(int, BIO_MAX_VECS,
-                             DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE));
+               pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
                nr_dirtied = max(current->nr_dirtied_pause -
                                 current->nr_dirtied, 1);
                pages = min(pages, nr_dirtied);
        }
 
        *prev_count = count;
+       p = i->iov[0].iov_base + i->iov_offset;
        *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
        return true;
 }
@@ -1013,6 +1013,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder *statfs_gh = NULL;
        size_t prev_count = 0, window_size = 0;
+       size_t orig_count = iov_iter_count(from);
        size_t read = 0;
        ssize_t ret;
 
@@ -1057,6 +1058,7 @@ retry_under_glock:
        if (inode == sdp->sd_rindex)
                gfs2_glock_dq_uninit(statfs_gh);
 
+       from->count = orig_count - read;
        if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
                size_t leftover;
 
@@ -1064,6 +1066,7 @@ retry_under_glock:
                leftover = fault_in_iov_iter_readable(from, window_size);
                gfs2_holder_disallow_demote(gh);
                if (leftover != window_size) {
+                       from->count = min(from->count, window_size - leftover);
                        if (!gfs2_holder_queued(gh)) {
                                if (read)
                                        goto out_uninit;
index 19f38aee1b618d8355fb7b50cdc3992e6c13ea94..44a7a4288956b60d174340befd7ee3b19bc8c8e3 100644 (file)
@@ -411,14 +411,14 @@ static void do_error(struct gfs2_glock *gl, const int ret)
 static void demote_incompat_holders(struct gfs2_glock *gl,
                                    struct gfs2_holder *new_gh)
 {
-       struct gfs2_holder *gh;
+       struct gfs2_holder *gh, *tmp;
 
        /*
         * Demote incompatible holders before we make ourselves eligible.
         * (This holder may or may not allow auto-demoting, but we don't want
         * to demote the new holder before it's even granted.)
         */
-       list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+       list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                /*
                 * Since holders are at the front of the list, we stop when we
                 * find the first non-holder.
@@ -496,7 +496,7 @@ again:
         * Since we unlock the lockref lock, we set a flag to indicate
         * instantiate is in progress.
         */
-       if (test_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
+       if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
                wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
                            TASK_UNINTERRUPTIBLE);
                /*
@@ -509,14 +509,10 @@ again:
                goto again;
        }
 
-       set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
-
        ret = glops->go_instantiate(gh);
        if (!ret)
                clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
-       clear_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
-       smp_mb__after_atomic();
-       wake_up_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG);
+       clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
        return ret;
 }
 
@@ -1861,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 
 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 {
-       struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;
@@ -1894,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
         * keep the glock until the last strong holder is done with it.
         */
        if (!find_first_strong_holder(gl)) {
-               if (state == LM_ST_UNLOCKED)
-                       mock_gh.gh_state = LM_ST_EXCLUSIVE;
+               struct gfs2_holder mock_gh = {
+                       .gh_gl = gl,
+                       .gh_state = (state == LM_ST_UNLOCKED) ?
+                                   LM_ST_EXCLUSIVE : state,
+                       .gh_iflags = BIT(HIF_HOLDER)
+               };
+
                demote_incompat_holders(gl, &mock_gh);
        }
        handle_callback(gl, state, delay, true);
index 6424b903e88515f191bc21b7f4b72cd51c0a70f6..89905f4f29bb6de91e181373c4f168431f3e8e74 100644 (file)
@@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops;
 static const struct inode_operations gfs2_dir_iops;
 static const struct inode_operations gfs2_symlink_iops;
 
-static int iget_test(struct inode *inode, void *opaque)
-{
-       u64 no_addr = *(u64 *)opaque;
-
-       return GFS2_I(inode)->i_no_addr == no_addr;
-}
-
-static int iget_set(struct inode *inode, void *opaque)
-{
-       u64 no_addr = *(u64 *)opaque;
-
-       GFS2_I(inode)->i_no_addr = no_addr;
-       inode->i_ino = no_addr;
-       return 0;
-}
-
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
-{
-       struct inode *inode;
-
-repeat:
-       inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
-       if (!inode)
-               return inode;
-       if (is_bad_inode(inode)) {
-               iput(inode);
-               goto repeat;
-       }
-       return inode;
-}
-
 /**
  * gfs2_set_iop - Sets inode operations
  * @inode: The inode with correct i_mode filled in
@@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode)
        }
 }
 
+static int iget_test(struct inode *inode, void *opaque)
+{
+       u64 no_addr = *(u64 *)opaque;
+
+       return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+       u64 no_addr = *(u64 *)opaque;
+
+       GFS2_I(inode)->i_no_addr = no_addr;
+       inode->i_ino = no_addr;
+       return 0;
+}
+
 /**
  * gfs2_inode_lookup - Lookup an inode
  * @sb: The super block
@@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 {
        struct inode *inode;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl = NULL;
        struct gfs2_holder i_gh;
        int error;
 
        gfs2_holder_mark_uninitialized(&i_gh);
-       inode = gfs2_iget(sb, no_addr);
+       inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
@@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
        if (inode->i_state & I_NEW) {
                struct gfs2_sbd *sdp = GFS2_SB(inode);
+               struct gfs2_glock *io_gl;
 
                error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
                if (unlikely(error))
                        goto fail;
-               flush_delayed_work(&ip->i_gl->gl_work);
-
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-               if (unlikely(error))
-                       goto fail;
-               if (blktype != GFS2_BLKST_UNLINKED)
-                       gfs2_cancel_delete_work(io_gl);
 
                if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
                        /*
                         * The GL_SKIP flag indicates to skip reading the inode
-                        * block.  We read the inode with gfs2_inode_refresh
+                        * block.  We read the inode when instantiating it
                         * after possibly checking the block type.
                         */
                        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
@@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                        }
                }
 
-               glock_set_object(ip->i_gl, ip);
                set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
-               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                if (unlikely(error))
                        goto fail;
-               glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+               if (blktype != GFS2_BLKST_UNLINKED)
+                       gfs2_cancel_delete_work(io_gl);
+               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                gfs2_glock_put(io_gl);
-               io_gl = NULL;
+               if (unlikely(error))
+                       goto fail;
 
                /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
                inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
                inode->i_atime.tv_nsec = 0;
 
+               glock_set_object(ip->i_gl, ip);
+
                if (type == DT_UNKNOWN) {
                        /* Inode glock must be locked already */
                        error = gfs2_instantiate(&i_gh);
-                       if (error)
+                       if (error) {
+                               glock_clear_object(ip->i_gl, ip);
                                goto fail;
+                       }
                } else {
                        ip->i_no_formal_ino = no_formal_ino;
                        inode->i_mode = DT2IF(type);
@@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
                if (gfs2_holder_initialized(&i_gh))
                        gfs2_glock_dq_uninit(&i_gh);
+               glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 
                gfs2_set_iop(inode);
+               unlock_new_inode(inode);
        }
 
        if (no_formal_ino && ip->i_no_formal_ino &&
            no_formal_ino != ip->i_no_formal_ino) {
-               error = -ESTALE;
-               if (inode->i_state & I_NEW)
-                       goto fail;
                iput(inode);
-               return ERR_PTR(error);
+               return ERR_PTR(-ESTALE);
        }
 
-       if (inode->i_state & I_NEW)
-               unlock_new_inode(inode);
-
        return inode;
 
 fail:
-       if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-               glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+       if (gfs2_holder_initialized(&ip->i_iopen_gh))
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-       }
-       if (io_gl)
-               gfs2_glock_put(io_gl);
        if (gfs2_holder_initialized(&i_gh))
                gfs2_glock_dq_uninit(&i_gh);
        iget_failed(inode);
@@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
        if (error)
                goto fail_free_inode;
-       flush_delayed_work(&ip->i_gl->gl_work);
 
        error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
        if (error)
                goto fail_free_inode;
        gfs2_cancel_delete_work(io_gl);
 
+       error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+       BUG_ON(error);
+
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
        if (error)
                goto fail_gunlock2;
 
-       glock_set_object(ip->i_gl, ip);
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto fail_gunlock2;
@@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (error)
                goto fail_gunlock2;
 
+       glock_set_object(ip->i_gl, ip);
        glock_set_object(io_gl, ip);
        gfs2_set_iop(inode);
-       insert_inode_hash(inode);
 
        free_vfs_inode = 0; /* After this point, the inode is no longer
                               considered free. Any failures need to undo
@@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        gfs2_glock_dq_uninit(ghs + 1);
        gfs2_glock_put(io_gl);
        gfs2_qa_put(dip);
+       unlock_new_inode(inode);
        return error;
 
 fail_gunlock3:
+       glock_clear_object(ip->i_gl, ip);
        glock_clear_object(io_gl, ip);
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
-       glock_clear_object(io_gl, ip);
        gfs2_glock_put(io_gl);
 fail_free_inode:
        if (ip->i_gl) {
-               glock_clear_object(ip->i_gl, ip);
                if (free_vfs_inode) /* else evict will do the put for us */
                        gfs2_glock_put(ip->i_gl);
        }
@@ -829,7 +807,10 @@ fail_gunlock:
                        mark_inode_dirty(inode);
                set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
                        &GFS2_I(inode)->i_flags);
-               iput(inode);
+               if (inode->i_state & I_NEW)
+                       iget_failed(inode);
+               else
+                       iput(inode);
        }
        if (gfs2_holder_initialized(ghs + 1))
                gfs2_glock_dq_uninit(ghs + 1);
index 5b121371508a539da16a9fc89389a5019893f2e0..0f93e8beca4d957a12ea38420c04f95fc8e3da45 100644 (file)
@@ -1402,13 +1402,6 @@ out:
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
-       if (ip->i_gl) {
-               glock_clear_object(ip->i_gl, ip);
-               wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
-               gfs2_glock_add_to_lru(ip->i_gl);
-               gfs2_glock_put_eventually(ip->i_gl);
-               ip->i_gl = NULL;
-       }
        if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
 
@@ -1421,6 +1414,13 @@ out:
                gfs2_holder_uninit(&ip->i_iopen_gh);
                gfs2_glock_put_eventually(gl);
        }
+       if (ip->i_gl) {
+               glock_clear_object(ip->i_gl, ip);
+               wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+               gfs2_glock_add_to_lru(ip->i_gl);
+               gfs2_glock_put_eventually(ip->i_gl);
+               ip->i_gl = NULL;
+       }
 }
 
 static struct inode *gfs2_alloc_inode(struct super_block *sb)
index 3eba0940ffcf162389ab4ca7fc391b4dc70399bc..6b80a51129d56f4175326e1faf1429e4f730f8b9 100644 (file)
@@ -180,8 +180,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
-       if (sb->s_type->fs_flags & FS_THP_SUPPORT)
-               __set_bit(AS_THP_SUPPORT, &mapping->flags);
        mapping->wb_err = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
index 88202de519f6d1755103f435ae6b2eea14e55adb..8d2bb818a3bb009f51c71c0da93d637f7f80ec2b 100644 (file)
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
                                        struct io_wqe_acct *acct,
                                        struct io_cb_cancel_data *match);
 static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
            test_and_set_bit_lock(0, &worker->create_state))
                goto fail_release;
 
+       atomic_inc(&wq->worker_refs);
        init_task_work(&worker->create_work, func);
        worker->create_index = acct->index;
        if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
-               clear_bit_unlock(0, &worker->create_state);
+               /*
+                * EXIT may have been set after checking it above, check after
+                * adding the task_work and remove any creation item if it is
+                * now set. wq exit does that too, but we can have added this
+                * work item after we canceled in io_wq_exit_workers().
+                */
+               if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+                       io_wq_cancel_tw_create(wq);
+               io_worker_ref_put(wq);
                return true;
        }
+       io_worker_ref_put(wq);
        clear_bit_unlock(0, &worker->create_state);
 fail_release:
        io_worker_release(worker);
@@ -714,6 +725,13 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
 
 static inline bool io_should_retry_thread(long err)
 {
+       /*
+        * Prevent perpetual task_work retry, if the task (or its group) is
+        * exiting.
+        */
+       if (fatal_signal_pending(current))
+               return false;
+
        switch (err) {
        case -EAGAIN:
        case -ERESTARTSYS:
@@ -1191,13 +1209,9 @@ void io_wq_exit_start(struct io_wq *wq)
        set_bit(IO_WQ_BIT_EXIT, &wq->state);
 }
 
-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
 {
        struct callback_head *cb;
-       int node;
-
-       if (!wq->task)
-               return;
 
        while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
                struct io_worker *worker;
@@ -1205,6 +1219,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
                worker = container_of(cb, struct io_worker, create_work);
                io_worker_cancel_cb(worker);
        }
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+       int node;
+
+       if (!wq->task)
+               return;
+
+       io_wq_cancel_tw_create(wq);
 
        rcu_read_lock();
        for_each_node(node) {
index b07196b4511c421a81ddda68a6cde64dbbf717f3..d5ab0e9a3f2916758e220d28235e15eb16005154 100644 (file)
@@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
+       __must_hold(&req->ctx->timeout_lock)
 {
        struct io_kiocb *req;
 
@@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
        return false;
 }
 
+static bool io_match_linked(struct io_kiocb *head)
+{
+       struct io_kiocb *req;
+
+       io_for_each_link(req, head) {
+               if (req->flags & REQ_F_INFLIGHT)
+                       return true;
+       }
+       return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+                              bool cancel_all)
+{
+       bool matched;
+
+       if (task && head->task != task)
+               return false;
+       if (cancel_all)
+               return true;
+
+       if (head->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = head->ctx;
+
+               /* protect against races with linked timeouts */
+               spin_lock_irq(&ctx->timeout_lock);
+               matched = io_match_linked(head);
+               spin_unlock_irq(&ctx->timeout_lock);
+       } else {
+               matched = io_match_linked(head);
+       }
+       return matched;
+}
+
 static inline bool req_has_async_data(struct io_kiocb *req)
 {
        return req->flags & REQ_F_ASYNC_DATA;
@@ -1502,10 +1541,10 @@ static void io_prep_async_link(struct io_kiocb *req)
        if (req->flags & REQ_F_LINK_TIMEOUT) {
                struct io_ring_ctx *ctx = req->ctx;
 
-               spin_lock(&ctx->completion_lock);
+               spin_lock_irq(&ctx->timeout_lock);
                io_for_each_link(cur, req)
                        io_prep_async_work(cur);
-               spin_unlock(&ctx->completion_lock);
+               spin_unlock_irq(&ctx->timeout_lock);
        } else {
                io_for_each_link(cur, req)
                        io_prep_async_work(cur);
@@ -4327,6 +4366,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
                kfree(nxt);
                if (++i == nbufs)
                        return i;
+               cond_resched();
        }
        i++;
        kfree(buf);
@@ -5704,7 +5744,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 
                list = &ctx->cancel_hash[i];
                hlist_for_each_entry_safe(req, tmp, list, hash_node) {
-                       if (io_match_task(req, tsk, cancel_all))
+                       if (io_match_task_safe(req, tsk, cancel_all))
                                posted += io_poll_remove_one(req);
                }
        }
@@ -6156,6 +6196,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;
 
+       if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+               return -EINVAL;
+
        data->mode = io_translate_timeout_mode(flags);
        hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
 
@@ -6880,10 +6923,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
        struct io_kiocb *prev = req->timeout.prev;
-       int ret;
+       int ret = -ENOENT;
 
        if (prev) {
-               ret = io_try_cancel_userdata(req, prev->user_data);
+               if (!(req->task->flags & PF_EXITING))
+                       ret = io_try_cancel_userdata(req, prev->user_data);
                io_req_complete_post(req, ret ?: -ETIME, 0);
                io_put_req(prev);
        } else {
@@ -9255,10 +9299,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
        struct io_buffer *buf;
        unsigned long index;
 
-       xa_for_each(&ctx->io_buffers, index, buf) {
+       xa_for_each(&ctx->io_buffers, index, buf)
                __io_remove_buffers(ctx, buf, index, -1U);
-               cond_resched();
-       }
 }
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
@@ -9562,19 +9604,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_task_cancel *cancel = data;
-       bool ret;
-
-       if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
-               struct io_ring_ctx *ctx = req->ctx;
 
-               /* protect against races with linked timeouts */
-               spin_lock(&ctx->completion_lock);
-               ret = io_match_task(req, cancel->task, cancel->all);
-               spin_unlock(&ctx->completion_lock);
-       } else {
-               ret = io_match_task(req, cancel->task, cancel->all);
-       }
-       return ret;
+       return io_match_task_safe(req, cancel->task, cancel->all);
 }
 
 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
@@ -9586,7 +9617,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
 
        spin_lock(&ctx->completion_lock);
        list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-               if (io_match_task(de->req, task, cancel_all)) {
+               if (io_match_task_safe(de->req, task, cancel_all)) {
                        list_cut_position(&list, &ctx->defer_list, &de->list);
                        break;
                }
@@ -9764,7 +9795,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
        }
        if (wq) {
                /*
-                * Must be after io_uring_del_task_file() (removes nodes under
+                * Must be after io_uring_del_tctx_node() (removes nodes under
                 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
                 */
                io_wq_put_and_exit(wq);
@@ -9793,7 +9824,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 
 /*
  * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
  */
 static __cold void io_uring_cancel_generic(bool cancel_all,
                                           struct io_sq_data *sqd)
@@ -9835,8 +9866,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
                                                             cancel_all);
                }
 
-               prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+               io_run_task_work();
                io_uring_drop_tctx_refs(current);
+
                /*
                 * If we've seen completions, retry without waiting. This
                 * avoids a race where a completion comes in before we did
index 1753c26c8e76e300ff59c296d2b6b88d2b968db4..71a36ae120ee8a2312a8ee9f85d14f6e1a0079fc 100644 (file)
@@ -205,7 +205,16 @@ struct iomap_readpage_ctx {
        struct readahead_control *rac;
 };
 
-static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @page: page to copy to
+ *
+ * Copy the inline data in @iter into @page and zero out the rest of the page.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
                struct page *page)
 {
        const struct iomap *iomap = iomap_iter_srcmap(iter);
@@ -214,7 +223,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
        void *addr;
 
        if (PageUptodate(page))
-               return PAGE_SIZE - poff;
+               return 0;
 
        if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
                return -EIO;
@@ -231,7 +240,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
        memset(addr + size, 0, PAGE_SIZE - poff - size);
        kunmap_local(addr);
        iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
-       return PAGE_SIZE - poff;
+       return 0;
 }
 
 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
@@ -257,7 +266,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
        sector_t sector;
 
        if (iomap->type == IOMAP_INLINE)
-               return min(iomap_read_inline_data(iter, page), length);
+               return iomap_read_inline_data(iter, page);
 
        /* zero post-eof blocks as the page may be mapped */
        iop = iomap_page_create(iter->inode, page);
@@ -370,6 +379,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
                        ctx->cur_page_in_bio = false;
                }
                ret = iomap_readpage_iter(iter, ctx, done);
+               if (ret <= 0)
+                       return ret;
        }
 
        return done;
@@ -580,15 +591,10 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
                struct page *page)
 {
-       int ret;
-
        /* needs more work for the tailpacking case; disable for now */
        if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
                return -EIO;
-       ret = iomap_read_inline_data(iter, page);
-       if (ret < 0)
-               return ret;
-       return 0;
+       return iomap_read_inline_data(iter, page);
 }
 
 static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
index 121f8e8c70aca16d6ce134724f129e0cb4d651ec..49c9da37315c8383c3ed6de855dcaa14a5604322 100644 (file)
@@ -1697,8 +1697,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
        negblob_off = le16_to_cpu(req->SecurityBufferOffset);
        negblob_len = le16_to_cpu(req->SecurityBufferLength);
        if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
-           negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
-               return -EINVAL;
+           negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+               rc = -EINVAL;
+               goto out_err;
+       }
 
        negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
                        negblob_off);
@@ -4457,6 +4459,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
                         &stat);
        file_info = (struct smb2_file_stream_info *)rsp->Buffer;
 
+       buf_free_len =
+               smb2_calc_max_out_buf_len(work, 8,
+                                         le32_to_cpu(req->OutputBufferLength));
+       if (buf_free_len < 0)
+               goto out;
+
        xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
        if (xattr_list_len < 0) {
                goto out;
@@ -4465,12 +4473,6 @@ static void get_file_stream_info(struct ksmbd_work *work,
                goto out;
        }
 
-       buf_free_len =
-               smb2_calc_max_out_buf_len(work, 8,
-                                         le32_to_cpu(req->OutputBufferLength));
-       if (buf_free_len < 0)
-               goto out;
-
        while (idx < xattr_list_len) {
                stream_name = xattr_list + idx;
                streamlen = strlen(stream_name);
@@ -4496,8 +4498,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
                                     ":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
 
                next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
-               if (next > buf_free_len)
+               if (next > buf_free_len) {
+                       kfree(stream_buf);
                        break;
+               }
 
                file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
                streamlen  = smbConvertToUTF16((__le16 *)file_info->StreamName,
@@ -4514,6 +4518,7 @@ static void get_file_stream_info(struct ksmbd_work *work,
                file_info->NextEntryOffset = cpu_to_le32(next);
        }
 
+out:
        if (!S_ISDIR(stat.mode) &&
            buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
                file_info = (struct smb2_file_stream_info *)
@@ -4522,14 +4527,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
                                              "::$DATA", 7, conn->local_nls, 0);
                streamlen *= 2;
                file_info->StreamNameLength = cpu_to_le32(streamlen);
-               file_info->StreamSize = 0;
-               file_info->StreamAllocationSize = 0;
+               file_info->StreamSize = cpu_to_le64(stat.size);
+               file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
                nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
        }
 
        /* last entry offset should be 0 */
        file_info->NextEntryOffset = 0;
-out:
        kvfree(xattr_list);
 
        rsp->OutputBufferLength = cpu_to_le32(nbytes);
@@ -5068,7 +5072,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
        if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
                              PROTECTED_DACL_SECINFO |
                              UNPROTECTED_DACL_SECINFO)) {
-               pr_err("Unsupported addition info: 0x%x)\n",
+               ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n",
                       addition_info);
 
                pntsd->revision = cpu_to_le16(1);
index 9320a42dfaf9737629045c4d81908f96993edd39..75c76cbb27ccfd130943f8a9810f5854e4cb09f6 100644 (file)
@@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
        netfs_rreq_do_write_to_cache(rreq);
 }
 
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
-                                     bool was_async)
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
 {
-       if (was_async) {
-               rreq->work.func = netfs_rreq_write_to_cache_work;
-               if (!queue_work(system_unbound_wq, &rreq->work))
-                       BUG();
-       } else {
-               netfs_rreq_do_write_to_cache(rreq);
-       }
+       rreq->work.func = netfs_rreq_write_to_cache_work;
+       if (!queue_work(system_unbound_wq, &rreq->work))
+               BUG();
 }
 
 /*
@@ -558,7 +553,7 @@ again:
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
        if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
-               return netfs_rreq_write_to_cache(rreq, was_async);
+               return netfs_rreq_write_to_cache(rreq);
 
        netfs_rreq_completed(rreq, was_async);
 }
@@ -960,7 +955,7 @@ int netfs_readpage(struct file *file,
        rreq = netfs_alloc_read_request(ops, netfs_priv, file);
        if (!rreq) {
                if (netfs_priv)
-                       ops->cleanup(netfs_priv, folio_file_mapping(folio));
+                       ops->cleanup(folio_file_mapping(folio), netfs_priv);
                folio_unlock(folio);
                return -ENOMEM;
        }
@@ -1008,8 +1003,8 @@ out:
 }
 EXPORT_SYMBOL(netfs_readpage);
 
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
+/*
+ * Prepare a folio for writing without reading first
  * @folio: The folio being prepared
  * @pos: starting position for the write
  * @len: length of write
@@ -1191,7 +1186,7 @@ have_folio:
                goto error;
 have_folio_no_wait:
        if (netfs_priv)
-               ops->cleanup(netfs_priv, mapping);
+               ops->cleanup(mapping, netfs_priv);
        *_folio = folio;
        _leave(" = 0");
        return 0;
@@ -1202,7 +1197,7 @@ error:
        folio_unlock(folio);
        folio_put(folio);
        if (netfs_priv)
-               ops->cleanup(netfs_priv, mapping);
+               ops->cleanup(mapping, netfs_priv);
        _leave(" = %d", ret);
        return ret;
 }
index dd53704c3f40400284e3c81421e7e4fbbcfc8f67..fda530d5e76405902d0f723f36c3d599fa48178d 100644 (file)
@@ -219,6 +219,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
                                          NFS_INO_DATA_INVAL_DEFER);
        else if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
                nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+       trace_nfs_set_cache_invalid(inode, 0);
 }
 EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
 
index 08355b66e7cb812b19b3e0d29b3ec5f8d355a479..8b21ff1be7175488975ef1029d193df2abd11b36 100644 (file)
@@ -289,7 +289,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
        loff_t newsize = pos + len;
        loff_t end = newsize - 1;
 
-       truncate_pagecache_range(inode, pos, end);
+       WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
+                               pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
+
        spin_lock(&inode->i_lock);
        if (newsize > i_size_read(inode))
                i_size_write(inode, newsize);
index c8bad735e4c19dc00a7f5437d756a790f44ac001..271e5f92ed0195982d50f22952519a19e274b1da 100644 (file)
@@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
        status = decode_clone(xdr);
        if (status)
                goto out;
-       status = decode_getfattr(xdr, res->dst_fattr, res->server);
-
+       decode_getfattr(xdr, res->dst_fattr, res->server);
 out:
        res->rpc_status = status;
        return status;
index ecc4594299d6fef16bdb4140fa7ef99acecab7f3..f63dfa01001c97394180d7a432753587f73f75af 100644 (file)
@@ -1998,6 +1998,10 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
                dprintk("%s: exit with error %d for server %s\n",
                                __func__, -EPROTONOSUPPORT, clp->cl_hostname);
                return -EPROTONOSUPPORT;
+       case -ENOSPC:
+               if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+                       nfs_mark_client_ready(clp, -EIO);
+               return -EIO;
        case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
                                 * in nfs4_exchange_id */
        default:
index 21dac847f1e404d3285d09e546e7cff6b03763aa..b3aee261801e5d352e080904dfa636850d6122d6 100644 (file)
@@ -162,6 +162,7 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
 DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
 DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
 DEFINE_NFS_INODE_EVENT(nfs_access_enter);
+DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid);
 
 TRACE_EVENT(nfs_access_exit,
                TP_PROTO(
index 6fedc49726bf7c7b1d27ff692c3913fefbb1ccc9..c634483d85d2a312db0b69b61294d35bab845d9d 100644 (file)
@@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = {
 int
 register_cld_notifier(void)
 {
+       WARN_ON(!nfsd_net_id);
        return rpc_pipefs_notifier_register(&nfsd4_cld_block);
 }
 
index bfad94c70b84bcb0b95e55504dfd9c76e5d25d0f..1956d377d1a608e3a43ac87eff0ba770dcaddc62 100644 (file)
@@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
        return 0;
 }
 
+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+       return !(list_empty(&dp->dl_perfile));
+}
+
 static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
@@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
 
        lockdep_assert_held(&state_lock);
 
-       if (list_empty(&dp->dl_perfile))
+       if (!delegation_hashed(dp))
                return false;
 
        dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
@@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
         * queued for a lease break. Don't queue it again.
         */
        spin_lock(&state_lock);
-       if (dp->dl_time == 0) {
+       if (delegation_hashed(dp) && dp->dl_time == 0) {
                dp->dl_time = ktime_get_boottime_seconds();
                list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
        }
index b2a1d969a172d985726ebef72494605b952e0ac0..5a93a5db4fb0a5ecf1fa0da4d3e1a011f4dd0de0 100644 (file)
@@ -288,11 +288,8 @@ nfsd4_decode_bitmap4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen)
        p = xdr_inline_decode(argp->xdr, count << 2);
        if (!p)
                return nfserr_bad_xdr;
-       i = 0;
-       while (i < count)
-               bmval[i++] = be32_to_cpup(p++);
-       while (i < bmlen)
-               bmval[i++] = 0;
+       for (i = 0; i < bmlen; i++)
+               bmval[i] = (i < count) ? be32_to_cpup(p++) : 0;
 
        return nfs_ok;
 }
index af8531c3854a93204bc2f785068ca1d429628a7e..51a49e0cfe3762e84f324f900b82f890dcfa5425 100644 (file)
@@ -1521,12 +1521,9 @@ static int __init init_nfsd(void)
        int retval;
        printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
 
-       retval = register_cld_notifier();
-       if (retval)
-               return retval;
        retval = nfsd4_init_slabs();
        if (retval)
-               goto out_unregister_notifier;
+               return retval;
        retval = nfsd4_init_pnfs();
        if (retval)
                goto out_free_slabs;
@@ -1545,9 +1542,14 @@ static int __init init_nfsd(void)
                goto out_free_exports;
        retval = register_pernet_subsys(&nfsd_net_ops);
        if (retval < 0)
+               goto out_free_filesystem;
+       retval = register_cld_notifier();
+       if (retval)
                goto out_free_all;
        return 0;
 out_free_all:
+       unregister_pernet_subsys(&nfsd_net_ops);
+out_free_filesystem:
        unregister_filesystem(&nfsd_fs_type);
 out_free_exports:
        remove_proc_entry("fs/nfs/exports", NULL);
@@ -1561,13 +1563,12 @@ out_free_pnfs:
        nfsd4_exit_pnfs();
 out_free_slabs:
        nfsd4_free_slabs();
-out_unregister_notifier:
-       unregister_cld_notifier();
        return retval;
 }
 
 static void __exit exit_nfsd(void)
 {
+       unregister_cld_notifier();
        unregister_pernet_subsys(&nfsd_net_ops);
        nfsd_drc_slab_free();
        remove_proc_entry("fs/nfs/exports", NULL);
@@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void)
        nfsd4_free_slabs();
        nfsd4_exit_pnfs();
        unregister_filesystem(&nfsd_fs_type);
-       unregister_cld_notifier();
 }
 
 MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
index 1667a7e590d86315a6f27b3a6a9fbd930316c34f..f93e69a612833f8458d1020408d27ef15b2e921d 100644 (file)
@@ -52,6 +52,7 @@ config NTFS_DEBUG
 config NTFS_RW
        bool "NTFS write support"
        depends on NTFS_FS
+       depends on PAGE_SIZE_LESS_THAN_64KB
        help
          This enables the partial, but safe, write support in the NTFS driver.
 
index 30a3b66f475aeed385212b16b0c68ee473cf49fc..509f85148fee822ae3058e2342aeadfae8287001 100644 (file)
@@ -154,9 +154,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
                        nr_bytes = count;
 
                /* If pfn is not ram, return zeros for sparse dump files */
-               if (!pfn_is_ram(pfn))
-                       memset(buf, 0, nr_bytes);
-               else {
+               if (!pfn_is_ram(pfn)) {
+                       tmp = 0;
+                       if (!userbuf)
+                               memset(buf, 0, nr_bytes);
+                       else if (clear_user(buf, nr_bytes))
+                               tmp = -EFAULT;
+               } else {
                        if (encrypted)
                                tmp = copy_oldmem_page_encrypted(pfn, buf,
                                                                 nr_bytes,
@@ -165,12 +169,12 @@ ssize_t read_from_oldmem(char *buf, size_t count,
                        else
                                tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                       offset, userbuf);
-
-                       if (tmp < 0) {
-                               up_read(&vmcore_cb_rwsem);
-                               return tmp;
-                       }
                }
+               if (tmp < 0) {
+                       up_read(&vmcore_cb_rwsem);
+                       return tmp;
+               }
+
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
index 328da35da390859dd3687eaed0b078299e1ca33b..8adabde685f132f2375114bef345e27db85a9265 100644 (file)
@@ -173,7 +173,6 @@ config PSTORE_BLK
        tristate "Log panic/oops to a block device"
        depends on PSTORE
        depends on BLOCK
-       depends on BROKEN
        select PSTORE_ZONE
        default n
        help
index 5d1fbaffd66a1e16449d8bb964deb33a490b78f3..4ae0cfcd15f20bd10a8a62b40d324d721b1e90cb 100644 (file)
@@ -309,7 +309,7 @@ static int __init __best_effort_init(void)
        if (ret)
                kfree(best_effort_dev);
        else
-               pr_info("attached %s (%zu) (no dedicated panic_write!)\n",
+               pr_info("attached %s (%lu) (no dedicated panic_write!)\n",
                        blkdev, best_effort_dev->zone.total_size);
 
        return ret;
index 040e1cf9052826ef08bf269aeb63d11c073d16b4..65ce0e72e7b9588d344ee715cee6838f7e1cb4e1 100644 (file)
 
 void signalfd_cleanup(struct sighand_struct *sighand)
 {
-       wait_queue_head_t *wqh = &sighand->signalfd_wqh;
-       /*
-        * The lockless check can race with remove_wait_queue() in progress,
-        * but in this case its caller should run under rcu_read_lock() and
-        * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
-        */
-       if (likely(!waitqueue_active(wqh)))
-               return;
-
-       /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
-       wake_up_poll(wqh, EPOLLHUP | POLLFREE);
+       wake_up_pollfree(&sighand->signalfd_wqh);
 }
 
 struct signalfd_ctx {
index 85ba15a60b13b36887e0bb3901f8125eb1f6b97b..043e4cb839fa235f7c2e05fac3fe558e82e9d4f2 100644 (file)
@@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l
        ctx->y = y;
 }
 EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
-
-static int __init
-init_smbfs_common(void)
-{
-       return 0;
-}
-static void __init
-exit_smbfs_common(void)
-{
-}
-
-module_init(init_smbfs_common)
-module_exit(exit_smbfs_common)
index 925a621b432e3c0c74e4fa329ad9689484bc9518..3616839c5c4b64e1c8a8c389bc3292b8262d7f5e 100644 (file)
@@ -161,6 +161,77 @@ struct tracefs_fs_info {
        struct tracefs_mount_opts mount_opts;
 };
 
+static void change_gid(struct dentry *dentry, kgid_t gid)
+{
+       if (!dentry->d_inode)
+               return;
+       dentry->d_inode->i_gid = gid;
+}
+
+/*
+ * Taken from d_walk, but without he need for handling renames.
+ * Nothing can be renamed while walking the list, as tracefs
+ * does not support renames. This is only called when mounting
+ * or remounting the file system, to set all the files to
+ * the given gid.
+ */
+static void set_gid(struct dentry *parent, kgid_t gid)
+{
+       struct dentry *this_parent;
+       struct list_head *next;
+
+       this_parent = parent;
+       spin_lock(&this_parent->d_lock);
+
+       change_gid(this_parent, gid);
+repeat:
+       next = this_parent->d_subdirs.next;
+resume:
+       while (next != &this_parent->d_subdirs) {
+               struct list_head *tmp = next;
+               struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+               next = tmp->next;
+
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+               change_gid(dentry, gid);
+
+               if (!list_empty(&dentry->d_subdirs)) {
+                       spin_unlock(&this_parent->d_lock);
+                       spin_release(&dentry->d_lock.dep_map, _RET_IP_);
+                       this_parent = dentry;
+                       spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+                       goto repeat;
+               }
+               spin_unlock(&dentry->d_lock);
+       }
+       /*
+        * All done at this level ... ascend and resume the search.
+        */
+       rcu_read_lock();
+ascend:
+       if (this_parent != parent) {
+               struct dentry *child = this_parent;
+               this_parent = child->d_parent;
+
+               spin_unlock(&child->d_lock);
+               spin_lock(&this_parent->d_lock);
+
+               /* go into the first sibling still alive */
+               do {
+                       next = child->d_child.next;
+                       if (next == &this_parent->d_subdirs)
+                               goto ascend;
+                       child = list_entry(next, struct dentry, d_child);
+               } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
+               rcu_read_unlock();
+               goto resume;
+       }
+       rcu_read_unlock();
+       spin_unlock(&this_parent->d_lock);
+       return;
+}
+
 static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
 {
        substring_t args[MAX_OPT_ARGS];
@@ -193,6 +264,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
                        if (!gid_valid(gid))
                                return -EINVAL;
                        opts->gid = gid;
+                       set_gid(tracefs_mount->mnt_root, gid);
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
@@ -414,6 +486,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
        inode->i_mode = mode;
        inode->i_fop = fops ? fops : &tracefs_file_operations;
        inode->i_private = data;
+       inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+       inode->i_gid = d_inode(dentry->d_parent)->i_gid;
        d_instantiate(dentry, inode);
        fsnotify_create(dentry->d_parent->d_inode, dentry);
        return end_creating(dentry);
@@ -436,6 +510,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
        inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
        inode->i_op = ops;
        inode->i_fop = &simple_dir_operations;
+       inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+       inode->i_gid = d_inode(dentry->d_parent)->i_gid;
 
        /* directory inodes start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
index 70abdfad2df171a72fa63cc70ede2f8dcbd51e3c..42e3e551fa4c34f7d636b40524a50cb55dc8fad6 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/bio.h>
+#include <linux/iversion.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
@@ -43,7 +44,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        struct fileIdentDesc *fi = NULL;
        struct fileIdentDesc cfi;
        udf_pblk_t block, iblock;
-       loff_t nf_pos;
+       loff_t nf_pos, emit_pos = 0;
        int flen;
        unsigned char *fname = NULL, *copy_name = NULL;
        unsigned char *nameptr;
@@ -57,6 +58,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        int i, num, ret = 0;
        struct extent_position epos = { NULL, 0, {0, 0} };
        struct super_block *sb = dir->i_sb;
+       bool pos_valid = false;
 
        if (ctx->pos == 0) {
                if (!dir_emit_dot(file, ctx))
@@ -67,6 +69,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        if (nf_pos >= size)
                goto out;
 
+       /*
+        * Something changed since last readdir (either lseek was called or dir
+        * changed)?  We need to verify the position correctly points at the
+        * beginning of some dir entry so that the directory parsing code does
+        * not get confused. Since UDF does not have any reliable way of
+        * identifying beginning of dir entry (names are under user control),
+        * we need to scan the directory from the beginning.
+        */
+       if (!inode_eq_iversion(dir, file->f_version)) {
+               emit_pos = nf_pos;
+               nf_pos = 0;
+       } else {
+               pos_valid = true;
+       }
+
        fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
        if (!fname) {
                ret = -ENOMEM;
@@ -122,13 +139,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 
        while (nf_pos < size) {
                struct kernel_lb_addr tloc;
+               loff_t cur_pos = nf_pos;
 
-               ctx->pos = (nf_pos >> 2) + 1;
+               /* Update file position only if we got past the current one */
+               if (nf_pos >= emit_pos) {
+                       ctx->pos = (nf_pos >> 2) + 1;
+                       pos_valid = true;
+               }
 
                fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
                                        &elen, &offset);
                if (!fi)
                        goto out;
+               /* Still not at offset where user asked us to read from? */
+               if (cur_pos < emit_pos)
+                       continue;
 
                liu = le16_to_cpu(cfi.lengthOfImpUse);
                lfi = cfi.lengthFileIdent;
@@ -186,8 +211,11 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        } /* end while */
 
        ctx->pos = (nf_pos >> 2) + 1;
+       pos_valid = true;
 
 out:
+       if (pos_valid)
+               file->f_version = inode_query_iversion(dir);
        if (fibh.sbh != fibh.ebh)
                brelse(fibh.ebh);
        brelse(fibh.sbh);
index caeef08efed23c91ffe30a618fa45b58e65b0ad6..0ed4861b038f6a3dc06fcf3d7eb26444981aa21b 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/sched.h>
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
+#include <linux/iversion.h>
 
 static inline int udf_match(int len1, const unsigned char *name1, int len2,
                            const unsigned char *name2)
@@ -134,6 +135,8 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
                        mark_buffer_dirty_inode(fibh->ebh, inode);
                mark_buffer_dirty_inode(fibh->sbh, inode);
        }
+       inode_inc_iversion(inode);
+
        return 0;
 }
 
index 34247fba6df91a433bbc589e9e2c0052b1576fe3..f26b5e0b84b696b65a2614f313eb419947acc471 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/crc-itu-t.h>
 #include <linux/log2.h>
 #include <asm/byteorder.h>
+#include <linux/iversion.h>
 
 #include "udf_sb.h"
 #include "udf_i.h"
@@ -149,6 +150,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
        init_rwsem(&ei->i_data_sem);
        ei->cached_extent.lstart = -1;
        spin_lock_init(&ei->i_extent_cache_lock);
+       inode_set_iversion(&ei->vfs_inode, 1);
 
        return &ei->vfs_inode;
 }
index fbc9d816882ce64269fea3d8d915554da2182929..23523b802539e145fc59184f2bed1ba58195c5b3 100644 (file)
@@ -1077,21 +1077,18 @@ xfs_attr_node_hasname(
 
        state = xfs_da_state_alloc(args);
        if (statep != NULL)
-               *statep = NULL;
+               *statep = state;
 
        /*
         * Search to see if name exists, and get back a pointer to it.
         */
        error = xfs_da3_node_lookup_int(state, &retval);
-       if (error) {
-               xfs_da_state_free(state);
-               return error;
-       }
+       if (error)
+               retval = error;
 
-       if (statep != NULL)
-               *statep = state;
-       else
+       if (!statep)
                xfs_da_state_free(state);
+
        return retval;
 }
 
@@ -1112,7 +1109,7 @@ xfs_attr_node_addname_find_attr(
         */
        retval = xfs_attr_node_hasname(args, &dac->da_state);
        if (retval != -ENOATTR && retval != -EEXIST)
-               return retval;
+               goto error;
 
        if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
                goto error;
@@ -1337,7 +1334,7 @@ int xfs_attr_node_removename_setup(
 
        error = xfs_attr_node_hasname(args, state);
        if (error != -EEXIST)
-               return error;
+               goto out;
        error = 0;
 
        ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
index e1472004170e8fdf02975912fc1cfe003f4c5271..da4af2142a2b498a374733d5d1c2e0f3bd774af4 100644 (file)
@@ -289,22 +289,6 @@ xfs_perag_clear_inode_tag(
        trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
 }
 
-static inline void
-xfs_inew_wait(
-       struct xfs_inode        *ip)
-{
-       wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
-       DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
-
-       do {
-               prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
-               if (!xfs_iflags_test(ip, XFS_INEW))
-                       break;
-               schedule();
-       } while (true);
-       finish_wait(wq, &wait.wq_entry);
-}
-
 /*
  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
  * part of the structure. This is made more complex by the fact we store
@@ -368,18 +352,13 @@ xfs_iget_recycle(
        ASSERT(!rwsem_is_locked(&inode->i_rwsem));
        error = xfs_reinit_inode(mp, inode);
        if (error) {
-               bool    wake;
-
                /*
                 * Re-initializing the inode failed, and we are in deep
                 * trouble.  Try to re-add it to the reclaim list.
                 */
                rcu_read_lock();
                spin_lock(&ip->i_flags_lock);
-               wake = !!__xfs_iflags_test(ip, XFS_INEW);
                ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
-               if (wake)
-                       wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
                ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
index 64b9bf33480659fcbaa0a5c8c5ad259c9a44cd55..6771f357ad2cce9738c4bdbc9720763e1a3f5025 100644 (file)
@@ -3122,7 +3122,6 @@ xfs_rename(
         * appropriately.
         */
        if (flags & RENAME_WHITEOUT) {
-               ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
                error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
                if (error)
                        return error;
index e635a3d64cba212ef3fd730a821a5d8723ee1f6b..c447bf04205a8eeeba68b279871b5edb240b384e 100644 (file)
@@ -231,8 +231,7 @@ static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip)
 #define XFS_IRECLAIM           (1 << 0) /* started reclaiming this inode */
 #define XFS_ISTALE             (1 << 1) /* inode has been staled */
 #define XFS_IRECLAIMABLE       (1 << 2) /* inode can be reclaimed */
-#define __XFS_INEW_BIT         3        /* inode has just been allocated */
-#define XFS_INEW               (1 << __XFS_INEW_BIT)
+#define XFS_INEW               (1 << 3) /* inode has just been allocated */
 #define XFS_IPRESERVE_DM_FIELDS        (1 << 4) /* has legacy DMAPI fields set */
 #define XFS_ITRUNCATED         (1 << 5) /* truncated down so flush-on-close */
 #define XFS_IDIRTY_RELEASE     (1 << 6) /* dirty release already seen */
@@ -492,7 +491,6 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
        xfs_iflags_clear(ip, XFS_INEW);
        barrier();
        unlock_new_inode(VFS_I(ip));
-       wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 }
 
 static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
index e21459f9923a8a8ef47158e1099e8f772abb35dc..778b57b1f020f051aef1806e4bcf4c5ff7b9d7b4 100644 (file)
@@ -1765,7 +1765,10 @@ static int
 xfs_remount_ro(
        struct xfs_mount        *mp)
 {
-       int error;
+       struct xfs_icwalk       icw = {
+               .icw_flags      = XFS_ICWALK_FLAG_SYNC,
+       };
+       int                     error;
 
        /*
         * Cancel background eofb scanning so it cannot race with the final
@@ -1773,8 +1776,13 @@ xfs_remount_ro(
         */
        xfs_blockgc_stop(mp);
 
-       /* Get rid of any leftover CoW reservations... */
-       error = xfs_blockgc_free_space(mp, NULL);
+       /*
+        * Clear out all remaining COW staging extents and speculative post-EOF
+        * preallocations so that we don't leave inodes requiring inactivation
+        * cleanups during reclaim on a read-only mount.  We must process every
+        * cached inode, so this requires a synchronous cache scan.
+        */
+       error = xfs_blockgc_free_space(mp, &icw);
        if (error) {
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return error;
index fedc0dfa4877c7027a553a926be5d65fbff28a9b..4f07afacbc23953861443be0939fb8d70116396e 100644 (file)
@@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page)
 {
 }
 
-static inline void flush_dcache_folio(struct folio *folio) { }
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-#endif
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-void flush_dcache_folio(struct folio *folio);
 #endif
 
 #ifndef flush_dcache_mmap_lock
index 143ce7e0bee130a56eda16b5862cd82e5bcddf4b..b28f8790192a22aba226ada8f6f96a8b12b30c12 100644 (file)
@@ -974,6 +974,15 @@ static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
        return -ENODEV;
 }
 
+static inline int acpi_register_wakeup_handler(int wake_irq,
+       bool (*wakeup)(void *context), void *context)
+{
+       return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+       bool (*wakeup)(void *context), void *context) { }
+
 #endif /* !CONFIG_ACPI */
 
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -1173,7 +1182,6 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
 
 struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
                                            struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
 
 struct acpi_probe_entry;
 typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1278,12 +1286,6 @@ acpi_get_next_subnode(const struct fwnode_handle *fwnode,
        return NULL;
 }
 
-static inline struct fwnode_handle *
-acpi_node_get_parent(const struct fwnode_handle *fwnode)
-{
-       return NULL;
-}
-
 static inline struct fwnode_handle *
 acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
                             struct fwnode_handle *prev)
index f715e8863f4de97703ccf9397eb78f78dd0770e3..755f38e893be1b141e932403fc12d6ff93086795 100644 (file)
@@ -193,7 +193,7 @@ struct bpf_map {
        atomic64_t usercnt;
        struct work_struct work;
        struct mutex freeze_mutex;
-       u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
+       atomic64_t writecnt;
 };
 
 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
 struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                          struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
 #define BPF_DISPATCHER_INIT(_name) {                           \
        .mutex = __MUTEX_INITIALIZER(_name.mutex),              \
        .func = &_name##_func,                                  \
@@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex;
  * kprobes, tracepoints) to prevent deadlocks on map operations as any of
  * these events can happen inside a region which holds a map bucket lock
  * and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
  */
 static inline void bpf_disable_instrumentation(void)
 {
        migrate_disable();
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               this_cpu_inc(bpf_prog_active);
-       else
-               __this_cpu_inc(bpf_prog_active);
+       this_cpu_inc(bpf_prog_active);
 }
 
 static inline void bpf_enable_instrumentation(void)
 {
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               this_cpu_dec(bpf_prog_active);
-       else
-               __this_cpu_dec(bpf_prog_active);
+       this_cpu_dec(bpf_prog_active);
        migrate_enable();
 }
 
@@ -1419,6 +1408,7 @@ void bpf_map_put(struct bpf_map *map);
 void *bpf_map_area_alloc(u64 size, int numa_node);
 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 int  generic_map_lookup_batch(struct bpf_map *map,
                              const union bpf_attr *attr,
index 203eef993d763fb92e6b870ec5a8f3e76d169485..0e1b6281fd8f6af0b9358e26bc814c83fc392921 100644 (file)
@@ -245,7 +245,10 @@ struct kfunc_btf_id_set {
        struct module *owner;
 };
 
-struct kfunc_btf_id_list;
+struct kfunc_btf_id_list {
+       struct list_head list;
+       struct mutex mutex;
+};
 
 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -254,6 +257,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
                                 struct kfunc_btf_id_set *s);
 bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
                              struct module *owner);
+
+extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
+extern struct kfunc_btf_id_list prog_test_kfunc_list;
 #else
 static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
                                             struct kfunc_btf_id_set *s)
@@ -268,13 +274,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
 {
        return false;
 }
+
+static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused;
+static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused;
 #endif
 
 #define DEFINE_KFUNC_BTF_ID_SET(set, name)                                     \
        struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set),     \
                                         THIS_MODULE }
 
-extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
-extern struct kfunc_btf_id_list prog_test_kfunc_list;
-
 #endif
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644 (file)
index 0000000..fef8b60
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#endif /* _LINUX_CACHEFLUSH_H */
index 2f909ed084c63e5e2dc429306eade714b93ba868..4ff37cb763ae2a243385ac0580e07574ff7546a8 100644 (file)
@@ -3,7 +3,6 @@
 #define _LINUX_CACHEINFO_H
 
 #include <linux/bitops.h>
-#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/smp.h>
 
index 8eacf67eb212e26fe71da59777df190d48929d6d..039e7e0c7378d68ca4ed278a2927668571ce11ac 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include <linux/math.h>
+#include <linux/sched.h>
 
 extern unsigned long loops_per_jiffy;
 
@@ -58,7 +59,18 @@ void calibrate_delay(void);
 void __attribute__((weak)) calibration_delay_done(void);
 void msleep(unsigned int msecs);
 unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+                       unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+       usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+       usleep_range_state(min, max, TASK_IDLE);
+}
 
 static inline void ssleep(unsigned int seconds)
 {
index a498ebcf49933d309fff25b724d1a9244b807b3d..15e7c5e15d629545a757752bf61ffd26c8f24f4b 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/klist.h>
 #include <linux/pm.h>
 #include <linux/device/bus.h>
+#include <linux/module.h>
 
 /**
  * enum probe_type - device driver probe type to try
index 24b7ed2677afd26b0ffd1ce80f34469adc7129b0..7f1e88e3e2b5445ac4c3d5c3c8bab6d4d6f15b04 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_FILTER_H__
 
 #include <linux/atomic.h>
+#include <linux/bpf.h>
 #include <linux/refcount.h>
 #include <linux/compat.h>
 #include <linux/skbuff.h>
@@ -26,7 +27,6 @@
 
 #include <asm/byteorder.h>
 #include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
 
 struct sk_buff;
 struct sock;
@@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
  * This uses migrate_disable/enable() explicitly to document that the
  * invocation of a BPF program does not require reentrancy protection
  * against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
  */
 static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
                                          const void *ctx)
index 1cb616fc11053beda5c7dd6be6da84122a3adc61..bbf812ce89a8c0739cb2e69716183273b69b2573 100644 (file)
@@ -2518,7 +2518,6 @@ struct file_system_type {
 #define FS_USERNS_MOUNT                8       /* Can be mounted by userns root */
 #define FS_DISALLOW_NOTIFY_PERM        16      /* Disable fanotify permission events */
 #define FS_ALLOW_IDMAP         32      /* FS has been updated to handle vfs idmappings. */
-#define FS_THP_SUPPORT         8192    /* Remove once all fs converted */
 #define FS_RENAME_DOES_D_MOVE  32768   /* FS will handle d_move() during rename() internally. */
        int (*init_fs_context)(struct fs_context *);
        const struct fs_parameter_spec *parameters;
index 9e067f937dbc24a82b1126542a2437078b0101c9..f453be385bd47f16a311b1df46e78df63a903022 100644 (file)
@@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
        return hdev->ll_driver == driver;
 }
 
+static inline bool hid_is_usb(struct hid_device *hdev)
+{
+       return hid_is_using_ll_driver(hdev, &usb_hid_driver);
+}
+
 #define        PM_HINT_FULLON  1<<5
 #define PM_HINT_NORMAL 1<<1
 
index 25aff0f2ed0b0053b86467da3b2e157b07495760..39bb9b47fa9cd4c633366f2097d5239a4323871e 100644 (file)
@@ -5,12 +5,11 @@
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
+#include <linux/cacheflush.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 
-#include <asm/cacheflush.h>
-
 #include "highmem-internal.h"
 
 /**
@@ -231,10 +230,10 @@ static inline void tag_clear_highpage(struct page *page)
  * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
  * If we pass in a head page, we can zero up to the size of the compound page.
  */
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#ifdef CONFIG_HIGHMEM
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
                unsigned start2, unsigned end2);
-#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#else
 static inline void zero_user_segments(struct page *page,
                unsigned start1, unsigned end1,
                unsigned start2, unsigned end2)
@@ -254,7 +253,7 @@ static inline void zero_user_segments(struct page *page,
        for (i = 0; i < compound_nr(page); i++)
                flush_dcache_page(page + i);
 }
-#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#endif
 
 static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
@@ -364,4 +363,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
        kunmap_local(addr);
 }
 
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+               size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+       zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+               size_t start, size_t xend)
+{
+       zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+               size_t start, size_t length)
+{
+       zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
 #endif /* _LINUX_HIGHMEM_H */
index c137396129db632786e6b5720646226b7814addd..ba025ae278827a2b26df2f682acf7ec32d0784cc 100644 (file)
@@ -128,6 +128,13 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
                css_get(resv_map->css);
 }
 
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+                                               struct resv_map *resv_map)
+{
+       if (resv_map->css)
+               css_put(resv_map->css);
+}
+
 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                        struct hugetlb_cgroup **ptr);
 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -211,6 +218,11 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
 {
 }
 
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+                                               struct resv_map *resv_map)
+{
+}
+
 static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                               struct hugetlb_cgroup **ptr)
 {
index aee8ff4739b146932be3a44e025e24ffc1d417d1..f45f13304addd6dc1a215654b8d14f7fe3e33642 100644 (file)
@@ -9,7 +9,7 @@
 #define _INTEL_ISH_CLIENT_IF_H_
 
 #include <linux/device.h>
-#include <linux/uuid.h>
+#include <linux/mod_devicetable.h>
 
 struct ishtp_cl_device;
 struct ishtp_device;
@@ -40,7 +40,7 @@ enum cl_state {
 struct ishtp_cl_driver {
        struct device_driver driver;
        const char *name;
-       const guid_t *guid;
+       const struct ishtp_device_id *id;
        int (*probe)(struct ishtp_cl_device *dev);
        void (*remove)(struct ishtp_cl_device *dev);
        int (*reset)(struct ishtp_cl_device *dev);
index 05e22770af51728b4fe4c8268615296bebfb622a..b75395ec8d521f6feadb454e42cac0204f549578 100644 (file)
@@ -131,6 +131,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
        return ns;
 }
 
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+       if (ns) {
+               if (refcount_inc_not_zero(&ns->ns.count))
+                       return ns;
+       }
+
+       return NULL;
+}
+
 extern void put_ipc_ns(struct ipc_namespace *ns);
 #else
 static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -147,6 +157,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
        return ns;
 }
 
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+       return ns;
+}
+
 static inline void put_ipc_ns(struct ipc_namespace *ns)
 {
 }
index e974caf39d3e3bfa7d09b577b488650edc5ceebf..8c8f7a4d93afb96518c18da10c7e8f5bee62ef2d 100644 (file)
@@ -153,6 +153,8 @@ struct kretprobe {
        struct kretprobe_holder *rph;
 };
 
+#define KRETPROBE_MAX_DATA_SIZE        4096
+
 struct kretprobe_instance {
        union {
                struct freelist_node freelist;
index 9e0667e3723e91aca55b1e1050e63cb2b3e3cca5..c310648cc8f1abd8581f245871920b4e08a960f9 100644 (file)
@@ -874,7 +874,7 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -950,12 +950,8 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-               struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-                 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
index 2237abb93ccdefd82f89b63e5f664802570bb8f4..234eab05983992e333d5d2588b3f79b89b891b4b 100644 (file)
@@ -53,13 +53,6 @@ struct gfn_to_hva_cache {
        struct kvm_memory_slot *memslot;
 };
 
-struct gfn_to_pfn_cache {
-       u64 generation;
-       gfn_t gfn;
-       kvm_pfn_t pfn;
-       bool dirty;
-};
-
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 /*
  * Memory caches are used to preallocate memory ahead of various MMU flows,
index 7239858790353a8cd03cac2a4e397166e88b2b31..a5cc4cdf9cc86fe76bec75022082b0b57f29501d 100644 (file)
@@ -663,6 +663,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
  */
 int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
 
+/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * has to be in M3 state during resume. But some devices seem to be in a
+ * different MHI state other than M3 but they continue working fine if allowed.
+ * This API is intented to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
 /**
  * mhi_download_rddm_image - Download ramdump image from device for
  *                           debugging purpose.
index 97afcea39a7bf4cc3119cfc971aba1adff5b4651..8b18fe9771f97122efcd1b884647b8df4c4fd907 100644 (file)
@@ -145,13 +145,13 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
        GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
                ESW_TUN_OPTS_OFFSET + 1)
 
-u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
 
 #else  /* CONFIG_MLX5_ESWITCH */
 
-static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
+static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
 {
        return MLX5_ESWITCH_NONE;
 }
index 3636df90899a2431ecbf0acd66789a851db5a164..fbaab440a4846ecfab2ecaf7ae58f05a4dc974c3 100644 (file)
@@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits {
        u8         regs_84_to_68[0x11];
        u8         tracer_registers[0x4];
 
-       u8         regs_63_to_32[0x20];
+       u8         regs_63_to_46[0x12];
+       u8         mrtc[0x1];
+       u8         regs_44_to_32[0xd];
+
        u8         regs_31_to_0[0x20];
 };
 
index bb8c6f5f19bcaa41b3ad7ca343287c5908ebd331..c3a6e620960068cddb7818e1a39fac924bfd91fe 100644 (file)
@@ -105,7 +105,18 @@ struct page {
                        struct page_pool *pp;
                        unsigned long _pp_mapping_pad;
                        unsigned long dma_addr;
-                       atomic_long_t pp_frag_count;
+                       union {
+                               /**
+                                * dma_addr_upper: might require a 64-bit
+                                * value on 32-bit architectures.
+                                */
+                               unsigned long dma_addr_upper;
+                               /**
+                                * For frag page support, not supported in
+                                * 32-bit architectures with 64-bit DMA.
+                                */
+                               atomic_long_t pp_frag_count;
+                       };
                };
                struct {        /* slab, slob and slub */
                        union {
index ae2e75d15b2199206b79b0a413c00196e5dabdfc..4bb71979a8fd18e0f1c91db7e44362733000bdb0 100644 (file)
@@ -895,4 +895,18 @@ struct dfl_device_id {
        kernel_ulong_t driver_data;
 };
 
+/* ISHTP (Integrated Sensor Hub Transport Protocol) */
+
+#define ISHTP_MODULE_PREFIX    "ishtp:"
+
+/**
+ * struct ishtp_device_id - ISHTP device identifier
+ * @guid: GUID of the device.
+ * @driver_data: pointer to driver specific data
+ */
+struct ishtp_device_id {
+       guid_t guid;
+       kernel_ulong_t driver_data;
+};
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
index 3ec42495a43a56dbd51fecd166d572a9e586e3e4..be5cb3360b944ca1519f2da335b3b6053c18e443 100644 (file)
@@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
        spin_lock(&txq->_xmit_lock);
-       txq->xmit_lock_owner = cpu;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
        spin_lock_bh(&txq->_xmit_lock);
-       txq->xmit_lock_owner = smp_processor_id();
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
        bool ok = spin_trylock(&txq->_xmit_lock);
-       if (likely(ok))
-               txq->xmit_lock_owner = smp_processor_id();
+
+       if (likely(ok)) {
+               /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+               WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+       }
        return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-       txq->xmit_lock_owner = -1;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, -1);
        spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-       txq->xmit_lock_owner = -1;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, -1);
        spin_unlock_bh(&txq->_xmit_lock);
 }
 
index 52ec4b5e561566d7835ce1df5b703fa4f9f993e4..b5f14d5811135fc24c50ccf37234885fd0bed6f6 100644 (file)
@@ -686,13 +686,13 @@ static inline bool test_set_page_writeback(struct page *page)
 
 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
 
-/* Whether there are one or multiple pages in a folio */
-static inline bool folio_test_single(struct folio *folio)
-{
-       return !folio_test_head(folio);
-}
-
-static inline bool folio_test_multi(struct folio *folio)
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(struct folio *folio)
 {
        return folio_test_head(folio);
 }
index 1a0c646eb6ff7cf34ce2c79a29930c5b5af71201..60524645230514eaa4315a8d152dd102cc1177c7 100644 (file)
@@ -84,7 +84,7 @@ enum mapping_flags {
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
-       AS_THP_SUPPORT = 6,     /* THPs supported */
+       AS_LARGE_FOLIO_SUPPORT = 6,
 };
 
 /**
@@ -176,9 +176,25 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
        m->gfp_mask = mask;
 }
 
-static inline bool mapping_thp_support(struct address_space *mapping)
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+       __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+
+static inline bool mapping_large_folio_support(struct address_space *mapping)
 {
-       return test_bit(AS_THP_SUPPORT, &mapping->flags);
+       return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
 }
 
 static inline int filemap_nr_thps(struct address_space *mapping)
@@ -193,7 +209,7 @@ static inline int filemap_nr_thps(struct address_space *mapping)
 static inline void filemap_nr_thps_inc(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
-       if (!mapping_thp_support(mapping))
+       if (!mapping_large_folio_support(mapping))
                atomic_inc(&mapping->nr_thps);
 #else
        WARN_ON_ONCE(1);
@@ -203,7 +219,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping)
 static inline void filemap_nr_thps_dec(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
-       if (!mapping_thp_support(mapping))
+       if (!mapping_large_folio_support(mapping))
                atomic_dec(&mapping->nr_thps);
 #else
        WARN_ON_ONCE(1);
index b31d3f3312ce594c08e28dabf86efc207af67352..d73a1c08c3e3c030abee511294645d460ddb0570 100644 (file)
@@ -51,9 +51,9 @@
 #define _LINUX_PERCPU_REFCOUNT_H
 
 #include <linux/atomic.h>
-#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/types.h>
 #include <linux/gfp.h>
 
 struct percpu_ref;
index 98a9371133f8f137ba641479fc489ec1ad7b2677..ae4004e7957e1824c119a636d36bbba79ba20844 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
-#include <linux/printk.h>
 #include <linux/pfn.h>
 #include <linux/init.h>
 
index 96e43fbb2dd89f151dcc68eed2fe88a7966d0d0c..cbf03a5f9cf5199fa47d674aacea34a7df8e632b 100644 (file)
@@ -538,11 +538,12 @@ struct macsec_ops;
  * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY
  * @state: State of the PHY for management purposes
  * @dev_flags: Device-specific flags used by the PHY driver.
- *             Bits [15:0] are free to use by the PHY driver to communicate
- *                         driver specific behavior.
- *             Bits [23:16] are currently reserved for future use.
- *             Bits [31:24] are reserved for defining generic
- *                          PHY driver behavior.
+ *
+ *      - Bits [15:0] are free to use by the PHY driver to communicate
+ *        driver specific behavior.
+ *      - Bits [23:16] are currently reserved for future use.
+ *      - Bits [31:24] are reserved for defining generic
+ *        PHY driver behavior.
  * @irq: IRQ number of the PHY's interrupt (-1 if none)
  * @phy_timer: The timer for handling the state machine
  * @phylink: Pointer to phylink instance for this PHY
index 222da43b7096d27dbc96c3dc53ce34e20348216b..eddd66d426caf934e0244a6501cde8842328574d 100644 (file)
@@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
  * pm_runtime_active - Check whether or not a device is runtime-active.
  * @dev: Target device.
  *
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
  * %RPM_ACTIVE, or %false otherwise.
  *
  * Note that the return value of this function can only be trusted if it is
index 85b656f82d752ecf18d67a693e4de0201ddd9a2e..9497f6b983399ac0cf6a21d5e4b57f9e144e2d74 100644 (file)
@@ -198,6 +198,7 @@ void dump_stack_print_info(const char *log_lvl);
 void show_regs_print_info(const char *log_lvl);
 extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
 extern asmlinkage void dump_stack(void) __cold;
+void printk_trigger_flush(void);
 #else
 static inline __printf(1, 0)
 int vprintk(const char *s, va_list args)
@@ -274,6 +275,9 @@ static inline void dump_stack_lvl(const char *log_lvl)
 static inline void dump_stack(void)
 {
 }
+static inline void printk_trigger_flush(void)
+{
+}
 #endif
 
 #ifdef CONFIG_SMP
index ae04968a3a47201befdf32908bc78b8d37671638..9afd34a2d36c5c693d63707fa9a7a08c83c7a9db 100644 (file)
@@ -37,6 +37,7 @@
 #define PTP_MSGTYPE_PDELAY_RESP 0x3
 
 #define PTP_EV_PORT 319
+#define PTP_GEN_PORT 320
 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
 
 #define OFF_PTP_SOURCE_UUID    22 /* PTPv1 only */
index bd7a73db2e66cd6fcdc0fa1ced8bb949fb95ecfd..54cf566616aec28429b80236435bc6a8ee6fc9a1 100644 (file)
@@ -499,7 +499,8 @@ struct regulator_irq_data {
  *             best to shut-down regulator(s) or reboot the SOC if error
  *             handling is repeatedly failing. If fatal_cnt is given the IRQ
  *             handling is aborted if it fails for fatal_cnt times and die()
- *             callback (if populated) or BUG() is called to try to prevent
+ *             callback (if populated) is called. If die() is not populated
+ *             poweroff for the system is attempted in order to prevent any
  *             further damage.
  * @reread_ms: The time which is waited before attempting to re-read status
  *             at the worker if IC reading fails. Immediate re-read is done
@@ -516,11 +517,12 @@ struct regulator_irq_data {
  * @data:      Driver private data pointer which will be passed as such to
  *             the renable, map_event and die callbacks in regulator_irq_data.
  * @die:       Protection callback. If IC status reading or recovery actions
- *             fail fatal_cnt times this callback or BUG() is called. This
- *             callback should implement a final protection attempt like
- *             disabling the regulator. If protection succeeded this may
- *             return 0. If anything else is returned the core assumes final
- *             protection failed and calls BUG() as a last resort.
+ *             fail fatal_cnt times this callback is called or system is
+ *             powered off. This callback should implement a final protection
+ *             attempt like disabling the regulator. If protection succeeded
+ *             die() may return 0. If anything else is returned the core
+ *             assumes final protection failed and attempts to perform a
+ *             poweroff as a last resort.
  * @map_event: Driver callback to map IRQ status into regulator devices with
  *             events / errors. NOTE: callback MUST initialize both the
  *             errors and notifs for all rdevs which it signals having
index 6c9f19a33865ab857de84c576718ae95998d7be0..ce3c58286062c4d33e88e9fc1830621625a0bc4d 100644 (file)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
                         u64 *utime, u64 *stime);
 extern u64 task_gtime(struct task_struct *t);
 #else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
                                u64 *utime, u64 *stime)
 {
        *utime = t->utime;
        *stime = t->stime;
+       return false;
 }
 
 static inline u64 task_gtime(struct task_struct *t)
index 23505394ef7091d36219cd28b2e4ad617226f920..33a50642cf41c04a538817a5c2507baec91c2a4e 100644 (file)
@@ -352,6 +352,7 @@ extern __must_check bool do_notify_parent(struct task_struct *, int);
 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int);
 extern void force_fatal_sig(int);
+extern void force_exit_sig(int);
 extern int send_sig(int, struct task_struct *, int);
 extern int zap_other_threads(struct task_struct *p);
 extern struct sigqueue *sigqueue_alloc(void);
index ba88a69874004260aecf99cd76fd00607621caf9..058d7f371e25af6fb2226bc9646909239af96b23 100644 (file)
@@ -158,7 +158,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
  * pins the final release of task.io_context.  Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/sdb.h b/include/linux/sdb.h
deleted file mode 100644 (file)
index a2404a2..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the official version 1.1 of sdb.h
- */
-#ifndef __SDB_H__
-#define __SDB_H__
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-/*
- * All structures are 64 bytes long and are expected
- * to live in an array, one for each interconnect.
- * Most fields of the structures are shared among the
- * various types, and most-specific fields are at the
- * beginning (for alignment reasons, and to keep the
- * magic number at the head of the interconnect record
- */
-
-/* Product, 40 bytes at offset 24, 8-byte aligned
- *
- * device_id is vendor-assigned; version is device-specific,
- * date is hex (e.g 0x20120501), name is UTF-8, blank-filled
- * and not terminated with a 0 byte.
- */
-struct sdb_product {
-       uint64_t                vendor_id;      /* 0x18..0x1f */
-       uint32_t                device_id;      /* 0x20..0x23 */
-       uint32_t                version;        /* 0x24..0x27 */
-       uint32_t                date;           /* 0x28..0x2b */
-       uint8_t                 name[19];       /* 0x2c..0x3e */
-       uint8_t                 record_type;    /* 0x3f */
-};
-
-/*
- * Component, 56 bytes at offset 8, 8-byte aligned
- *
- * The address range is first to last, inclusive
- * (for example 0x100000 - 0x10ffff)
- */
-struct sdb_component {
-       uint64_t                addr_first;     /* 0x08..0x0f */
-       uint64_t                addr_last;      /* 0x10..0x17 */
-       struct sdb_product      product;        /* 0x18..0x3f */
-};
-
-/* Type of the SDB record */
-enum sdb_record_type {
-       sdb_type_interconnect   = 0x00,
-       sdb_type_device         = 0x01,
-       sdb_type_bridge         = 0x02,
-       sdb_type_integration    = 0x80,
-       sdb_type_repo_url       = 0x81,
-       sdb_type_synthesis      = 0x82,
-       sdb_type_empty          = 0xFF,
-};
-
-/* Type 0: interconnect (first of the array)
- *
- * sdb_records is the length of the table including this first
- * record, version is 1. The bus type is enumerated later.
- */
-#define                                SDB_MAGIC       0x5344422d /* "SDB-" */
-struct sdb_interconnect {
-       uint32_t                sdb_magic;      /* 0x00-0x03 */
-       uint16_t                sdb_records;    /* 0x04-0x05 */
-       uint8_t                 sdb_version;    /* 0x06 */
-       uint8_t                 sdb_bus_type;   /* 0x07 */
-       struct sdb_component    sdb_component;  /* 0x08-0x3f */
-};
-
-/* Type 1: device
- *
- * class is 0 for "custom device", other values are
- * to be standardized; ABI version is for the driver,
- * bus-specific bits are defined by each bus (see below)
- */
-struct sdb_device {
-       uint16_t                abi_class;      /* 0x00-0x01 */
-       uint8_t                 abi_ver_major;  /* 0x02 */
-       uint8_t                 abi_ver_minor;  /* 0x03 */
-       uint32_t                bus_specific;   /* 0x04-0x07 */
-       struct sdb_component    sdb_component;  /* 0x08-0x3f */
-};
-
-/* Type 2: bridge
- *
- * child is the address of the nested SDB table
- */
-struct sdb_bridge {
-       uint64_t                sdb_child;      /* 0x00-0x07 */
-       struct sdb_component    sdb_component;  /* 0x08-0x3f */
-};
-
-/* Type 0x80: integration
- *
- * all types with bit 7 set are meta-information, so
- * software can ignore the types it doesn't know. Here we
- * just provide product information for an aggregate device
- */
-struct sdb_integration {
-       uint8_t                 reserved[24];   /* 0x00-0x17 */
-       struct sdb_product      product;        /* 0x08-0x3f */
-};
-
-/* Type 0x81: Top module repository url
- *
- * again, an informative field that software can ignore
- */
-struct sdb_repo_url {
-       uint8_t                 repo_url[63];   /* 0x00-0x3e */
-       uint8_t                 record_type;    /* 0x3f */
-};
-
-/* Type 0x82: Synthesis tool information
- *
- * this informative record
- */
-struct sdb_synthesis {
-       uint8_t                 syn_name[16];   /* 0x00-0x0f */
-       uint8_t                 commit_id[16];  /* 0x10-0x1f */
-       uint8_t                 tool_name[8];   /* 0x20-0x27 */
-       uint32_t                tool_version;   /* 0x28-0x2b */
-       uint32_t                date;           /* 0x2c-0x2f */
-       uint8_t                 user_name[15];  /* 0x30-0x3e */
-       uint8_t                 record_type;    /* 0x3f */
-};
-
-/* Type 0xff: empty
- *
- * this allows keeping empty slots during development,
- * so they can be filled later with minimal efforts and
- * no misleading description is ever shipped -- hopefully.
- * It can also be used to pad a table to a desired length.
- */
-struct sdb_empty {
-       uint8_t                 reserved[63];   /* 0x00-0x3e */
-       uint8_t                 record_type;    /* 0x3f */
-};
-
-/* The type of bus, for bus-specific flags */
-enum sdb_bus_type {
-       sdb_wishbone = 0x00,
-       sdb_data     = 0x01,
-};
-
-#define SDB_WB_WIDTH_MASK      0x0f
-#define SDB_WB_ACCESS8                 0x01
-#define SDB_WB_ACCESS16                        0x02
-#define SDB_WB_ACCESS32                        0x04
-#define SDB_WB_ACCESS64                        0x08
-#define SDB_WB_LITTLE_ENDIAN   0x80
-
-#define SDB_DATA_READ          0x04
-#define SDB_DATA_WRITE         0x02
-#define SDB_DATA_EXEC          0x01
-
-#endif /* __SDB_H__ */
index bf21591a9e5e653585c26cb3f3f0857256c0eb89..0cda61855d90719e6175d0325598bbbe1b79254b 100644 (file)
@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
 }
 
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
 
 u64 siphash_1u64(const u64 a, const siphash_key_t *key);
 u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
 static inline u64 siphash(const void *data, size_t len,
                          const siphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-       if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
                return __siphash_unaligned(data, len, key);
-#endif
        return ___siphash_aligned(data, len, key);
 }
 
@@ -96,10 +93,8 @@ typedef struct {
 
 u32 __hsiphash_aligned(const void *data, size_t len,
                       const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key);
-#endif
 
 u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
 u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
 static inline u32 hsiphash(const void *data, size_t len,
                           const hsiphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-       if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
                return __hsiphash_unaligned(data, len, key);
-#endif
        return ___hsiphash_aligned(data, len, key);
 }
 
index 686a666d073d5106526f3c5c20d64f26131be72d..c8cb7e697d479a7649eb0277024d4f89c4dcb548 100644 (file)
@@ -4226,7 +4226,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
                return;
        }
 
-        if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+       if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
                __skb_checksum_complete(skb);
                skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
        }
index 50453b2876155e3ae582bd7967542a1ae1c5a18a..2d167ac3452c58083517d9466e1fcbdcbb58709e 100644 (file)
@@ -673,7 +673,7 @@ struct trace_event_file {
 
 #define PERF_MAX_TRACE_SIZE    8192
 
-#define MAX_FILTER_STR_VAL     256     /* Should handle KSYM_SYMBOL_LEN */
+#define MAX_FILTER_STR_VAL     256U    /* Should handle KSYM_SYMBOL_LEN */
 
 enum event_trigger_type {
        ETT_NONE                = (0),
index 44d0e09da2d9f3421a8c26a133e55ae116e45cce..41edbc01ffa4039524d5519caaa2f18e5884ca65 100644 (file)
@@ -152,7 +152,6 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
  * @feature_table_size: number of entries in the feature table array.
  * @feature_table_legacy: same as feature_table but when working in legacy mode.
  * @feature_table_size_legacy: number of entries in feature table legacy array.
- * @suppress_used_validation: set to not have core validate used length
  * @probe: the function to call when a device is found.  Returns 0 or -errno.
  * @scan: optional function to call after successful probe; intended
  *    for virtio-scsi to invoke a scan.
@@ -169,7 +168,6 @@ struct virtio_driver {
        unsigned int feature_table_size;
        const unsigned int *feature_table_legacy;
        unsigned int feature_table_size_legacy;
-       bool suppress_used_validation;
        int (*validate)(struct virtio_device *dev);
        int (*probe)(struct virtio_device *dev);
        void (*scan)(struct virtio_device *dev);
index b465f8f3e554f27ced45c35f54f113cf6dce1f07..04e87f4b9417c9fde533e2125d4eb36307a06562 100644 (file)
@@ -120,10 +120,15 @@ retry:
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+               unsigned int nh_off = p_off;
                struct skb_shared_info *shinfo = skb_shinfo(skb);
 
+               /* UFO may not include transport header in gso_size. */
+               if (gso_type & SKB_GSO_UDP)
+                       nh_off -= thlen;
+
                /* Too small packets are not really GSO ones. */
-               if (skb->len - p_off > gso_size) {
+               if (skb->len - nh_off > gso_size) {
                        shinfo->gso_size = gso_size;
                        shinfo->gso_type = gso_type;
 
index 2d0df57c99024cb267618c4ddb7440bb63268dfa..851e07da2583fb231e647db74fbc6ce07cb6e2f0 100644 (file)
@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)                     __wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)              __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 #define wake_up_interruptible_sync_poll_locked(x, m)                           \
        __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+       /*
+        * For performance reasons, we don't always take the queue lock here.
+        * Therefore, we might race with someone removing the last entry from
+        * the queue, and proceed while they still hold the queue lock.
+        * However, rcu_read_lock() is required to be held in such cases, so we
+        * can safely proceed with an RCU-delayed free.
+        */
+       if (waitqueue_active(wq_head))
+               __wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)                                                \
 ({                                                                             \
        bool __cond = (condition);                                              \
index f6af76c87a6c3856e92ac438c6c93c4c6ab9eec2..191c36afa1f4aa543f9b3b57d35630f0e71c2584 100644 (file)
@@ -126,7 +126,7 @@ struct tlb_slave_info {
 struct alb_bond_info {
        struct tlb_client_info  *tx_hashtbl; /* Dynamically allocated */
        u32                     unbalanced_load;
-       int                     tx_rebalance_counter;
+       atomic_t                tx_rebalance_counter;
        int                     lp_counter;
        /* -------- rlb parameters -------- */
        int rlb_enabled;
index 4202c609bb0b09345c0f1c5105adf409a3a89f74..c4898fcbf923bf01f14c6bcc694eb036d75d7195 100644 (file)
@@ -132,6 +132,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 #ifdef CONFIG_NET_RX_BUSY_POLL
        if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
                WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+       sk_rx_queue_update(sk, skb);
+}
+
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
+ * needs to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+                                      const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
        sk_rx_queue_set(sk, skb);
 }
index 67634675e9197cdbd8225e0e4aa1547d8f09f036..df6622a5fe98f0a9732617bb2a757ef9c9611797 100644 (file)
@@ -79,6 +79,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
        dst_cache->reset_ts = jiffies;
 }
 
+/**
+ *     dst_cache_reset_now - invalidate the cache contents immediately
+ *     @dst_cache: the cache
+ *
+ *     The caller must be sure there are no concurrent users, as this frees
+ *     all dst_cache users immediately, rather than waiting for the next
+ *     per-cpu usage like dst_cache_reset does. Most callers should use the
+ *     higher speed lazily-freed dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
 /**
  *     dst_cache_init - initialize the cache, allocating the required storage
  *     @dst_cache: the cache
index 4b10676c69d1917e4c30e086bf8f00b1e0f37ed4..bd07484ab9dd5f9de0321f63393941b521a0b5fa 100644 (file)
@@ -69,7 +69,7 @@ struct fib_rules_ops {
        int                     (*action)(struct fib_rule *,
                                          struct flowi *, int,
                                          struct fib_lookup_arg *);
-       bool                    (*suppress)(struct fib_rule *,
+       bool                    (*suppress)(struct fib_rule *, int,
                                            struct fib_lookup_arg *);
        int                     (*match)(struct fib_rule *,
                                         struct flowi *, int);
@@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
                            struct fib_lookup_arg *arg));
 
 INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg));
 INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg));
 #endif
index c412dde4d67dca88fa236522dadfbbc0eb8a97f4..83b8070d1cc93ad094e29721aa3a738c95462a83 100644 (file)
@@ -485,6 +485,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
                 struct fib6_config *cfg, gfp_t gfp_flags,
                 struct netlink_ext_ack *extack);
 void fib6_nh_release(struct fib6_nh *fib6_nh);
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
 
 int call_fib6_entry_notifiers(struct net *net,
                              enum fib_event_type event_type,
index ab5348e57db1a627cbce2dededb2e9b754d1f2cd..3417ba2d27ad6a1b5612a8855d2788f10d9fdf25 100644 (file)
@@ -438,7 +438,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
-       return net->ipv4.fib_num_tclassid_users;
+       return atomic_read(&net->ipv4.fib_num_tclassid_users);
 }
 #else
 static inline int fib_num_tclassid_users(struct net *net)
index afbce90c44808a954c79eb9ab2eb3d9249701633..45e0339be6fa4aa8e6c3ca074e00aff1c3b97b2f 100644 (file)
@@ -47,6 +47,7 @@ struct ipv6_stub {
                            struct fib6_config *cfg, gfp_t gfp_flags,
                            struct netlink_ext_ack *extack);
        void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+       void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
        void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
        int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
        void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
index cc663c68ddc4ba464fcd2ba71da7a9f31da2fdbf..d24b0a34c8f0cd3571893e0c16b10fa391c0511d 100644 (file)
@@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
 /* jiffies until ct expires, 0 if already expired */
 static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
 {
-       s32 timeout = ct->timeout - nfct_time_stamp;
+       s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
 
        return timeout > 0 ? timeout : 0;
 }
 
 static inline bool nf_ct_is_expired(const struct nf_conn *ct)
 {
-       return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+       return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
 }
 
 /* use after obtaining a reference count */
@@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
 static inline void nf_ct_offload_timeout(struct nf_conn *ct)
 {
        if (nf_ct_expires(ct) < NF_CT_DAY / 2)
-               ct->timeout = nfct_time_stamp + NF_CT_DAY;
+               WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
 }
 
 struct kernel_param;
index 2f65701a43c953bd3a9a9e3d491882cb7bb11859..6c5b2efc4f17d0d17be750d0c1a2e1d169ec063e 100644 (file)
@@ -65,7 +65,7 @@ struct netns_ipv4 {
        bool                    fib_has_custom_local_routes;
        bool                    fib_offload_disabled;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       int                     fib_num_tclassid_users;
+       atomic_t                fib_num_tclassid_users;
 #endif
        struct hlist_head       *fib_table_hash;
        struct sock             *fibnl;
index a964daedc17b6b6292ca1356a5e974f756af7a85..ea8595651c3846d288a5404b13caf82e10222239 100644 (file)
@@ -30,6 +30,7 @@ enum nci_flag {
        NCI_UP,
        NCI_DATA_EXCHANGE,
        NCI_DATA_EXCHANGE_TO,
+       NCI_UNREG,
 };
 
 /* NCI device states */
index ddcee128f5d9ac59c35b8364dc4c5943a04c712f..145acb8f250957f5fd7ae682a475034eea6055fc 100644 (file)
@@ -19,6 +19,8 @@
  *
  */
 
+#include <linux/types.h>
+
 #define NL802154_GENL_NAME "nl802154"
 
 enum nl802154_commands {
@@ -150,10 +152,9 @@ enum nl802154_attrs {
 };
 
 enum nl802154_iftype {
-       /* for backwards compatibility TODO */
-       NL802154_IFTYPE_UNSPEC = -1,
+       NL802154_IFTYPE_UNSPEC = (~(__u32)0),
 
-       NL802154_IFTYPE_NODE,
+       NL802154_IFTYPE_NODE = 0,
        NL802154_IFTYPE_MONITOR,
        NL802154_IFTYPE_COORD,
 
index 3855f069627f454016dc64558d8accb9c5f31809..a4082406a0039611dd9a5d4633d1da8134d07c18 100644 (file)
@@ -216,14 +216,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
        page_pool_put_full_page(pool, page, true);
 }
 
+#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT        \
+               (sizeof(dma_addr_t) > sizeof(unsigned long))
+
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-       return page->dma_addr;
+       dma_addr_t ret = page->dma_addr;
+
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+               ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
+
+       return ret;
 }
 
 static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 {
        page->dma_addr = addr;
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+               page->dma_addr_upper = upper_32_bits(addr);
 }
 
 static inline void page_pool_set_frag_count(struct page *page, long nr)
index b32906e1ab55527b5418f203d3de05853863f166..bea21ff70e74d906216f4eaa2d5a712d12551216 100644 (file)
@@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk)
        return -1;
 }
 
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+                                    const struct sk_buff *skb,
+                                    bool force_set)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        if (skb_rx_queue_recorded(skb)) {
                u16 rx_queue = skb_get_rx_queue(skb);
 
-               if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+               if (force_set ||
+                   unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
                        WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
        }
 #endif
 }
 
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+       __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+       __sk_rx_queue_set(sk, skb, false);
+}
+
 static inline void sk_rx_queue_clear(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
@@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
  * @sk: socket
  *
  * Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
  * everything that's associated with %current.
  *
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
  *
  * Return: a per task page_frag if context allows that,
  * otherwise a per socket one.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       if (gfpflags_normal_context(sk->sk_allocation))
+       if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+           (__GFP_DIRECT_RECLAIM | __GFP_FS))
                return &current->task_frag;
 
        return &sk->sk_frag;
index 2758d9df71ee9187ea6019855b2cfd9c929c91f1..c2a79aeee113c1de91f82d2590e37e14a6031ee8 100644 (file)
@@ -30,7 +30,7 @@ enum rdma_nl_flags {
  * constant as well and the compiler checks they are the same.
  */
 #define MODULE_ALIAS_RDMA_NETLINK(_index, _val)                                \
-       static inline void __chk_##_index(void)                                \
+       static inline void __maybe_unused __chk_##_index(void)                 \
        {                                                                      \
                BUILD_BUG_ON(_index != _val);                                  \
        }                                                                      \
index eeb1142aa1b1d5a00430ba9ce5f9e3742ae17391..4d1dfa1136b2b55afec14dbebf4ba1cbe65c7d82 100644 (file)
@@ -703,6 +703,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
                           struct netlink_ext_ack *extack);
 int ocelot_vcap_filter_del(struct ocelot *ocelot,
                           struct ocelot_vcap_filter *rule);
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+                              struct ocelot_vcap_filter *filter);
 struct ocelot_vcap_filter *
 ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
                                    unsigned long cookie, bool tc_offload);
index 31f4c4f9aeea072bec17ef6b0f71a4a3186835ec..ac0893df9c76d4025046fbb1d0ce3f02fac322c2 100644 (file)
@@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr {
  */
 /* Descriptor for SST ASoC machine driver */
 struct snd_soc_acpi_mach {
-       const u8 id[ACPI_ID_LEN];
+       u8 id[ACPI_ID_LEN];
        const struct snd_soc_acpi_codecs *comp_ids;
        const u32 link_mask;
        const struct snd_soc_acpi_link_adr *links;
index 3ba63319af3cd7afc19d12cfeb36550ce0e33ad0..c9048f3e471bb99f309696085227f7a6b9455a91 100644 (file)
@@ -8,7 +8,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM rpcgss
 
-#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_RPCGSS_H
 
 #include <linux/tracepoint.h>
index 41b509f410bf9b7f50f2c2e31542f170ee28f38a..f9c520ce4bf4e4cd3128e298d0d06a415514046e 100644 (file)
@@ -29,7 +29,7 @@
 #define POLLRDHUP       0x2000
 #endif
 
-#define POLLFREE       (__force __poll_t)0x4000        /* currently only for epoll */
+#define POLLFREE       (__force __poll_t)0x4000
 
 #define POLL_BUSY_LOOP (__force __poll_t)0x8000
 
index a13e20cc66b45bf0c31e52d0e327b0ce624c6195..0512fde5e6978a83666bac4d92895d1a41b6ea1e 100644 (file)
@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init {
        __u64 ctx_set_params;
 };
 
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect.  The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
 #define DRM_IOCTL_VIRTGPU_MAP \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
index 5da4ee234e0b7e677d65b0e887c5ca26d3002dcb..c0c2f3ed57298e2fe843b99c02d0714fa12da1ec 100644 (file)
 #define ETH_P_IFE      0xED3E          /* ForCES inter-FE LFB type */
 #define ETH_P_AF_IUCV   0xFBFB         /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
 
-#define ETH_P_802_3_MIN        0x0600          /* If the value in the ethernet type is less than this value
+#define ETH_P_802_3_MIN        0x0600          /* If the value in the ethernet type is more than this value
                                         * then the frame is Ethernet II. Else it is 802.3 */
 
 /*
index 74ef57b38f9f51f0d41011914d2dae44c0987256..ac5d6a3031db707a9a61d6acd3c65e719b1097bf 100644 (file)
@@ -66,10 +66,17 @@ struct rlimit64 {
 #define _STK_LIM       (8*1024*1024)
 
 /*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IOURING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
  */
-#define MLOCK_LIMIT    ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT    (8*1024*1024)
 
 /*
  * Due to binary compatibility, the actual resource numbers
index b94074c827721b8ebdee18a5199ba3657cfcccfd..b13eb86395e0597861b345417ddaa3427fb21946 100644 (file)
@@ -112,6 +112,7 @@ struct xenbus_driver {
        const char *name;       /* defaults to ids[0].devicetype */
        const struct xenbus_device_id *ids;
        bool allow_rebind; /* avoid setting xenstore closed during remove */
+       bool not_essential;     /* is not mandatory for boot progress */
        int (*probe)(struct xenbus_device *dev,
                     const struct xenbus_device_id *id);
        void (*otherend_changed)(struct xenbus_device *dev,
index 086fd11348e003a812bd12e7ad693cd8bfd3a306..69da9c454f73788910a5a9b4994a67e27b0c260d 100644 (file)
@@ -887,7 +887,7 @@ config CC_HAS_INT128
 
 config CC_IMPLICIT_FALLTHROUGH
        string
-       default "-Wimplicit-fallthrough=5" if CC_IS_GCC
+       default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
        default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
 #
index 4942bdd65748e90197c92d5e7063d2b190683585..b3048ebd5c315c3768e376a87019c85bd8b86c0d 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */
        struct pid              *shm_lprid;
        struct ucounts          *mlock_ucounts;
 
-       /* The task created the shm object.  NULL if the task is dead. */
+       /*
+        * The task created the shm object, for
+        * task_lock(shp->shm_creator)
+        */
        struct task_struct      *shm_creator;
-       struct list_head        shm_clist;      /* list by creator */
+
+       /*
+        * List by creator. task_lock(->shm_creator) required for read/write.
+        * If list_empty(), then the creator is dead already.
+        */
+       struct list_head        shm_clist;
+       struct ipc_namespace    *ns;
 } __randomize_layout;
 
 /* shm_mode upper byte flags */
@@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
        struct shmid_kernel *shp;
 
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
+       WARN_ON(ns != shp->ns);
 
        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
@@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head)
        kfree(shp);
 }
 
-static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
+/*
+ * It has to be called with shp locked.
+ * It must be called before ipc_rmid()
+ */
+static inline void shm_clist_rm(struct shmid_kernel *shp)
 {
-       list_del(&s->shm_clist);
-       ipc_rmid(&shm_ids(ns), &s->shm_perm);
+       struct task_struct *creator;
+
+       /* ensure that shm_creator does not disappear */
+       rcu_read_lock();
+
+       /*
+        * A concurrent exit_shm may do a list_del_init() as well.
+        * Just do nothing if exit_shm already did the work
+        */
+       if (!list_empty(&shp->shm_clist)) {
+               /*
+                * shp->shm_creator is guaranteed to be valid *only*
+                * if shp->shm_clist is not empty.
+                */
+               creator = shp->shm_creator;
+
+               task_lock(creator);
+               /*
+                * list_del_init() is a nop if the entry was already removed
+                * from the list.
+                */
+               list_del_init(&shp->shm_clist);
+               task_unlock(creator);
+       }
+       rcu_read_unlock();
+}
+
+static inline void shm_rmid(struct shmid_kernel *s)
+{
+       shm_clist_rm(s);
+       ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
 }
 
 
@@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
        shm_file = shp->shm_file;
        shp->shm_file = NULL;
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       shm_rmid(ns, shp);
+       shm_rmid(shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shm_file))
                shmem_lock(shm_file, 0, shp->mlock_ucounts);
@@ -303,10 +346,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
  *
  * 2) sysctl kernel.shm_rmid_forced is set to 1.
  */
-static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+static bool shm_may_destroy(struct shmid_kernel *shp)
 {
        return (shp->shm_nattch == 0) &&
-              (ns->shm_rmid_forced ||
+              (shp->ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
 }
 
@@ -337,7 +380,7 @@ static void shm_close(struct vm_area_struct *vma)
        ipc_update_pid(&shp->shm_lprid, task_tgid(current));
        shp->shm_dtim = ktime_get_real_seconds();
        shp->shm_nattch--;
-       if (shm_may_destroy(ns, shp))
+       if (shm_may_destroy(shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
@@ -358,10 +401,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
         *
         * As shp->* are changed under rwsem, it's safe to skip shp locking.
         */
-       if (shp->shm_creator != NULL)
+       if (!list_empty(&shp->shm_clist))
                return 0;
 
-       if (shm_may_destroy(ns, shp)) {
+       if (shm_may_destroy(shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
@@ -379,48 +422,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
 /* Locking assumes this will only be called with task == current */
 void exit_shm(struct task_struct *task)
 {
-       struct ipc_namespace *ns = task->nsproxy->ipc_ns;
-       struct shmid_kernel *shp, *n;
+       for (;;) {
+               struct shmid_kernel *shp;
+               struct ipc_namespace *ns;
 
-       if (list_empty(&task->sysvshm.shm_clist))
-               return;
+               task_lock(task);
+
+               if (list_empty(&task->sysvshm.shm_clist)) {
+                       task_unlock(task);
+                       break;
+               }
+
+               shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
+                               shm_clist);
 
-       /*
-        * If kernel.shm_rmid_forced is not set then only keep track of
-        * which shmids are orphaned, so that a later set of the sysctl
-        * can clean them up.
-        */
-       if (!ns->shm_rmid_forced) {
-               down_read(&shm_ids(ns).rwsem);
-               list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
-                       shp->shm_creator = NULL;
                /*
-                * Only under read lock but we are only called on current
-                * so no entry on the list will be shared.
+                * 1) Get pointer to the ipc namespace. It is worth to say
+                * that this pointer is guaranteed to be valid because
+                * shp lifetime is always shorter than namespace lifetime
+                * in which shp lives.
+                * We taken task_lock it means that shp won't be freed.
                 */
-               list_del(&task->sysvshm.shm_clist);
-               up_read(&shm_ids(ns).rwsem);
-               return;
-       }
+               ns = shp->ns;
 
-       /*
-        * Destroy all already created segments, that were not yet mapped,
-        * and mark any mapped as orphan to cover the sysctl toggling.
-        * Destroy is skipped if shm_may_destroy() returns false.
-        */
-       down_write(&shm_ids(ns).rwsem);
-       list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
-               shp->shm_creator = NULL;
+               /*
+                * 2) If kernel.shm_rmid_forced is not set then only keep track of
+                * which shmids are orphaned, so that a later set of the sysctl
+                * can clean them up.
+                */
+               if (!ns->shm_rmid_forced)
+                       goto unlink_continue;
 
-               if (shm_may_destroy(ns, shp)) {
-                       shm_lock_by_ptr(shp);
-                       shm_destroy(ns, shp);
+               /*
+                * 3) get a reference to the namespace.
+                *    The refcount could be already 0. If it is 0, then
+                *    the shm objects will be free by free_ipc_work().
+                */
+               ns = get_ipc_ns_not_zero(ns);
+               if (!ns) {
+unlink_continue:
+                       list_del_init(&shp->shm_clist);
+                       task_unlock(task);
+                       continue;
                }
-       }
 
-       /* Remove the list head from any segments still attached. */
-       list_del(&task->sysvshm.shm_clist);
-       up_write(&shm_ids(ns).rwsem);
+               /*
+                * 4) get a reference to shp.
+                *   This cannot fail: shm_clist_rm() is called before
+                *   ipc_rmid(), thus the refcount cannot be 0.
+                */
+               WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
+
+               /*
+                * 5) unlink the shm segment from the list of segments
+                *    created by current.
+                *    This must be done last. After unlinking,
+                *    only the refcounts obtained above prevent IPC_RMID
+                *    from destroying the segment or the namespace.
+                */
+               list_del_init(&shp->shm_clist);
+
+               task_unlock(task);
+
+               /*
+                * 6) we have all references
+                *    Thus lock & if needed destroy shp.
+                */
+               down_write(&shm_ids(ns).rwsem);
+               shm_lock_by_ptr(shp);
+               /*
+                * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
+                * safe to call ipc_rcu_putref here
+                */
+               ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
+
+               if (ipc_valid_object(&shp->shm_perm)) {
+                       if (shm_may_destroy(shp))
+                               shm_destroy(ns, shp);
+                       else
+                               shm_unlock(shp);
+               } else {
+                       /*
+                        * Someone else deleted the shp from namespace
+                        * idr/kht while we have waited.
+                        * Just unlock and continue.
+                        */
+                       shm_unlock(shp);
+               }
+
+               up_write(&shm_ids(ns).rwsem);
+               put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
+       }
 }
 
 static vm_fault_t shm_fault(struct vm_fault *vmf)
@@ -676,7 +768,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        if (error < 0)
                goto no_id;
 
+       shp->ns = ns;
+
+       task_lock(current);
        list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
+       task_unlock(current);
 
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
@@ -1567,7 +1663,8 @@ out_nattch:
        down_write(&shm_ids(ns).rwsem);
        shp = shm_lock(ns, shmid);
        shp->shm_nattch--;
-       if (shm_may_destroy(ns, shp))
+
+       if (shm_may_destroy(shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
index d48d8cfa1f3fa3d37dd190d38211f69b5c00e4eb..fa2d86ef3fb80b1a828043190a69f8c935715354 100644 (file)
@@ -447,8 +447,8 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
 static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 {
        if (ipcp->key != IPC_PRIVATE)
-               rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
-                                      ipc_kht_params);
+               WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
+                                      ipc_kht_params));
 }
 
 /**
@@ -498,7 +498,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 {
        int idx = ipcid_to_idx(ipcp->id);
 
-       idr_remove(&ids->ipcs_idr, idx);
+       WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, idx) != ipcp);
        ipc_kht_remove(ids, ipcp);
        ids->in_use--;
        ipcp->deleted = true;
index dbc3ad07e21b66f19fe4308b0e07be89db7e443f..9bdb03767db5701fad75a00831dba9ebb947cfb6 100644 (file)
@@ -6346,11 +6346,6 @@ BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
 
 /* BTF ID set registration API for modules */
 
-struct kfunc_btf_id_list {
-       struct list_head list;
-       struct mutex mutex;
-};
-
 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 
 void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -6376,8 +6371,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
 {
        struct kfunc_btf_id_set *s;
 
-       if (!owner)
-               return false;
        mutex_lock(&klist->mutex);
        list_for_each_entry(s, &klist->list, list) {
                if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
@@ -6389,8 +6382,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
        return false;
 }
 
-#endif
-
 #define DEFINE_KFUNC_BTF_ID_LIST(name)                                         \
        struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list),           \
                                          __MUTEX_INITIALIZER(name.mutex) };   \
@@ -6398,3 +6389,5 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
 
 DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
 DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
+
+#endif
index 2ca643af9a548000fddce831f06457225e918b23..43eb3501721b7bac1d34dbbcfe4d40d2ec435830 100644 (file)
@@ -1809,6 +1809,8 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_sysctl_get_new_value_proto;
        case BPF_FUNC_sysctl_set_new_value:
                return &bpf_sysctl_set_new_value_proto;
+       case BPF_FUNC_ktime_get_coarse_ns:
+               return &bpf_ktime_get_coarse_ns_proto;
        default:
                return cgroup_base_func_proto(func_id, prog);
        }
index 1ffd469c217fad50b302b56505060c6ca869851b..649f07623df6c00c1b2546c15b95578948a31f86 100644 (file)
@@ -1364,8 +1364,6 @@ bpf_base_func_proto(enum bpf_func_id func_id)
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_ktime_get_boot_ns:
                return &bpf_ktime_get_boot_ns_proto;
-       case BPF_FUNC_ktime_get_coarse_ns:
-               return &bpf_ktime_get_coarse_ns_proto;
        case BPF_FUNC_ringbuf_output:
                return &bpf_ringbuf_output_proto;
        case BPF_FUNC_ringbuf_reserve:
index 50f96ea4452a2ead6965b02a4885628119f0b917..1033ee8c0caf019e002d86bf58681c0f259b1b93 100644 (file)
@@ -132,6 +132,21 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
        return map;
 }
 
+static void bpf_map_write_active_inc(struct bpf_map *map)
+{
+       atomic64_inc(&map->writecnt);
+}
+
+static void bpf_map_write_active_dec(struct bpf_map *map)
+{
+       atomic64_dec(&map->writecnt);
+}
+
+bool bpf_map_write_active(const struct bpf_map *map)
+{
+       return atomic64_read(&map->writecnt) != 0;
+}
+
 static u32 bpf_map_value_size(const struct bpf_map *map)
 {
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
@@ -601,11 +616,8 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
 {
        struct bpf_map *map = vma->vm_file->private_data;
 
-       if (vma->vm_flags & VM_MAYWRITE) {
-               mutex_lock(&map->freeze_mutex);
-               map->writecnt++;
-               mutex_unlock(&map->freeze_mutex);
-       }
+       if (vma->vm_flags & VM_MAYWRITE)
+               bpf_map_write_active_inc(map);
 }
 
 /* called for all unmapped memory region (including initial) */
@@ -613,11 +625,8 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
 {
        struct bpf_map *map = vma->vm_file->private_data;
 
-       if (vma->vm_flags & VM_MAYWRITE) {
-               mutex_lock(&map->freeze_mutex);
-               map->writecnt--;
-               mutex_unlock(&map->freeze_mutex);
-       }
+       if (vma->vm_flags & VM_MAYWRITE)
+               bpf_map_write_active_dec(map);
 }
 
 static const struct vm_operations_struct bpf_map_default_vmops = {
@@ -668,7 +677,7 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
                goto out;
 
        if (vma->vm_flags & VM_MAYWRITE)
-               map->writecnt++;
+               bpf_map_write_active_inc(map);
 out:
        mutex_unlock(&map->freeze_mutex);
        return err;
@@ -1139,6 +1148,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
+       bpf_map_write_active_inc(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
@@ -1174,6 +1184,7 @@ free_value:
 free_key:
        kvfree(key);
 err_put:
+       bpf_map_write_active_dec(map);
        fdput(f);
        return err;
 }
@@ -1196,6 +1207,7 @@ static int map_delete_elem(union bpf_attr *attr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
+       bpf_map_write_active_inc(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
@@ -1226,6 +1238,7 @@ static int map_delete_elem(union bpf_attr *attr)
 out:
        kvfree(key);
 err_put:
+       bpf_map_write_active_dec(map);
        fdput(f);
        return err;
 }
@@ -1533,6 +1546,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
+       bpf_map_write_active_inc(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
            !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
@@ -1597,6 +1611,7 @@ free_value:
 free_key:
        kvfree(key);
 err_put:
+       bpf_map_write_active_dec(map);
        fdput(f);
        return err;
 }
@@ -1624,8 +1639,7 @@ static int map_freeze(const union bpf_attr *attr)
        }
 
        mutex_lock(&map->freeze_mutex);
-
-       if (map->writecnt) {
+       if (bpf_map_write_active(map)) {
                err = -EBUSY;
                goto err_put;
        }
@@ -4171,6 +4185,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
                            union bpf_attr __user *uattr,
                            int cmd)
 {
+       bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
+                        cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
+       bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
        struct bpf_map *map;
        int err, ufd;
        struct fd f;
@@ -4183,16 +4200,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-
-       if ((cmd == BPF_MAP_LOOKUP_BATCH ||
-            cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
-           !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
+       if (has_write)
+               bpf_map_write_active_inc(map);
+       if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }
-
-       if (cmd != BPF_MAP_LOOKUP_BATCH &&
-           !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
+       if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }
@@ -4205,8 +4219,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
                BPF_DO_BATCH(map->ops->map_update_batch);
        else
                BPF_DO_BATCH(map->ops->map_delete_batch);
-
 err_put:
+       if (has_write)
+               bpf_map_write_active_dec(map);
        fdput(f);
        return err;
 }
index 890b3ec375a394a862799cc5b07718d571d19a4d..f3001937bbb931bba0990ad664847a07d25abc6b 100644 (file)
@@ -1151,7 +1151,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
                        /* transfer reg's id which is unique for every map_lookup_elem
                         * as UID of the inner map.
                         */
-                       reg->map_uid = reg->id;
+                       if (map_value_has_timer(map->inner_map_meta))
+                               reg->map_uid = reg->id;
                } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
                        reg->type = PTR_TO_XDP_SOCK;
                } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
@@ -4055,7 +4056,22 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
 
 static bool bpf_map_is_rdonly(const struct bpf_map *map)
 {
-       return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
+       /* A map is considered read-only if the following condition are true:
+        *
+        * 1) BPF program side cannot change any of the map content. The
+        *    BPF_F_RDONLY_PROG flag is throughout the lifetime of a map
+        *    and was set at map creation time.
+        * 2) The map value(s) have been initialized from user space by a
+        *    loader and then "frozen", such that no new map update/delete
+        *    operations from syscall side are possible for the rest of
+        *    the map's lifetime from that point onwards.
+        * 3) Any parallel/pending map update/delete operations from syscall
+        *    side have been completed. Only after that point, it's safe to
+        *    assume that map value(s) are immutable.
+        */
+       return (map->map_flags & BPF_F_RDONLY_PROG) &&
+              READ_ONCE(map->frozen) &&
+              !bpf_map_write_active(map);
 }
 
 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
@@ -8406,7 +8422,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 
        new_range = dst_reg->off;
        if (range_right_open)
-               new_range--;
+               new_range++;
 
        /* Examples for register markings:
         *
@@ -11631,6 +11647,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
                }
        }
 
+       if (map_value_has_timer(map)) {
+               if (is_tracing_prog_type(prog_type)) {
+                       verbose(env, "tracing progs cannot use bpf_timer yet\n");
+                       return -EINVAL;
+               }
+       }
+
        if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
            !bpf_offload_prog_map_match(prog, map)) {
                verbose(env, "offload device mismatch between prog and map\n");
index 192e43a874076d80c8bfa30df88ad94654ead5e6..407a2568f35ebb77424320b10287dd499c9572b9 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
+#include <linux/scs.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/cpuset.h>
 
@@ -587,6 +588,12 @@ static int bringup_cpu(unsigned int cpu)
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;
 
+       /*
+        * Reset stale stack state from the last time this CPU was online.
+        */
+       scs_task_reset(idle);
+       kasan_unpoison_task_stack(idle);
+
        /*
         * Some architectures have to walk the irq descriptors to
         * setup the vector space for the cpu which comes online.
index 4508201847d22ec7b7f7749693fafc1b8453ba5b..0b6379adff6bdd4327ace5ae7029cfd24369e825 100644 (file)
@@ -48,7 +48,7 @@ bool syscall_user_dispatch(struct pt_regs *regs)
                 * the selector is loaded by userspace.
                 */
                if (unlikely(__get_user(state, sd->selector))) {
-                       force_fatal_sig(SIGSEGV);
+                       force_exit_sig(SIGSEGV);
                        return true;
                }
 
@@ -56,7 +56,7 @@ bool syscall_user_dispatch(struct pt_regs *regs)
                        return false;
 
                if (state != SYSCALL_DISPATCH_FILTER_BLOCK) {
-                       force_fatal_sig(SIGSYS);
+                       force_exit_sig(SIGSYS);
                        return true;
                }
        }
index 523106a506eed0a93afc92aea8ff8a9bc7ca4209..30d94f68c5bdbcd20694d511ebeb1a5ddeac45ab 100644 (file)
@@ -9759,6 +9759,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                                continue;
                        if (event->attr.config != entry->type)
                                continue;
+                       /* Cannot deliver synchronous signal to other task. */
+                       if (event->attr.sigtrap)
+                               continue;
                        if (perf_tp_event_match(event, &data, regs))
                                perf_swevent_event(event, count, &data, regs);
                }
index e9db0c810554e2c6b209b15de9b04d504e4080d1..21eccc961bba31cad928b2bf6e9bfc325191e64e 100644 (file)
@@ -2086,6 +2086,9 @@ int register_kretprobe(struct kretprobe *rp)
                }
        }
 
+       if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
+               return -E2BIG;
+
        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
 
index c51387a4326574a276d04dc76d92e6b284794c42..04a74d040a6d3af40dcd4932cfd9407b3bb4c1df 100644 (file)
  * atomic_long_cmpxchg() will be used to obtain writer lock.
  *
  * There are three places where the lock handoff bit may be set or cleared.
- * 1) rwsem_mark_wake() for readers.
- * 2) rwsem_try_write_lock() for writers.
- * 3) Error path of rwsem_down_write_slowpath().
+ * 1) rwsem_mark_wake() for readers            -- set, clear
+ * 2) rwsem_try_write_lock() for writers       -- set, clear
+ * 3) rwsem_del_waiter()                       -- clear
  *
  * For all the above cases, wait_lock will be held. A writer must also
  * be the first one in the wait_list to be eligible for setting the handoff
@@ -334,6 +334,9 @@ struct rwsem_waiter {
        struct task_struct *task;
        enum rwsem_waiter_type type;
        unsigned long timeout;
+
+       /* Writer only, not initialized in reader */
+       bool handoff_set;
 };
 #define rwsem_first_waiter(sem) \
        list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
@@ -344,12 +347,6 @@ enum rwsem_wake_type {
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
 };
 
-enum writer_wait_state {
-       WRITER_NOT_FIRST,       /* Writer is not first in wait list */
-       WRITER_FIRST,           /* Writer is first in wait list     */
-       WRITER_HANDOFF          /* Writer is first & handoff needed */
-};
-
 /*
  * The typical HZ value is either 250 or 1000. So set the minimum waiting
  * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
@@ -365,6 +362,31 @@ enum writer_wait_state {
  */
 #define MAX_READERS_WAKEUP     0x100
 
+static inline void
+rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
+{
+       lockdep_assert_held(&sem->wait_lock);
+       list_add_tail(&waiter->list, &sem->wait_list);
+       /* caller will set RWSEM_FLAG_WAITERS */
+}
+
+/*
+ * Remove a waiter from the wait_list and clear flags.
+ *
+ * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
+ * this function. Modify with care.
+ */
+static inline void
+rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
+{
+       lockdep_assert_held(&sem->wait_lock);
+       list_del(&waiter->list);
+       if (likely(!list_empty(&sem->wait_list)))
+               return;
+
+       atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
+}
+
 /*
  * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
@@ -376,6 +398,8 @@ enum writer_wait_state {
  *   preferably when the wait_lock is released
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only marked woken if downgrading is false
+ *
+ * Implies rwsem_del_waiter() for all woken readers.
  */
 static void rwsem_mark_wake(struct rw_semaphore *sem,
                            enum rwsem_wake_type wake_type,
@@ -490,18 +514,25 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 
        adjustment = woken * RWSEM_READER_BIAS - adjustment;
        lockevent_cond_inc(rwsem_wake_reader, woken);
+
+       oldcount = atomic_long_read(&sem->count);
        if (list_empty(&sem->wait_list)) {
-               /* hit end of list above */
+               /*
+                * Combined with list_move_tail() above, this implies
+                * rwsem_del_waiter().
+                */
                adjustment -= RWSEM_FLAG_WAITERS;
+               if (oldcount & RWSEM_FLAG_HANDOFF)
+                       adjustment -= RWSEM_FLAG_HANDOFF;
+       } else if (woken) {
+               /*
+                * When we've woken a reader, we no longer need to force
+                * writers to give up the lock and we can clear HANDOFF.
+                */
+               if (oldcount & RWSEM_FLAG_HANDOFF)
+                       adjustment -= RWSEM_FLAG_HANDOFF;
        }
 
-       /*
-        * When we've woken a reader, we no longer need to force writers
-        * to give up the lock and we can clear HANDOFF.
-        */
-       if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
-               adjustment -= RWSEM_FLAG_HANDOFF;
-
        if (adjustment)
                atomic_long_add(adjustment, &sem->count);
 
@@ -532,12 +563,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
  * race conditions between checking the rwsem wait list and setting the
  * sem->count accordingly.
  *
- * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
- * bit is set or the lock is acquired with handoff bit cleared.
+ * Implies rwsem_del_waiter() on success.
  */
 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
-                                       enum writer_wait_state wstate)
+                                       struct rwsem_waiter *waiter)
 {
+       bool first = rwsem_first_waiter(sem) == waiter;
        long count, new;
 
        lockdep_assert_held(&sem->wait_lock);
@@ -546,13 +577,19 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
        do {
                bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
-               if (has_handoff && wstate == WRITER_NOT_FIRST)
-                       return false;
+               if (has_handoff) {
+                       if (!first)
+                               return false;
+
+                       /* First waiter inherits a previously set handoff bit */
+                       waiter->handoff_set = true;
+               }
 
                new = count;
 
                if (count & RWSEM_LOCK_MASK) {
-                       if (has_handoff || (wstate != WRITER_HANDOFF))
+                       if (has_handoff || (!rt_task(waiter->task) &&
+                                           !time_after(jiffies, waiter->timeout)))
                                return false;
 
                        new |= RWSEM_FLAG_HANDOFF;
@@ -569,9 +606,17 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
         * We have either acquired the lock with handoff bit cleared or
         * set the handoff bit.
         */
-       if (new & RWSEM_FLAG_HANDOFF)
+       if (new & RWSEM_FLAG_HANDOFF) {
+               waiter->handoff_set = true;
+               lockevent_inc(rwsem_wlock_handoff);
                return false;
+       }
 
+       /*
+        * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
+        * success.
+        */
+       list_del(&waiter->list);
        rwsem_set_owner(sem);
        return true;
 }
@@ -956,7 +1001,7 @@ queue:
                }
                adjustment += RWSEM_FLAG_WAITERS;
        }
-       list_add_tail(&waiter.list, &sem->wait_list);
+       rwsem_add_waiter(sem, &waiter);
 
        /* we're now waiting on the lock, but no longer actively locking */
        count = atomic_long_add_return(adjustment, &sem->count);
@@ -1002,11 +1047,7 @@ queue:
        return sem;
 
 out_nolock:
-       list_del(&waiter.list);
-       if (list_empty(&sem->wait_list)) {
-               atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
-                                  &sem->count);
-       }
+       rwsem_del_waiter(sem, &waiter);
        raw_spin_unlock_irq(&sem->wait_lock);
        __set_current_state(TASK_RUNNING);
        lockevent_inc(rwsem_rlock_fail);
@@ -1020,9 +1061,7 @@ static struct rw_semaphore *
 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 {
        long count;
-       enum writer_wait_state wstate;
        struct rwsem_waiter waiter;
-       struct rw_semaphore *ret = sem;
        DEFINE_WAKE_Q(wake_q);
 
        /* do optimistic spinning and steal lock if possible */
@@ -1038,16 +1077,13 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+       waiter.handoff_set = false;
 
        raw_spin_lock_irq(&sem->wait_lock);
-
-       /* account for this before adding a new element to the list */
-       wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
-
-       list_add_tail(&waiter.list, &sem->wait_list);
+       rwsem_add_waiter(sem, &waiter);
 
        /* we're now waiting on the lock */
-       if (wstate == WRITER_NOT_FIRST) {
+       if (rwsem_first_waiter(sem) != &waiter) {
                count = atomic_long_read(&sem->count);
 
                /*
@@ -1083,13 +1119,16 @@ wait:
        /* wait until we successfully acquire the lock */
        set_current_state(state);
        for (;;) {
-               if (rwsem_try_write_lock(sem, wstate)) {
+               if (rwsem_try_write_lock(sem, &waiter)) {
                        /* rwsem_try_write_lock() implies ACQUIRE on success */
                        break;
                }
 
                raw_spin_unlock_irq(&sem->wait_lock);
 
+               if (signal_pending_state(state, current))
+                       goto out_nolock;
+
                /*
                 * After setting the handoff bit and failing to acquire
                 * the lock, attempt to spin on owner to accelerate lock
@@ -1098,7 +1137,7 @@ wait:
                 * In this case, we attempt to acquire the lock again
                 * without sleeping.
                 */
-               if (wstate == WRITER_HANDOFF) {
+               if (waiter.handoff_set) {
                        enum owner_state owner_state;
 
                        preempt_disable();
@@ -1109,66 +1148,26 @@ wait:
                                goto trylock_again;
                }
 
-               /* Block until there are no active lockers. */
-               for (;;) {
-                       if (signal_pending_state(state, current))
-                               goto out_nolock;
-
-                       schedule();
-                       lockevent_inc(rwsem_sleep_writer);
-                       set_current_state(state);
-                       /*
-                        * If HANDOFF bit is set, unconditionally do
-                        * a trylock.
-                        */
-                       if (wstate == WRITER_HANDOFF)
-                               break;
-
-                       if ((wstate == WRITER_NOT_FIRST) &&
-                           (rwsem_first_waiter(sem) == &waiter))
-                               wstate = WRITER_FIRST;
-
-                       count = atomic_long_read(&sem->count);
-                       if (!(count & RWSEM_LOCK_MASK))
-                               break;
-
-                       /*
-                        * The setting of the handoff bit is deferred
-                        * until rwsem_try_write_lock() is called.
-                        */
-                       if ((wstate == WRITER_FIRST) && (rt_task(current) ||
-                           time_after(jiffies, waiter.timeout))) {
-                               wstate = WRITER_HANDOFF;
-                               lockevent_inc(rwsem_wlock_handoff);
-                               break;
-                       }
-               }
+               schedule();
+               lockevent_inc(rwsem_sleep_writer);
+               set_current_state(state);
 trylock_again:
                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
-       list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
        lockevent_inc(rwsem_wlock);
-
-       return ret;
+       return sem;
 
 out_nolock:
        __set_current_state(TASK_RUNNING);
        raw_spin_lock_irq(&sem->wait_lock);
-       list_del(&waiter.list);
-
-       if (unlikely(wstate == WRITER_HANDOFF))
-               atomic_long_add(-RWSEM_FLAG_HANDOFF,  &sem->count);
-
-       if (list_empty(&sem->wait_list))
-               atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
-       else
+       rwsem_del_waiter(sem, &waiter);
+       if (!list_empty(&sem->wait_list))
                rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);
        lockevent_inc(rwsem_wlock_fail);
-
        return ERR_PTR(-EINTR);
 }
 
@@ -1249,17 +1248,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 
        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 
-       /*
-        * Optimize for the case when the rwsem is not locked at all.
-        */
-       tmp = RWSEM_UNLOCKED_VALUE;
-       do {
+       tmp = atomic_long_read(&sem->count);
+       while (!(tmp & RWSEM_READ_FAILED_MASK)) {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
-                                       tmp + RWSEM_READER_BIAS)) {
+                                                   tmp + RWSEM_READER_BIAS)) {
                        rwsem_set_reader_owned(sem);
                        return 1;
                }
-       } while (!(tmp & RWSEM_READ_FAILED_MASK));
+       }
        return 0;
 }
 
index 9ed9b744876c123d9bb386e00f93ff7ac0733b50..e6af502c2fd77410093f92713aa4b1f3704bed9d 100644 (file)
@@ -693,7 +693,7 @@ static int load_image_and_restore(void)
                goto Unlock;
 
        error = swsusp_read(&flags);
-       swsusp_close(FMODE_READ);
+       swsusp_close(FMODE_READ | FMODE_EXCL);
        if (!error)
                error = hibernation_restore(flags & SF_PLATFORM_MODE);
 
@@ -983,7 +983,7 @@ static int software_resume(void)
        /* The snapshot device should not be opened while we're running */
        if (!hibernate_acquire()) {
                error = -EBUSY;
-               swsusp_close(FMODE_READ);
+               swsusp_close(FMODE_READ | FMODE_EXCL);
                goto Unlock;
        }
 
@@ -1018,7 +1018,7 @@ static int software_resume(void)
        pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
        return error;
  Close_Finish:
-       swsusp_close(FMODE_READ);
+       swsusp_close(FMODE_READ | FMODE_EXCL);
        goto Finish;
 }
 
index 740723bb388524434604cd4462bb7f0034d08a17..ad241b4ff64c58b64252de2f3a0501ee9288357a 100644 (file)
@@ -177,7 +177,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
                if (res <= 0)
                        goto unlock;
        } else {
-               res = PAGE_SIZE - pg_offp;
+               res = PAGE_SIZE;
        }
 
        if (!data_of(data->handle)) {
index 013bfd6dcc34af711765f1e14377af2c57d4cec0..57b132b658e155b7344d44f9e930039080941290 100644 (file)
@@ -3253,6 +3253,11 @@ void defer_console_output(void)
        preempt_enable();
 }
 
+void printk_trigger_flush(void)
+{
+       defer_console_output();
+}
+
 int vprintk_deferred(const char *fmt, va_list args)
 {
        int r;
index 3c9b0fda64ac08b00723227bad6203279d8a9b1b..77563109c0ea0111d9783a246585fd4d2b5e2531 100644 (file)
@@ -1918,7 +1918,7 @@ static void __init init_uclamp_rq(struct rq *rq)
                };
        }
 
-       rq->uclamp_flags = 0;
+       rq->uclamp_flags = UCLAMP_FLAG_IDLE;
 }
 
 static void __init init_uclamp(void)
@@ -6617,11 +6617,11 @@ static int __init setup_preempt_mode(char *str)
        int mode = sched_dynamic_mode(str);
        if (mode < 0) {
                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-               return 1;
+               return 0;
        }
 
        sched_dynamic_update(mode);
-       return 0;
+       return 1;
 }
 __setup("preempt=", setup_preempt_mode);
 
@@ -8619,9 +8619,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
        idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
        kthread_set_per_cpu(idle, cpu);
 
-       scs_task_reset(idle);
-       kasan_unpoison_task_stack(idle);
-
 #ifdef CONFIG_SMP
        /*
         * It's possible that init_idle() gets called multiple times on a task,
@@ -8777,7 +8774,6 @@ void idle_task_exit(void)
                finish_arch_post_lock_switch();
        }
 
-       scs_task_reset(current);
        /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
 }
 
index 872e481d5098c84c6e604ab567cf5d4334d192da..9392aea1804e5d8512363f1a837cd27752ce8338 100644 (file)
@@ -615,7 +615,8 @@ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };
 
-       task_cputime(p, &cputime.utime, &cputime.stime);
+       if (task_cputime(p, &cputime.utime, &cputime.stime))
+               cputime.sum_exec_runtime = task_sched_runtime(p);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
@@ -828,19 +829,21 @@ u64 task_gtime(struct task_struct *t)
  * add up the pending nohz execution time since the last
  * cputime snapshot.
  */
-void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
+bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 {
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 delta;
+       int ret;
 
        if (!vtime_accounting_enabled()) {
                *utime = t->utime;
                *stime = t->stime;
-               return;
+               return false;
        }
 
        do {
+               ret = false;
                seq = read_seqcount_begin(&vtime->seqcount);
 
                *utime = t->utime;
@@ -850,6 +853,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                if (vtime->state < VTIME_SYS)
                        continue;
 
+               ret = true;
                delta = vtime_delta(vtime);
 
                /*
@@ -861,6 +865,8 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                else
                        *utime += vtime->utime + delta;
        } while (read_seqcount_retry(&vtime->seqcount, seq));
+
+       return ret;
 }
 
 static int vtime_state_fetch(struct vtime *vtime, int cpu)
index 76577d1642a5dc6fa6eb15048210414bd0591265..eca38107b32f162adad278aee6e522bdfe04c026 100644 (file)
@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
 
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+       __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+       /* POLLFREE must have cleared the queue. */
+       WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
index 7c4b7ae714d47f942c87547518112fbb5be35bf3..a629b11bf3e0d9e5d02ba7341094c8b313ec3244 100644 (file)
@@ -1298,6 +1298,12 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
        return ret;
 }
 
+enum sig_handler {
+       HANDLER_CURRENT, /* If reachable use the current handler */
+       HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
+       HANDLER_EXIT,    /* Only visible as the process exit code */
+};
+
 /*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1310,7 +1316,8 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
  * that is why we also clear SIGNAL_UNKILLABLE.
  */
 static int
-force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl)
+force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
+       enum sig_handler handler)
 {
        unsigned long int flags;
        int ret, blocked, ignored;
@@ -1321,9 +1328,10 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
-       if (blocked || ignored || sigdfl) {
+       if (blocked || ignored || (handler != HANDLER_CURRENT)) {
                action->sa.sa_handler = SIG_DFL;
-               action->sa.sa_flags |= SA_IMMUTABLE;
+               if (handler == HANDLER_EXIT)
+                       action->sa.sa_flags |= SA_IMMUTABLE;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
@@ -1343,7 +1351,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool
 
 int force_sig_info(struct kernel_siginfo *info)
 {
-       return force_sig_info_to_task(info, current, false);
+       return force_sig_info_to_task(info, current, HANDLER_CURRENT);
 }
 
 /*
@@ -1660,7 +1668,20 @@ void force_fatal_sig(int sig)
        info.si_code = SI_KERNEL;
        info.si_pid = 0;
        info.si_uid = 0;
-       force_sig_info_to_task(&info, current, true);
+       force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
+}
+
+void force_exit_sig(int sig)
+{
+       struct kernel_siginfo info;
+
+       clear_siginfo(&info);
+       info.si_signo = sig;
+       info.si_errno = 0;
+       info.si_code = SI_KERNEL;
+       info.si_pid = 0;
+       info.si_uid = 0;
+       force_sig_info_to_task(&info, current, HANDLER_EXIT);
 }
 
 /*
@@ -1693,7 +1714,7 @@ int force_sig_fault_to_task(int sig, int code, void __user *addr
        info.si_flags = flags;
        info.si_isr = isr;
 #endif
-       return force_sig_info_to_task(&info, t, false);
+       return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
 }
 
 int force_sig_fault(int sig, int code, void __user *addr
@@ -1813,7 +1834,8 @@ int force_sig_seccomp(int syscall, int reason, bool force_coredump)
        info.si_errno = reason;
        info.si_arch = syscall_get_arch(current);
        info.si_syscall = syscall;
-       return force_sig_info_to_task(&info, current, force_coredump);
+       return force_sig_info_to_task(&info, current,
+               force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
 }
 
 /* For the crazy architectures that include trap information in
index 322b65d456767ad5858fc1f1a777b8d908768f79..41f470929e9913d842fdaf58afb29173c9fde403 100644 (file)
@@ -595,7 +595,8 @@ void irq_enter_rcu(void)
 {
        __irq_enter_raw();
 
-       if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
+       if (tick_nohz_full_cpu(smp_processor_id()) ||
+           (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
                tick_irq_enter();
 
        account_hardirq_enter(current);
index 6bffe5af8cb1134d96749dbd55092d8c2bb54f22..17a283ce2b20fb0903f4fb7e4aaa5b13a9f5244b 100644 (file)
@@ -1375,6 +1375,13 @@ static inline void tick_nohz_irq_enter(void)
        now = ktime_get();
        if (ts->idle_active)
                tick_nohz_stop_idle(ts, now);
+       /*
+        * If all CPUs are idle. We may need to update a stale jiffies value.
+        * Note nohz_full is a special case: a timekeeper is guaranteed to stay
+        * alive but it might be busy looping with interrupts disabled in some
+        * rare case (typically stop machine). So we must make sure we have a
+        * last resort.
+        */
        if (ts->tick_stopped)
                tick_nohz_update_jiffies(now);
 }
index e3d2c23c413d42bf153144870ca2cab995b3ecb8..85f1021ad45955026eba845727e53c87693ed2db 100644 (file)
@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
 EXPORT_SYMBOL(msleep_interruptible);
 
 /**
- * usleep_range - Sleep for an approximate time
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min:       Minimum time in usecs to sleep
+ * @max:       Maximum time in usecs to sleep
+ * @state:     State of the current task that will be while sleeping
  *
  * In non-atomic context where the exact wakeup time is flexible, use
- * usleep_range() instead of udelay().  The sleep improves responsiveness
+ * usleep_range_state() instead of udelay().  The sleep improves responsiveness
  * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
  * power usage by allowing hrtimers to take advantage of an already-
  * scheduled interrupt instead of scheduling a new one just for this sleep.
  */
-void __sched usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+                               unsigned int state)
 {
        ktime_t exp = ktime_add_us(ktime_get(), min);
        u64 delta = (u64)(max - min) * NSEC_PER_USEC;
 
        for (;;) {
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(state);
                /* Do not return before the requested sleep time has elapsed */
                if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
                        break;
        }
 }
-EXPORT_SYMBOL(usleep_range);
+EXPORT_SYMBOL(usleep_range_state);
index 7396488793ff7b94cbc808c3d98fcdf850f21e88..ae9755037b7ee5bfb1c00e9d81a4688ed9ef4df0 100644 (file)
@@ -1111,8 +1111,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_ktime_get_boot_ns:
                return &bpf_ktime_get_boot_ns_proto;
-       case BPF_FUNC_ktime_get_coarse_ns:
-               return &bpf_ktime_get_coarse_ns_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_get_current_pid_tgid:
index 30bc880c3849cb6f1ff386b515fd7ffebf4baa7d..be5f6b32a01221398693a57069786c975f2a1b60 100644 (file)
@@ -5217,6 +5217,7 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
 {
        struct ftrace_direct_func *direct;
        struct ftrace_func_entry *entry;
+       struct ftrace_hash *hash;
        int ret = -ENODEV;
 
        mutex_lock(&direct_mutex);
@@ -5225,7 +5226,8 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
        if (!entry)
                goto out_unlock;
 
-       if (direct_functions->count == 1)
+       hash = direct_ops.func_hash->filter_hash;
+       if (hash->count == 1)
                unregister_ftrace_function(&direct_ops);
 
        ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
@@ -5540,6 +5542,10 @@ int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
        err = unregister_ftrace_function(ops);
        remove_direct_functions_hash(hash, addr);
        mutex_unlock(&direct_mutex);
+
+       /* cleanup for possible another register call */
+       ops->func = NULL;
+       ops->trampoline = 0;
        return err;
 }
 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
index f9139dc1262cdf55e25ef47c4fc81b37e1f47822..88de94da596b1364b770a703641826b2c5da35cd 100644 (file)
@@ -3812,6 +3812,18 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
                iter->fmt[i] = '\0';
                trace_seq_vprintf(&iter->seq, iter->fmt, ap);
 
+               /*
+                * If iter->seq is full, the above call no longer guarantees
+                * that ap is in sync with fmt processing, and further calls
+                * to va_arg() can return wrong positional arguments.
+                *
+                * Ensure that ap is no longer used in this case.
+                */
+               if (iter->seq.full) {
+                       p = "";
+                       break;
+               }
+
                if (star)
                        len = va_arg(ap, int);
 
@@ -6706,9 +6718,7 @@ waitagain:
                cnt = PAGE_SIZE - 1;
 
        /* reset all but tr, trace, and overruns */
-       memset(&iter->seq, 0,
-              sizeof(struct trace_iterator) -
-              offsetof(struct trace_iterator, seq));
+       memset_startat(iter, 0, seq);
        cpumask_clear(iter->started);
        trace_seq_init(&iter->seq);
        iter->pos = -1;
index 6b60ab9475edb3e04a5af854e557ee9c745f70c2..38715aa6cfdfb7453a7c32162920a67d968d2aca 100644 (file)
@@ -1366,14 +1366,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
        if (eflags & EVENT_FILE_FL_TRIGGER_COND)
                *tt = event_triggers_call(file, buffer, entry, event);
 
-       if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
-           (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
-            !filter_match_preds(file->filter, entry))) {
-               __trace_event_discard_commit(buffer, event);
-               return true;
-       }
+       if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
+                                   EVENT_FILE_FL_FILTERED |
+                                   EVENT_FILE_FL_PID_FILTER))))
+               return false;
+
+       if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
+               goto discard;
+
+       if (file->flags & EVENT_FILE_FL_FILTERED &&
+           !filter_match_preds(file->filter, entry))
+               goto discard;
+
+       if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
+           trace_event_ignore_this_pid(file))
+               goto discard;
 
        return false;
+ discard:
+       __trace_event_discard_commit(buffer, event);
+       return true;
 }
 
 /**
index 4021b9a79f93fe3c1354096537e886be300cb9e2..92be9cb1d7d4bc313507e3a1f516b4517701bc8e 100644 (file)
@@ -2678,12 +2678,24 @@ static struct trace_event_file *
 trace_create_new_event(struct trace_event_call *call,
                       struct trace_array *tr)
 {
+       struct trace_pid_list *no_pid_list;
+       struct trace_pid_list *pid_list;
        struct trace_event_file *file;
+       unsigned int first;
 
        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
        if (!file)
                return NULL;
 
+       pid_list = rcu_dereference_protected(tr->filtered_pids,
+                                            lockdep_is_held(&event_mutex));
+       no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+                                            lockdep_is_held(&event_mutex));
+
+       if (!trace_pid_list_first(pid_list, &first) ||
+           !trace_pid_list_first(no_pid_list, &first))
+               file->flags |= EVENT_FILE_FL_PID_FILTER;
+
        file->event_call = call;
        file->tr = tr;
        atomic_set(&file->sm_ref, 0);
index 8a10046c775fdb9f292760eccaa41573ac1ad47a..319f9c8ca7e7d8d4da9ef77e2c5eab9c78c63568 100644 (file)
@@ -2576,28 +2576,27 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
 
        /* Split the expression string at the root operator */
        if (!sep)
-               goto free;
+               return ERR_PTR(-EINVAL);
+
        *sep = '\0';
        operand1_str = str;
        str = sep+1;
 
        /* Binary operator requires both operands */
        if (*operand1_str == '\0' || *str == '\0')
-               goto free;
+               return ERR_PTR(-EINVAL);
 
        operand_flags = 0;
 
        /* LHS of string is an expression e.g. a+b in a+b+c */
        operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs);
-       if (IS_ERR(operand1)) {
-               ret = PTR_ERR(operand1);
-               operand1 = NULL;
-               goto free;
-       }
+       if (IS_ERR(operand1))
+               return ERR_CAST(operand1);
+
        if (operand1->flags & HIST_FIELD_FL_STRING) {
                hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
                ret = -EINVAL;
-               goto free;
+               goto free_op1;
        }
 
        /* RHS of string is another expression e.g. c in a+b+c */
@@ -2605,13 +2604,12 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
        operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
        if (IS_ERR(operand2)) {
                ret = PTR_ERR(operand2);
-               operand2 = NULL;
-               goto free;
+               goto free_op1;
        }
        if (operand2->flags & HIST_FIELD_FL_STRING) {
                hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
                ret = -EINVAL;
-               goto free;
+               goto free_operands;
        }
 
        switch (field_op) {
@@ -2629,12 +2627,12 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                break;
        default:
                ret = -EINVAL;
-               goto free;
+               goto free_operands;
        }
 
        ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2);
        if (ret)
-               goto free;
+               goto free_operands;
 
        operand_flags = var1 ? var1->flags : operand1->flags;
        operand2_flags = var2 ? var2->flags : operand2->flags;
@@ -2653,12 +2651,13 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
        expr = create_hist_field(hist_data, NULL, flags, var_name);
        if (!expr) {
                ret = -ENOMEM;
-               goto free;
+               goto free_operands;
        }
 
        operand1->read_once = true;
        operand2->read_once = true;
 
+       /* The operands are now owned and free'd by 'expr' */
        expr->operands[0] = operand1;
        expr->operands[1] = operand2;
 
@@ -2669,7 +2668,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                if (!divisor) {
                        hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str));
                        ret = -EDOM;
-                       goto free;
+                       goto free_expr;
                }
 
                /*
@@ -2709,18 +2708,22 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
                expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
                if (!expr->type) {
                        ret = -ENOMEM;
-                       goto free;
+                       goto free_expr;
                }
 
                expr->name = expr_str(expr, 0);
        }
 
        return expr;
-free:
-       destroy_hist_field(operand1, 0);
+
+free_operands:
        destroy_hist_field(operand2, 0);
-       destroy_hist_field(expr, 0);
+free_op1:
+       destroy_hist_field(operand1, 0);
+       return ERR_PTR(ret);
 
+free_expr:
+       destroy_hist_field(expr, 0);
        return ERR_PTR(ret);
 }
 
@@ -3026,8 +3029,10 @@ static inline void __update_field_vars(struct tracing_map_elt *elt,
                if (val->flags & HIST_FIELD_FL_STRING) {
                        char *str = elt_data->field_var_str[j++];
                        char *val_str = (char *)(uintptr_t)var_val;
+                       unsigned int size;
 
-                       strscpy(str, val_str, val->size);
+                       size = min(val->size, STR_VAR_LEN_MAX);
+                       strscpy(str, val_str, size);
                        var_val = (u64)(uintptr_t)str;
                }
                tracing_map_set_var(elt, var_idx, var_val);
@@ -3752,7 +3757,7 @@ static int check_synth_field(struct synth_event *event,
 
        if (strcmp(field->type, hist_field->type) != 0) {
                if (field->size != hist_field->size ||
-                   field->is_signed != hist_field->is_signed)
+                   (!field->is_string && field->is_signed != hist_field->is_signed))
                        return -EINVAL;
        }
 
@@ -4914,6 +4919,7 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
                        if (hist_field->flags & HIST_FIELD_FL_STRING) {
                                unsigned int str_start, var_str_idx, idx;
                                char *str, *val_str;
+                               unsigned int size;
 
                                str_start = hist_data->n_field_var_str +
                                        hist_data->n_save_var_str;
@@ -4922,7 +4928,9 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
 
                                str = elt_data->field_var_str[idx];
                                val_str = (char *)(uintptr_t)hist_val;
-                               strscpy(str, val_str, hist_field->size);
+
+                               size = min(hist_field->size, STR_VAR_LEN_MAX);
+                               strscpy(str, val_str, size);
 
                                hist_val = (u64)(uintptr_t)str;
                        }
index 22db3ce95e74f2fffa774656d43204692605b0ae..ca9c13b2ecf4ba44e72b4a0a6d391bc5897dbeaf 100644 (file)
@@ -1237,9 +1237,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                                                  argv + consumed, &consumed,
                                                  &field_version);
                        if (IS_ERR(field)) {
-                               argv_free(argv);
                                ret = PTR_ERR(field);
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        /*
@@ -1262,18 +1261,19 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                        if (cmd_version > 1 && n_fields_this_loop >= 1) {
                                synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
                                ret = -EINVAL;
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        fields[n_fields++] = field;
                        if (n_fields == SYNTH_FIELDS_MAX) {
                                synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
                                ret = -EINVAL;
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        n_fields_this_loop++;
                }
+               argv_free(argv);
 
                if (consumed < argc) {
                        synth_err(SYNTH_ERR_INVALID_CMD, 0);
@@ -1281,7 +1281,6 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                        goto err;
                }
 
-               argv_free(argv);
        }
 
        if (n_fields == 0) {
@@ -1307,6 +1306,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
        kfree(saved_fields);
 
        return ret;
+ err_free_arg:
+       argv_free(argv);
  err:
        for (i = 0; i < n_fields; i++)
                free_synth_field(fields[i]);
index 0a5c0db3137ee9f7c04c464f9b5d9a45dd64aa96..f5f0039d31e5aa9edda0344dd271e90f1d2f4c15 100644 (file)
@@ -1313,6 +1313,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
                return 0;
 
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+               tu = container_of(pos, struct trace_uprobe, tp);
                err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
                if (err) {
                        uprobe_perf_close(call, event);
index 39bb56d2dcbef650f1309a8fb3098cd9aab37812..9628b557184688485b586b3d130c414cc09e6442 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
+#include <linux/kmemleak.h>
 
 #include "tracing_map.h"
 #include "trace.h"
@@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a)
        for (i = 0; i < a->n_pages; i++) {
                if (!a->pages[i])
                        break;
+               kmemleak_free(a->pages[i]);
                free_page((unsigned long)a->pages[i]);
        }
 
@@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
                a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
                if (!a->pages[i])
                        goto free;
+               kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
        }
  out:
        return a;
index 9ef7ce18b4f56195b28a4ae3f189af08700922f0..5e14e32056add21469af2c9c77d17cfb25e2976f 100644 (file)
@@ -316,6 +316,7 @@ config DEBUG_INFO_BTF
        bool "Generate BTF typeinfo"
        depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
        depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+       depends on BPF_SYSCALL
        help
          Generate deduplicated BTF type information from DWARF debug info.
          Turning this on expects presence of pahole tool, which will convert
@@ -346,8 +347,9 @@ config FRAME_WARN
        int "Warn for stack frames larger than"
        range 0 8192
        default 2048 if GCC_PLUGIN_LATENT_ENTROPY
-       default 1536 if (!64BIT && (PARISC || XTENSA))
-       default 1024 if (!64BIT && !PARISC)
+       default 2048 if PARISC
+       default 1536 if (!64BIT && XTENSA)
+       default 1024 if !64BIT
        default 2048 if 64BIT
        help
          Tell gcc to warn at build time for stack frames larger than this.
index f9e89001b52ebdc08e3f5e6afa8c44fbf576844d..199ab201d5019c7c216f4db0f32da947e1d4503c 100644 (file)
@@ -75,6 +75,12 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
                touch_softlockup_watchdog();
        }
 
+       /*
+        * Force flush any remote buffers that might be stuck in IRQ context
+        * and therefore could not run their irq_work.
+        */
+       printk_trigger_flush();
+
        clear_bit_unlock(0, &backtrace_flag);
        put_cpu();
 }
index a90112ee72a1fee70ddab19281d354734b9b1bec..72b9068ab57bffbb308e1c449218aa205f5a8ffd 100644 (file)
@@ -49,6 +49,7 @@
        SIPROUND; \
        return (v0 ^ v1) ^ (v2 ^ v3);
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
        POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
        POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_unaligned);
-#endif
 
 /**
  * siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
        HSIPROUND; \
        return (v0 ^ v1) ^ (v2 ^ v3);
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key)
 {
@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
 
 /**
  * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
        HSIPROUND; \
        return v1 ^ v3;
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u32));
@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key)
 {
@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
 
 /**
  * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
index 67ed689a0b1bc78c2d5320691423d59e3906f1c4..0643573f86862b04d5e67bf088aeb17a59da5020 100644 (file)
@@ -869,6 +869,7 @@ static void kasan_memchr(struct kunit *test)
        ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+       OPTIMIZER_HIDE_VAR(size);
        KUNIT_EXPECT_KASAN_FAIL(test,
                kasan_ptr_result = memchr(ptr, '1', size + 1));
 
@@ -894,6 +895,7 @@ static void kasan_memcmp(struct kunit *test)
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        memset(arr, 0, sizeof(arr));
 
+       OPTIMIZER_HIDE_VAR(size);
        KUNIT_EXPECT_KASAN_FAIL(test,
                kasan_int_result = memcmp(ptr, arr, size+1));
        kfree(ptr);
index 65218ec5b8f2a4e506796a755143513f6922a3c9..fc45339fc3a3659e56d112979891a619f8b862cb 100644 (file)
@@ -11,8 +11,6 @@
 obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
 obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
 
-ccflags-y += -O3
-
 zstd_compress-y := \
                zstd_compress_module.o \
                common/debug.o \
index a1a051e4bce66c6e631d0e06e00fdac80997b484..f5a9c70a228a2bb204c5325f24a997d27139df97 100644 (file)
@@ -16,6 +16,7 @@
 *********************************************************/
 /* force inlining */
 
+#if !defined(ZSTD_NO_INLINE)
 #if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
 #  define INLINE_KEYWORD inline
 #else
 
 #define FORCE_INLINE_ATTR __attribute__((always_inline))
 
+#else
+
+#define INLINE_KEYWORD
+#define FORCE_INLINE_ATTR
+
+#endif
 
 /*
   On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
index ee03e0aedb0304aa7f968a918b7e53cc80e1eef1..b0610b255653f7c1b4d8072fb8b76648d3161daf 100644 (file)
@@ -411,6 +411,8 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef*
     const seqDef* sp = sstart;
     size_t matchLengthSum = 0;
     size_t litLengthSum = 0;
+    /* Only used by assert(), suppress unused variable warnings in production. */
+    (void)litLengthSum;
     while (send-sp > 0) {
         ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
         litLengthSum += seqLen.litLength;
index 04337050fe9a0546da1c68cac01953e6e7f9101d..dfc55e3e8119b7a797ca22aee127900f04326597 100644 (file)
@@ -8,6 +8,18 @@
  * You may select, at your option, one of the above-listed licenses.
  */
 
+/*
+ * Disable inlining for the optimal parser for the kernel build.
+ * It is unlikely to be used in the kernel, and where it is used
+ * latency shouldn't matter because it is very slow to begin with.
+ * We prefer a ~180KB binary size win over faster optimal parsing.
+ *
+ * TODO(https://github.com/facebook/zstd/issues/2862):
+ * Improve the code size of the optimal parser in general, so we
+ * don't need this hack for the kernel build.
+ */
+#define ZSTD_NO_INLINE 1
+
 #include "zstd_compress_internal.h"
 #include "hist.h"
 #include "zstd_opt.h"
index 068ce591a13afb02fbf6789d56780ced98bc411a..356f4f2c779e5fda55abcd8bc5b652d85e5af054 100644 (file)
@@ -428,7 +428,7 @@ config THP_SWAP
 # UP and nommu archs use km based percpu allocator
 #
 config NEED_PER_CPU_KM
-       depends on !SMP
+       depends on !SMP || !MMU
        bool
        default y
 
@@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS
 config KMAP_LOCAL
        bool
 
+config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
+       bool
+
 # struct io_mapping based helper.  Selected by drivers that need them
 config IO_MAPPING
        bool
index 1eead47610112257ca01c5ff306c64716959af8f..eae96dfe0261cfd70c38ee67c4e998fc4cd09a64 100644 (file)
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);
 
+       /*
+        * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+        * update the global bdi_min_ratio.
+        */
+       if (bdi->min_ratio)
+               bdi_set_min_ratio(bdi, 0);
+
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
index c381b3c525d0bd23739012b081a311a10ec00fd6..e924978952025b003711a6260db375a8bb21996f 100644 (file)
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
        for (i = 0; i < nr_ids; i++) {
                t = damon_new_target(ids[i]);
                if (!t) {
-                       pr_err("Failed to alloc damon_target\n");
                        /* The caller should do cleanup of the ids itself */
                        damon_for_each_target_safe(t, next, ctx)
                                damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
                    unsigned long aggr_int, unsigned long primitive_upd_int,
                    unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-       if (min_nr_reg < 3) {
-               pr_err("min_nr_regions (%lu) must be at least 3\n",
-                               min_nr_reg);
+       if (min_nr_reg < 3)
                return -EINVAL;
-       }
-       if (min_nr_reg > max_nr_reg) {
-               pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
-                               min_nr_reg, max_nr_reg);
+       if (min_nr_reg > max_nr_reg)
                return -EINVAL;
-       }
 
        ctx->sample_interval = sample_int;
        ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
 
 static void kdamond_usleep(unsigned long usecs)
 {
-       if (usecs > 100 * 1000)
-               schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+       /* See Documentation/timers/timers-howto.rst for the thresholds */
+       if (usecs > 20 * USEC_PER_MSEC)
+               schedule_timeout_idle(usecs_to_jiffies(usecs));
        else
-               usleep_range(usecs, usecs + 1);
+               usleep_idle_range(usecs, usecs + 1);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
                                ctx->callback.after_sampling(ctx))
                        done = true;
 
-               usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+               kdamond_usleep(ctx->sample_interval);
 
                if (ctx->primitive.check_accesses)
                        max_nr_accesses = ctx->primitive.check_accesses(ctx);
index eccc14b3490132baef9169bbefc5f6349e10ea35..1efac0022e9a46b2a94ba0329bbe157a33fc83d5 100644 (file)
@@ -32,7 +32,7 @@ static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
        if (*ppos)
                return ERR_PTR(-EINVAL);
 
-       kbuf = kmalloc(count + 1, GFP_KERNEL);
+       kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
        if (!kbuf)
                return ERR_PTR(-ENOMEM);
 
@@ -133,7 +133,7 @@ static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
        char *kbuf;
        ssize_t len;
 
-       kbuf = kmalloc(count, GFP_KERNEL);
+       kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
        if (!kbuf)
                return -ENOMEM;
 
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
                                &wmarks.low, &parsed);
                if (ret != 18)
                        break;
-               if (!damos_action_valid(action)) {
-                       pr_err("wrong action %d\n", action);
+               if (!damos_action_valid(action))
                        goto fail;
-               }
 
                pos += parsed;
                scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
@@ -452,7 +450,7 @@ static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
        char *kbuf;
        ssize_t len;
 
-       kbuf = kmalloc(count, GFP_KERNEL);
+       kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
        if (!kbuf)
                return -ENOMEM;
 
@@ -578,7 +576,7 @@ static ssize_t dbgfs_kdamond_pid_read(struct file *file,
        char *kbuf;
        ssize_t len;
 
-       kbuf = kmalloc(count, GFP_KERNEL);
+       kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
        if (!kbuf)
                return -ENOMEM;
 
@@ -877,12 +875,14 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
                return -EINVAL;
        }
 
+       mutex_lock(&damon_dbgfs_lock);
        if (!strncmp(kbuf, "on", count)) {
                int i;
 
                for (i = 0; i < dbgfs_nr_ctxs; i++) {
                        if (damon_targets_empty(dbgfs_ctxs[i])) {
                                kfree(kbuf);
+                               mutex_unlock(&damon_dbgfs_lock);
                                return -EINVAL;
                        }
                }
@@ -892,6 +892,7 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
        } else {
                ret = -EINVAL;
        }
+       mutex_unlock(&damon_dbgfs_lock);
 
        if (!ret)
                ret = count;
@@ -944,15 +945,16 @@ static int __init __damon_dbgfs_init(void)
 
 static int __init damon_dbgfs_init(void)
 {
-       int rc;
+       int rc = -ENOMEM;
 
+       mutex_lock(&damon_dbgfs_lock);
        dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
        if (!dbgfs_ctxs)
-               return -ENOMEM;
+               goto out;
        dbgfs_ctxs[0] = dbgfs_new_ctx();
        if (!dbgfs_ctxs[0]) {
                kfree(dbgfs_ctxs);
-               return -ENOMEM;
+               goto out;
        }
        dbgfs_nr_ctxs = 1;
 
@@ -963,6 +965,8 @@ static int __init damon_dbgfs_init(void)
                pr_err("%s: dbgfs init failed\n", __func__);
        }
 
+out:
+       mutex_unlock(&damon_dbgfs_lock);
        return rc;
 }
 
index ecfd0b2ed222d065d97e1061704ef0fa96e5ad51..6a1b9272ea123a1f2991ad632daf17ef0b173c76 100644 (file)
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                                struct damon_addr_range *three_regions,
                                unsigned long *expected, int nr_expected)
 {
-       struct damon_ctx *ctx = damon_new_ctx();
        struct damon_target *t;
        struct damon_region *r;
        int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
                damon_add_region(r, t);
        }
-       damon_add_target(ctx, t);
 
        damon_va_apply_three_regions(t, three_regions);
 
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
                KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
        }
-
-       damon_destroy_ctx(ctx);
 }
 
 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
                        new_three_regions, expected, ARRAY_SIZE(expected));
 }
 
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+               unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-       struct damon_ctx *c = damon_new_ctx();
-       struct damon_target *t;
-       struct damon_region *r;
-       unsigned long i;
-
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-                       -EINVAL);
-
-       t = damon_new_target(42);
-       r = damon_new_region(0, 100);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);
 
        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
 
-       i = 0;
        damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-               KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+               KUNIT_EXPECT_EQ(test, r->ar.start, start);
+               KUNIT_EXPECT_EQ(test, r->ar.end, end);
        }
+
        damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+       unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);
+       unsigned long expected_width = (end - start) / nr_pieces;
+       unsigned long i = 0;
 
-       t = damon_new_target(42);
-       r = damon_new_region(5, 59);
        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), 0);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
 
-       i = 0;
        damon_for_each_region(r, t) {
-               if (i == 4)
+               if (i == nr_pieces - 1)
                        break;
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+               KUNIT_EXPECT_EQ(test,
+                               r->ar.start, start + i++ * expected_width);
+               KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
        }
-       KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-       KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+       KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+       KUNIT_EXPECT_EQ(test, r->ar.end, end);
        damon_free_target(t);
+}
 
-       t = damon_new_target(42);
-       r = damon_new_region(5, 6);
-       damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+                       -EINVAL);
 
-       damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
-       }
-       damon_free_target(t);
-       damon_destroy_ctx(c);
+       damon_test_split_evenly_fail(test, 0, 100, 0);
+       damon_test_split_evenly_succ(test, 0, 100, 10);
+       damon_test_split_evenly_succ(test, 5, 59, 5);
+       damon_test_split_evenly_fail(test, 5, 6, 2);
 }
 
 static struct kunit_case damon_test_cases[] = {
index 35fe49080ee99636f045d3f8c785319947eadf4f..20a9a9d69eb19371e26b84cdc840e75e31b479d6 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/pagewalk.h>
+#include <linux/sched/mm.h>
 
 #include "prmtv-common.h"
 
@@ -626,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
        case DAMOS_STAT:
                return 0;
        default:
-               pr_warn("Wrong action %d\n", scheme->action);
                return -EINVAL;
        }
 
index daa0e23a6ee666b4fd7b7ab3b95ebd7ea156569e..39c4c46c61337e24b76cfeff9fe0dbc79f85be7d 100644 (file)
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
                        goto skip;
                if (!PageUptodate(page) || PageReadahead(page))
                        goto skip;
-               if (PageHWPoison(page))
-                       goto skip;
                if (!trylock_page(page))
                        goto skip;
                if (page->mapping != mapping)
index 88f65f1558453359e854322ef0e881e3fd1efc89..762679050c9a033151a0d982a426a7675fd2a251 100644 (file)
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
 }
 EXPORT_SYMBOL(kunmap_high);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
                unsigned start2, unsigned end2)
 {
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
        BUG_ON((start1 | start2 | end1 | end2) != 0);
 }
 EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_HIGHMEM */
 
 #ifdef CONFIG_KMAP_LOCAL
@@ -503,16 +501,22 @@ static inline int kmap_local_calc_idx(int idx)
 
 static pte_t *__kmap_pte;
 
-static pte_t *kmap_get_pte(void)
+static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
 {
+       if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
+               /*
+                * Set by the arch if __kmap_pte[-idx] does not produce
+                * the correct entry.
+                */
+               return virt_to_kpte(vaddr);
        if (!__kmap_pte)
                __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-       return __kmap_pte;
+       return &__kmap_pte[-idx];
 }
 
 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
-       pte_t pteval, *kmap_pte = kmap_get_pte();
+       pte_t pteval, *kmap_pte;
        unsigned long vaddr;
        int idx;
 
@@ -524,9 +528,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
        preempt_disable();
        idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       BUG_ON(!pte_none(*(kmap_pte - idx)));
+       kmap_pte = kmap_get_pte(vaddr, idx);
+       BUG_ON(!pte_none(*kmap_pte));
        pteval = pfn_pte(pfn, prot);
-       arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
+       arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
        arch_kmap_local_post_map(vaddr, pteval);
        current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
        preempt_enable();
@@ -559,7 +564,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
 void kunmap_local_indexed(void *vaddr)
 {
        unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
-       pte_t *kmap_pte = kmap_get_pte();
+       pte_t *kmap_pte;
        int idx;
 
        if (addr < __fix_to_virt(FIX_KMAP_END) ||
@@ -584,8 +589,9 @@ void kunmap_local_indexed(void *vaddr)
        idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
        WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+       kmap_pte = kmap_get_pte(addr, idx);
        arch_kmap_local_pre_unmap(addr);
-       pte_clear(&init_mm, addr, kmap_pte - idx);
+       pte_clear(&init_mm, addr, kmap_pte);
        arch_kmap_local_post_unmap(addr);
        current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
        kmap_local_idx_pop();
@@ -607,7 +613,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
 void __kmap_local_sched_out(void)
 {
        struct task_struct *tsk = current;
-       pte_t *kmap_pte = kmap_get_pte();
+       pte_t *kmap_pte;
        int i;
 
        /* Clear kmaps */
@@ -634,8 +640,9 @@ void __kmap_local_sched_out(void)
                idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 
                addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+               kmap_pte = kmap_get_pte(addr, idx);
                arch_kmap_local_pre_unmap(addr);
-               pte_clear(&init_mm, addr, kmap_pte - idx);
+               pte_clear(&init_mm, addr, kmap_pte);
                arch_kmap_local_post_unmap(addr);
        }
 }
@@ -643,7 +650,7 @@ void __kmap_local_sched_out(void)
 void __kmap_local_sched_in(void)
 {
        struct task_struct *tsk = current;
-       pte_t *kmap_pte = kmap_get_pte();
+       pte_t *kmap_pte;
        int i;
 
        /* Restore kmaps */
@@ -663,7 +670,8 @@ void __kmap_local_sched_in(void)
                /* See comment in __kmap_local_sched_out() */
                idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
                addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-               set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+               kmap_pte = kmap_get_pte(addr, idx);
+               set_pte_at(&init_mm, addr, kmap_pte, pteval);
                arch_kmap_local_post_map(addr, pteval);
        }
 }
index e09159c957e367f9adb37f9cc648d2cc39ff67ac..a1baa198519a2da44a0406c7b02c7c48d3118a39 100644 (file)
@@ -1037,8 +1037,10 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
         */
        struct resv_map *reservations = vma_resv_map(vma);
 
-       if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+       if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+               resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
                kref_put(&reservations->refs, resv_map_release);
+       }
 
        reset_vma_resv_huge_pages(vma);
 }
@@ -2971,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
        struct huge_bootmem_page *m = NULL; /* initialize for clang */
        int nr_nodes, node;
 
-       if (nid >= nr_online_nodes)
+       if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
                return 0;
        /* do node specific alloc */
        if (nid != NUMA_NO_NODE) {
@@ -4917,9 +4919,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 
                move_huge_pte(vma, old_addr, new_addr, src_pte);
        }
-       i_mmap_unlock_write(mapping);
        flush_tlb_range(vma, old_end - len, old_end);
        mmu_notifier_invalidate_range_end(&range);
+       i_mmap_unlock_write(mapping);
 
        return len + old_addr - old_end;
 }
@@ -4937,6 +4939,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
        struct mmu_notifier_range range;
+       bool force_flush = false;
 
        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
@@ -4965,10 +4968,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, vma, &address, ptep)) {
                        spin_unlock(ptl);
-                       /*
-                        * We just unmapped a page of PMDs by clearing a PUD.
-                        * The caller's TLB flush range should cover this area.
-                        */
+                       tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+                       force_flush = true;
                        continue;
                }
 
@@ -5025,6 +5026,22 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
        }
        mmu_notifier_invalidate_range_end(&range);
        tlb_end_vma(tlb, vma);
+
+       /*
+        * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+        * could defer the flush until now, since by holding i_mmap_rwsem we
+        * guaranteed that the last reference would not be dropped. But we must
+        * do the flushing before we return, as otherwise i_mmap_rwsem will be
+        * dropped and the last reference to the shared PMDs page might be
+        * dropped as well.
+        *
+        * In theory we could defer the freeing of the PMD pages as well, but
+        * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+        * detect sharing, so we cannot defer the release of the page either.
+        * Instead, do flush now.
+        */
+       if (force_flush)
+               tlb_flush_mmu_tlbonly(tlb);
 }
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -5734,13 +5751,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        int ret = -ENOMEM;
        struct page *page;
        int writable;
-       bool new_pagecache_page = false;
+       bool page_in_pagecache = false;
 
        if (is_continue) {
                ret = -EFAULT;
                page = find_lock_page(mapping, idx);
                if (!page)
                        goto out;
+               page_in_pagecache = true;
        } else if (!*pagep) {
                /* If a page already exists, then it's UFFDIO_COPY for
                 * a non-missing case. Return -EEXIST.
@@ -5828,7 +5846,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                ret = huge_add_to_page_cache(page, mapping, idx);
                if (ret)
                        goto out_release_nounlock;
-               new_pagecache_page = true;
+               page_in_pagecache = true;
        }
 
        ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5892,7 +5910,7 @@ out_release_unlock:
        if (vm_shared || is_continue)
                unlock_page(page);
 out_release_nounlock:
-       if (!new_pagecache_page)
+       if (!page_in_pagecache)
                restore_reserve_on_error(h, dst_vma, dst_addr, page);
        put_page(page);
        goto out;
index 781605e920153ce697f86f07dcaea6d84ad19d9e..2ed5f2a0879d3b5a95274ade98d601534ecb1975 100644 (file)
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
        rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-                                    struct pglist_data *pgdat,
-                                    enum node_stat_item idx, int nr)
-{
-       struct mem_cgroup *memcg;
-       struct lruvec *lruvec;
-
-       rcu_read_lock();
-       memcg = obj_cgroup_memcg(objcg);
-       lruvec = mem_cgroup_lruvec(memcg, pgdat);
-       mod_memcg_lruvec_state(lruvec, idx, nr);
-       rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-       struct memcg_stock_pcp *stock;
-
-       if (likely(in_task())) {
-               *pflags = 0UL;
-               preempt_disable();
-               stock = this_cpu_ptr(&memcg_stock);
-               return &stock->task_obj;
-       }
-
-       local_irq_save(*pflags);
-       stock = this_cpu_ptr(&memcg_stock);
-       return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-       if (likely(in_task()))
-               preempt_enable();
-       else
-               local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
  */
 #define OBJCGS_CLEAR_MASK      (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+       struct memcg_stock_pcp *stock;
+
+       if (likely(in_task())) {
+               *pflags = 0UL;
+               preempt_disable();
+               stock = this_cpu_ptr(&memcg_stock);
+               return &stock->task_obj;
+       }
+
+       local_irq_save(*pflags);
+       stock = this_cpu_ptr(&memcg_stock);
+       return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+       if (likely(in_task()))
+               preempt_enable();
+       else
+               local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+                                    struct pglist_data *pgdat,
+                                    enum node_stat_item idx, int nr)
+{
+       struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
+
+       rcu_read_lock();
+       memcg = obj_cgroup_memcg(objcg);
+       lruvec = mem_cgroup_lruvec(memcg, pgdat);
+       mod_memcg_lruvec_state(lruvec, idx, nr);
+       rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                 gfp_t gfp, bool new_page)
 {
@@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page,
 
        VM_BUG_ON(from == to);
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-       VM_BUG_ON(compound && !folio_test_multi(folio));
+       VM_BUG_ON(compound && !folio_test_large(folio));
 
        /*
         * Prevent mem_cgroup_migrate() from looking at
index 852041f6be418c317db8a40c94cf21d501ba9cb3..2a9627dc784c31072f39527f0a52953f72257aff 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/memblock.h>
 #include <linux/compaction.h>
 #include <linux/rmap.h>
+#include <linux/module.h>
 
 #include <asm/tlbflush.h>
 
index dc038ce78700ccc3e0c523a3791f36d5b310acbb..18f93c2d68f16e76fb244c12bca5aa0aba6125c6 100644 (file)
@@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                INIT_LIST_HEAD(&info->swaplist);
                simple_xattrs_init(&info->xattrs);
                cache_no_acl(inode);
+               mapping_set_large_folios(inode->i_mapping);
 
                switch (mode & S_IFMT) {
                default:
@@ -3870,7 +3871,7 @@ static struct file_system_type shmem_fs_type = {
        .parameters     = shmem_fs_parameters,
 #endif
        .kill_sb        = kill_litter_super,
-       .fs_flags       = FS_USERNS_MOUNT | FS_THP_SUPPORT,
+       .fs_flags       = FS_USERNS_MOUNT,
 };
 
 int __init shmem_init(void)
index da132a9ae6f8be6fd9744b0f9f464ad14fcb04ad..ca4822f6b2b6bb2de4800b82b7aa5d11d2610f93 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3733,14 +3733,13 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
        if (!cachep)
                return;
 
+       trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
        local_irq_save(flags);
        debug_check_no_locks_freed(objp, cachep->object_size);
        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(objp, cachep->object_size);
        __cache_free(cachep, objp, _RET_IP_);
        local_irq_restore(flags);
-
-       trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
index 58c01a34e5b86b3c7dca26875f95d62e8fac05ca..56ad7eea3ddfbcf7c434f9fe26a7600f31dd27a8 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -147,7 +147,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
 #else
-#define SLAB_CACHE_FLAGS (0)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
 
 /* Common flags available with current configuration */
index 74d3f6e60666e09e2dbdd8f74c4f79f84b31ea2c..03deee1e6a94c34ed711257a4af8d0ac13583667 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -666,6 +666,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
        kmemleak_free_recursive(b, c->flags);
+       trace_kmem_cache_free(_RET_IP_, b, c->name);
        if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
                struct slob_rcu *slob_rcu;
                slob_rcu = b + (c->size - sizeof(struct slob_rcu));
@@ -674,8 +675,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
        } else {
                __kmem_cache_free(b, c->size);
        }
-
-       trace_kmem_cache_free(_RET_IP_, b, c->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
index f7368bfffb7afc17cadefa0b75f15d38a6d0609c..abe7db581d686607277e3112647e893fa6ccad4c 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3526,8 +3526,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
        s = cache_from_obj(s, x);
        if (!s)
                return;
-       slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
        trace_kmem_cache_free(_RET_IP_, x, s->name);
+       slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -5081,6 +5081,7 @@ struct loc_track {
        unsigned long max;
        unsigned long count;
        struct location *loc;
+       loff_t idx;
 };
 
 static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)
 {
-
-       struct location *l;
-       unsigned int idx = *(unsigned int *)v;
        struct loc_track *t = seq->private;
+       struct location *l;
+       unsigned long idx;
 
+       idx = (unsigned long) t->idx;
        if (idx < t->count) {
                l = &t->loc[idx];
 
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
        struct loc_track *t = seq->private;
 
-       v = ppos;
-       ++*ppos;
+       t->idx = ++(*ppos);
        if (*ppos <= t->count)
-               return v;
+               return ppos;
 
        return NULL;
 }
 
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
+       struct loc_track *t = seq->private;
+
+       t->idx = *ppos;
        return ppos;
 }
 
index 1841c24682f8f12fbad984830f111fa8cdfb20cb..e8c9dc6d03771b2035079f0ebd74891e7169e31c 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -156,6 +156,7 @@ void put_pages_list(struct list_head *pages)
        }
 
        free_unref_page_list(pages);
+       INIT_LIST_HEAD(pages);
 }
 EXPORT_SYMBOL(put_pages_list);
 
index 16f706c55d925900bd6932177fba221ea3f8f56f..2b553184058372e09e2d1b34576c65606bc6c65f 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/swap_slots.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
index e58151a612555c1b66d17349b67a6862bcfb6b9c..741ba32a43ac4b4199d2be3776df39eefb6d651b 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -670,7 +670,7 @@ bool folio_mapped(struct folio *folio)
 {
        long i, nr;
 
-       if (folio_test_single(folio))
+       if (!folio_test_large(folio))
                return atomic_read(&folio->_mapcount) >= 0;
        if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
                return true;
index a3a0a5e994f5aeae550404701e72253a7e38a991..abaa5d96ded2453c2fb9b44d46e4146a965c9518 100644 (file)
@@ -184,9 +184,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
        if (err)
                goto out_unregister_netdev;
 
-       /* Account for reference in struct vlan_dev_priv */
-       dev_hold(real_dev);
-
        vlan_stacked_transfer_operstate(real_dev, dev, vlan);
        linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
 
index ab6dee28536daaaf825dfc22120ca111c6197f4b..a54535cbcf4cf55b81dcd955bf6e2baf79cd755e 100644 (file)
@@ -615,6 +615,9 @@ static int vlan_dev_init(struct net_device *dev)
        if (!vlan->vlan_pcpu_stats)
                return -ENOMEM;
 
+       /* Get vlan's reference to real_dev */
+       dev_hold(real_dev);
+
        return 0;
 }
 
index 15ac064b5562d7b99f885610a3d12733b63aa325..2a352e668d103948121970350d344f18b98f4aba 100644 (file)
@@ -4210,7 +4210,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */
 
-               if (txq->xmit_lock_owner != cpu) {
+               /* Other cpus might concurrently change txq->xmit_lock_owner
+                * to -1 or to their cpu id, but not to our id.
+                */
+               if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
                        if (dev_xmit_recursion())
                                goto recursion_alert;
 
index 5ba4f9434acda17426f1c2f63fd67d604c363361..c06c9ba6e8c5ea00a3999700a6724a404c1f05f9 100644 (file)
@@ -4110,14 +4110,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
                return err;
        }
 
-       if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
-           info->attrs[DEVLINK_ATTR_NETNS_FD] ||
-           info->attrs[DEVLINK_ATTR_NETNS_ID]) {
-               dest_net = devlink_netns_get(skb, info);
-               if (IS_ERR(dest_net))
-                       return PTR_ERR(dest_net);
-       }
-
        if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
                action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
        else
@@ -4160,6 +4152,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
                }
        }
+       if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+           info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+           info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+               dest_net = devlink_netns_get(skb, info);
+               if (IS_ERR(dest_net))
+                       return PTR_ERR(dest_net);
+       }
+
        err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
 
        if (dest_net)
@@ -4229,7 +4229,9 @@ static void __devlink_flash_update_notify(struct devlink *devlink,
        WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
                cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
                cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
-       WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
+
+       if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+               return;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
index be74ab4551c204a0ddf2cfc17c886d0f36f5be98..0ccfd5fa5cb9b5f608ab6715a718cd57ee4eecab 100644 (file)
@@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache)
        free_percpu(dst_cache->cache);
 }
 EXPORT_SYMBOL_GPL(dst_cache_destroy);
+
+void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+       int i;
+
+       if (!dst_cache->cache)
+               return;
+
+       dst_cache->reset_ts = jiffies;
+       for_each_possible_cpu(i) {
+               struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+               struct dst_entry *dst = idst->dst;
+
+               idst->cookie = 0;
+               idst->dst = NULL;
+               dst_release(dst);
+       }
+}
+EXPORT_SYMBOL_GPL(dst_cache_reset_now);
index 79df7cd9dbc16d5bd91394bce15ba5e3fd8244c9..1bb567a3b329cd06534f3e0fa27a463e06e538cc 100644 (file)
@@ -323,7 +323,7 @@ jumped:
                if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
                                                              fib6_rule_suppress,
                                                              fib4_rule_suppress,
-                                                             rule, arg))
+                                                             rule, flags, arg))
                        continue;
 
                if (err != -EAGAIN) {
index e471c9b0967057c29fa12fb8a5f0023b3535b51b..6102f093d59a513f0ed3b8cc75ccc776fa91eb16 100644 (file)
@@ -7162,6 +7162,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #endif
        case BPF_FUNC_sk_storage_get:
                return &bpf_sk_storage_get_cg_sock_proto;
+       case BPF_FUNC_ktime_get_coarse_ns:
+               return &bpf_ktime_get_coarse_ns_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -10327,6 +10329,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
                return &sk_reuseport_load_bytes_relative_proto;
        case BPF_FUNC_get_socket_cookie:
                return &bpf_get_socket_ptr_cookie_proto;
+       case BPF_FUNC_ktime_get_coarse_ns:
+               return &bpf_ktime_get_coarse_ns_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -10833,6 +10837,8 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_skc_to_unix_sock:
                func = &bpf_skc_to_unix_sock_proto;
                break;
+       case BPF_FUNC_ktime_get_coarse_ns:
+               return &bpf_ktime_get_coarse_ns_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
index 47931c8be04b80a249eb070c02773587a1ed1bb7..dda12fbd177ba6ad2798ea2b07733fa3f03441ab 100644 (file)
@@ -763,11 +763,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 
        ASSERT_RTNL();
 
-       n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+       n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;
 
-       n->protocol = 0;
        write_pnet(&n->net, net);
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
@@ -1779,6 +1778,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
 {
        neigh_tables[index] = NULL;
        /* It is not clean... Fix it to unload IPv6 module safely */
+       cancel_delayed_work_sync(&tbl->managed_work);
        cancel_delayed_work_sync(&tbl->gc_work);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
index 9b60e4301a44fbf3b16c39396b14674bd827e900..1a6978427d6c86695d214fb75cca94d5240bb5aa 100644 (file)
@@ -49,12 +49,6 @@ static int page_pool_init(struct page_pool *pool,
         * which is the XDP_TX use-case.
         */
        if (pool->p.flags & PP_FLAG_DMA_MAP) {
-               /* DMA-mapping is not supported on 32-bit systems with
-                * 64-bit DMA mapping.
-                */
-               if (sizeof(dma_addr_t) > sizeof(unsigned long))
-                       return -EOPNOTSUPP;
-
                if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
                    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
                        return -EINVAL;
@@ -75,6 +69,10 @@ static int page_pool_init(struct page_pool *pool,
                 */
        }
 
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
+           pool->p.flags & PP_FLAG_PAGE_FRAG)
+               return -EINVAL;
+
        if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
                return -ENOMEM;
 
index 1ae52ac943f626c2845ad892d83550b5c0afbd65..8eb671c827f90f1f3d2514163fc82998c9906cb6 100644 (file)
@@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
 
 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
 {
+       psock_set_prog(&psock->progs.stream_parser, NULL);
+
        if (!psock->saved_data_ready)
                return;
 
@@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
 
 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
 {
+       psock_set_prog(&psock->progs.stream_verdict, NULL);
+       psock_set_prog(&psock->progs.skb_verdict, NULL);
+
        if (!psock->saved_data_ready)
                return;
 
index 8f2b2f2c0e7b1decdb4a5c8d86327ed7caa62c99..41e91d0f7061452b3239325a7af8030336c8d2a7 100644 (file)
@@ -2124,8 +2124,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
        newsk->sk_prot_creator = prot;
 
        /* SANITY */
-       if (likely(newsk->sk_net_refcnt))
+       if (likely(newsk->sk_net_refcnt)) {
                get_net(sock_net(newsk));
+               sock_inuse_add(sock_net(newsk), 1);
+       }
        sk_node_init(&newsk->sk_node);
        sock_lock_init(newsk);
        bh_lock_sock(newsk);
@@ -2197,8 +2199,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
        newsk->sk_err_soft = 0;
        newsk->sk_priority = 0;
        newsk->sk_incoming_cpu = raw_smp_processor_id();
-       if (likely(newsk->sk_net_refcnt))
-               sock_inuse_add(sock_net(newsk), 1);
 
        /* Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.rst for details)
index f39ef79ced679dd19b2b3746363a7b9e6ef7b047..4ca4b11f4e5ffde88c1a8d7edb1ad21cf24e726d 100644 (file)
@@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk,
                write_lock_bh(&sk->sk_callback_lock);
                if (strp_stop)
                        sk_psock_stop_strp(sk, psock);
-               else
+               if (verdict_stop)
                        sk_psock_stop_verdict(sk, psock);
+
+               if (psock->psock_update_sk_prot)
+                       psock->psock_update_sk_prot(sk, psock, false);
                write_unlock_bh(&sk->sk_callback_lock);
        }
 }
@@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
 
        if (msg_parser)
                psock_set_prog(&psock->progs.msg_parser, msg_parser);
+       if (stream_parser)
+               psock_set_prog(&psock->progs.stream_parser, stream_parser);
+       if (stream_verdict)
+               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+       if (skb_verdict)
+               psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 
        ret = sock_map_init_proto(sk, psock);
        if (ret < 0)
@@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
                ret = sk_psock_init_strp(sk, psock);
                if (ret)
                        goto out_unlock_drop;
-               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
-               psock_set_prog(&psock->progs.stream_parser, stream_parser);
                sk_psock_start_strp(sk, psock);
        } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
-               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
                sk_psock_start_verdict(sk,psock);
        } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
-               psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
                sk_psock_start_verdict(sk, psock);
        }
        write_unlock_bh(&sk->sk_callback_lock);
index 65e9bc1058b57c5238e3fccf9316bc8d7b6268d3..20bcf86970ffd388708b2ee6c43953b8a55d2291 100644 (file)
@@ -1719,7 +1719,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
        struct ethtool_coalesce coalesce;
        int ret;
 
-       if (!dev->ethtool_ops->set_coalesce && !dev->ethtool_ops->get_coalesce)
+       if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce)
                return -EOPNOTSUPP;
 
        ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce,
index 38b44c0291b11af2c6d59c532c0c4ad4bba2ce27..96f4180aabd2e58f14d2a6e45780ee853b36fe15 100644 (file)
@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev)
        if (dev->dev.parent)
                pm_runtime_get_sync(dev->dev.parent);
 
-       if (!netif_device_present(dev)) {
+       if (!netif_device_present(dev) ||
+           dev->reg_state == NETREG_UNREGISTERING) {
                ret = -ENODEV;
                goto err;
        }
index 2cf02b4d77fbf04dd769ede2e058851ddfbf49c9..4bb9401b0a3fe57460a7ee2c962906233519407c 100644 (file)
@@ -205,6 +205,8 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
                    offsetof(struct tcp_congestion_ops, release))
                        return &bpf_sk_getsockopt_proto;
                return NULL;
+       case BPF_FUNC_ktime_get_coarse_ns:
+               return &bpf_ktime_get_coarse_ns_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
index ec73a0d52d3e3524f36e0c140699cf394696637a..323e622ff9b745350a0ce63a238774281ab326e4 100644 (file)
@@ -2591,7 +2591,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
 free:
        kfree(t);
 out:
-       return -ENOBUFS;
+       return -ENOMEM;
 }
 
 static void __devinet_sysctl_unregister(struct net *net,
index 9fe13e4f5d08a5cf9cd9ff15033b9f6e0dc9e492..4d61ddd8a0ecfc4cc47b4802eb5a573beb84ee44 100644 (file)
@@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net)
        int error;
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       net->ipv4.fib_num_tclassid_users = 0;
+       atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
 #endif
        error = ip_fib_net_init(net);
        if (error < 0)
index ce54a30c2ef1e8e79c8922be5eee35055fa51178..d279cb8ac1584487885f66819634b421c01bf819 100644 (file)
@@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule,
 }
 
 INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg)
 {
        struct fib_result *result = (struct fib_result *) arg->result;
@@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        if (tb[FRA_FLOW]) {
                rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
                if (rule4->tclassid)
-                       net->ipv4.fib_num_tclassid_users++;
+                       atomic_inc(&net->ipv4.fib_num_tclassid_users);
        }
 #endif
 
@@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (((struct fib4_rule *)rule)->tclassid)
-               net->ipv4.fib_num_tclassid_users--;
+               atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
        net->ipv4.fib_has_custom_rules = true;
 
index 3364cb9c67e018fea2b2e370046de5252581b996..fde7797b580694bb3924c5c6e9560cf04fd67387 100644 (file)
@@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
 {
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (fib_nh->nh_tclassid)
-               net->ipv4.fib_num_tclassid_users--;
+               atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
        fib_nh_common_release(&fib_nh->nh_common);
 }
@@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
 #ifdef CONFIG_IP_ROUTE_CLASSID
        nh->nh_tclassid = cfg->fc_flow;
        if (nh->nh_tclassid)
-               net->ipv4.fib_num_tclassid_users++;
+               atomic_inc(&net->ipv4.fib_num_tclassid_users);
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        nh->fib_nh_weight = nh_weight;
index f7fea3a7c5e64b92ca9c6b56293628923649e58c..62a67fdc344cd21505a84c905c1e2c05cc0ff866 100644 (file)
@@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
 
        sk_node_init(&nreq_sk->sk_node);
        nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
 #endif
        nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
index 9e8100728d464dd4b51e407d0b973ec878ada8d5..5dbd4b5505ebaaa366d8f825fb575c75a0932c84 100644 (file)
@@ -1899,15 +1899,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
 /* if any FIB entries reference this nexthop, any dst entries
  * need to be regenerated
  */
-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
+static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
+                             struct nexthop *replaced_nh)
 {
        struct fib6_info *f6i;
+       struct nh_group *nhg;
+       int i;
 
        if (!list_empty(&nh->fi_list))
                rt_cache_flush(net);
 
        list_for_each_entry(f6i, &nh->f6i_list, nh_list)
                ipv6_stub->fib6_update_sernum(net, f6i);
+
+       /* if an IPv6 group was replaced, we have to release all old
+        * dsts to make sure all refcounts are released
+        */
+       if (!replaced_nh->is_group)
+               return;
+
+       /* new dsts must use only the new nexthop group */
+       synchronize_net();
+
+       nhg = rtnl_dereference(replaced_nh->nh_grp);
+       for (i = 0; i < nhg->num_nh; i++) {
+               struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+               struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
+
+               if (nhi->family == AF_INET6)
+                       ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
+       }
 }
 
 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
@@ -2247,7 +2268,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
                err = replace_nexthop_single(net, old, new, extack);
 
        if (!err) {
-               nh_rt_cache_flush(net, old);
+               nh_rt_cache_flush(net, old, new);
 
                __remove_nexthop(net, new, NULL);
                nexthop_put(new);
@@ -2544,11 +2565,15 @@ static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
        /* sets nh_dev if successful */
        err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
                                      extack);
-       if (err)
+       if (err) {
+               /* IPv6 is not enabled, don't call fib6_nh_release */
+               if (err == -EAFNOSUPPORT)
+                       goto out;
                ipv6_stub->fib6_nh_release(fib6_nh);
-       else
+       } else {
                nh->nh_flags = fib6_nh->fib_nh_flags;
-
+       }
+out:
        return err;
 }
 
index b7796b4cf0a099e9f14b28e50cb07367021a7cbf..bbb3d39c69afc2d5a42c6ace8d473657861da61f 100644 (file)
@@ -1758,6 +1758,9 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
 {
        skb_frag_t *frag;
 
+       if (unlikely(offset_skb >= skb->len))
+               return NULL;
+
        offset_skb -= skb_headlen(skb);
        if ((int)offset_skb < 0 || skb_has_frag_list(skb))
                return NULL;
index 5e9d9c51164c4d23a90ebd2be0d7bf85098b47dc..e07837e23b3fd2435c87320945528abdee9817cc 100644 (file)
@@ -330,8 +330,6 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        if (tcp_in_slow_start(tp)) {
-               if (hystart && after(ack, ca->end_seq))
-                       bictcp_hystart_reset(sk);
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
@@ -391,6 +389,9 @@ static void hystart_update(struct sock *sk, u32 delay)
        struct bictcp *ca = inet_csk_ca(sk);
        u32 threshold;
 
+       if (after(tp->snd_una, ca->end_seq))
+               bictcp_hystart_reset(sk);
+
        if (hystart_detect & HYSTART_ACK_TRAIN) {
                u32 now = bictcp_clock_us(sk);
 
index cf913a66df17023bbab8b42e313ce646858c268a..7c2d3ac2363acebcfd92d7a4886c052c8aa120b9 100644 (file)
@@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
        int ret = 0;
        int state = child->sk_state;
 
-       /* record NAPI ID of child */
-       sk_mark_napi_id(child, skb);
+       /* record sk_napi_id and sk_rx_queue_mapping of child. */
+       sk_mark_napi_id_set(child, skb);
 
        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
index 319dd7bbfe33d64d25c36bc3a1cd3bd4caf0a779..23b05e28490b0a0a690a837027f26167e353f8ce 100644 (file)
@@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
                        kfree_skb(skb);
                        return -EINVAL;
                }
-               if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+               if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -1807,6 +1807,17 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
                skb = skb_recv_udp(sk, 0, 1, &err);
                if (!skb)
                        return err;
+
+               if (udp_lib_checksum_complete(skb)) {
+                       __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
+                                       IS_UDPLITE(sk));
+                       __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+                                       IS_UDPLITE(sk));
+                       atomic_inc(&sk->sk_drops);
+                       kfree_skb(skb);
+                       continue;
+               }
+
                used = recv_actor(desc, skb, 0, skb->len);
                if (used <= 0) {
                        if (!copied)
index 0c4da163535ad956be0a194478ca3ab988df872a..dab4a047590b73e5839a06e07c2ed1eb7447a26f 100644 (file)
@@ -1026,6 +1026,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
        .ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
        .fib6_nh_init      = fib6_nh_init,
        .fib6_nh_release   = fib6_nh_release,
+       .fib6_nh_release_dsts = fib6_nh_release_dsts,
        .fib6_update_sernum = fib6_update_sernum_stub,
        .fib6_rt_update    = fib6_rt_update,
        .ip6_del_rt        = ip6_del_rt,
index ed2f061b87685d6109021bd64d2029798c0ceb0f..f0bac6f7ab6bb768b12e51c937c0948e35ee5c07 100644 (file)
@@ -808,6 +808,12 @@ int esp6_input_done2(struct sk_buff *skb, int err)
                struct tcphdr *th;
 
                offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+
+               if (offset < 0) {
+                       err = -EINVAL;
+                       goto out;
+               }
+
                uh = (void *)(skb->data + offset);
                th = (void *)(skb->data + offset);
                hdr_len += offset;
index 40f3e4f9f33a238ae9d748d278aea37769a83f57..dcedfe29d9d932a3a85491021557005228860ffe 100644 (file)
@@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule,
 }
 
 INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg)
 {
        struct fib6_result *res = arg->result;
@@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
        return false;
 
 suppress_route:
-       if (!(arg->flags & FIB_LOOKUP_NOREF))
-               ip6_rt_put(rt);
+       ip6_rt_put_flags(rt, flags);
        return true;
 }
 
index 1b9827ff8ccf48e61e233e39d671aa67c8fff0ab..1cbd49d5788dd4cfb1b3224bed49df90f75b5d20 100644 (file)
@@ -248,9 +248,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
                 * memcmp() alone below is sufficient, right?
                 */
                 if ((first_word & htonl(0xF00FFFFF)) ||
-                   !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
-                   !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
-                   *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+                    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+                    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+                    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
 not_same_flow:
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
index 2f044a49afa8cf3586c36607c34073edecafc69c..ff4e83e2a5068322bb93391c7c5e2a8cb932730b 100644 (file)
@@ -174,7 +174,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm) {
-               IPCB(skb)->flags |= IPSKB_REROUTED;
+               IP6CB(skb)->flags |= IP6SKB_REROUTED;
                return dst_output(net, sk, skb);
        }
 #endif
index 3ae25b8ffbd6fbeda4b46438dc14b11235558f10..42d60c76d30a0386a356acb6bafeb4d80f4c43b6 100644 (file)
@@ -3680,6 +3680,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
        fib_nh_common_release(&fib6_nh->nh_common);
 }
 
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
+{
+       int cpu;
+
+       if (!fib6_nh->rt6i_pcpu)
+               return;
+
+       for_each_possible_cpu(cpu) {
+               struct rt6_info *pcpu_rt, **ppcpu_rt;
+
+               ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+               pcpu_rt = xchg(ppcpu_rt, NULL);
+               if (pcpu_rt) {
+                       dst_dev_put(&pcpu_rt->dst);
+                       dst_release(&pcpu_rt->dst);
+               }
+       }
+}
+
 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
                                              gfp_t gfp_flags,
                                              struct netlink_ext_ack *extack)
index 3adc5d9211ad695bb1cdf66405be3dfe1a539c76..d64855010948db23eb5ebe5ce0bc4dfff2634afb 100644 (file)
@@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
                hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
 
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+               /* the control block has been erased, so we have to set the
+                * iif once again.
+                * We read the receiving interface index directly from the
+                * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
+                * ip_rcv_core(...)).
+                */
+               IP6CB(skb)->iif = skb->skb_iif;
        }
 
        hdr->nexthdr = NEXTHDR_ROUTING;
index e2b791c37591f396c93e5f00564df76ff7173bd2..bd3d3195097faf2726614a6c32adc567b57455d3 100644 (file)
@@ -80,7 +80,8 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
        }
 
        /* also validate MU-MIMO change */
-       monitor_sdata = rtnl_dereference(local->monitor_sdata);
+       monitor_sdata = wiphy_dereference(local->hw.wiphy,
+                                         local->monitor_sdata);
 
        if (!monitor_sdata &&
            (params->vht_mumimo_groups || params->vht_mumimo_follow_addr))
@@ -840,7 +841,8 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 
        mutex_lock(&local->mtx);
        if (local->use_chanctx) {
-               sdata = rtnl_dereference(local->monitor_sdata);
+               sdata = wiphy_dereference(local->hw.wiphy,
+                                         local->monitor_sdata);
                if (sdata) {
                        ieee80211_vif_release_channel(sdata);
                        ret = ieee80211_vif_use_channel(sdata, chandef,
@@ -2707,7 +2709,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
                if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
-                       sdata = rtnl_dereference(local->monitor_sdata);
+                       sdata = wiphy_dereference(local->hw.wiphy,
+                                                 local->monitor_sdata);
                        if (!sdata)
                                return -EOPNOTSUPP;
                }
@@ -2767,7 +2770,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
        mutex_unlock(&local->iflist_mtx);
 
        if (has_monitor) {
-               sdata = rtnl_dereference(local->monitor_sdata);
+               sdata = wiphy_dereference(local->hw.wiphy,
+                                         local->monitor_sdata);
                if (sdata) {
                        sdata->user_power_level = local->user_power_level;
                        if (txp_type != sdata->vif.bss_conf.txpower_type)
index 9a2145c8192b6a1ec98d4cb77ca62df746e3579a..20aa5cc31f77e2afbaf77c8ff2d22e3200aa54ba 100644 (file)
@@ -588,7 +588,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
         */
        if (local->suspended) {
                WARN_ON(local->wowlan);
-               WARN_ON(rtnl_dereference(local->monitor_sdata));
+               WARN_ON(rcu_access_pointer(local->monitor_sdata));
                return;
        }
 
@@ -961,6 +961,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
                return 0;
 
        ASSERT_RTNL();
+       lockdep_assert_wiphy(local->hw.wiphy);
 
        if (local->monitor_sdata)
                return 0;
@@ -1028,6 +1029,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
                return;
 
        ASSERT_RTNL();
+       lockdep_assert_wiphy(local->hw.wiphy);
 
        mutex_lock(&local->iflist_mtx);
 
index fb3aaa3c56069d1ea303eeb5bd2ac4ed60db2c12..b71a1428d883c27e72267977db28d84546b3c998 100644 (file)
@@ -72,19 +72,19 @@ static inline void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
 #endif
 
 static inline void
-ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes)
+ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, int bytes)
 {
 #ifdef CONFIG_MAC80211_LEDS
-       if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
+       if (atomic_read(&local->tpt_led_active))
                local->tpt_led_trigger->tx_bytes += bytes;
 #endif
 }
 
 static inline void
-ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes)
+ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, int bytes)
 {
 #ifdef CONFIG_MAC80211_LEDS
-       if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
+       if (atomic_read(&local->tpt_led_active))
                local->tpt_led_trigger->rx_bytes += bytes;
 #endif
 }
index fc5c608d02e21658130fb1ae24d26fdcf21190b5..9541a4c30aca7c071d254c987839f723728312eb 100644 (file)
@@ -364,7 +364,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
         * the compiler to think we have walked past the end of the
         * struct member.
         */
-       pos = (void *)&rthdr->it_optional[it_present - rthdr->it_optional];
+       pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional];
 
        /* the order of the following fields is important */
 
@@ -1952,7 +1952,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
                int keyid = rx->sta->ptk_idx;
                sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
 
-               if (ieee80211_has_protected(fc)) {
+               if (ieee80211_has_protected(fc) &&
+                   !(status->flag & RX_FLAG_IV_STRIPPED)) {
                        cs = rx->sta->cipher_scheme;
                        keyid = ieee80211_get_keyid(rx->skb, cs);
 
@@ -4863,6 +4864,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
        struct ieee80211_rate *rate = NULL;
        struct ieee80211_supported_band *sband;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
        WARN_ON_ONCE(softirq_count() == 0);
 
@@ -4959,9 +4961,9 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
        if (!(status->flag & RX_FLAG_8023))
                skb = ieee80211_rx_monitor(local, skb, rate);
        if (skb) {
-               ieee80211_tpt_led_trig_rx(local,
-                                         ((struct ieee80211_hdr *)skb->data)->frame_control,
-                                         skb->len);
+               if ((status->flag & RX_FLAG_8023) ||
+                       ieee80211_is_data_present(hdr->frame_control))
+                       ieee80211_tpt_led_trig_rx(local, skb->len);
 
                if (status->flag & RX_FLAG_8023)
                        __ieee80211_rx_handle_8023(hw, pubsta, skb, list);
index a756a197c770fe8c0bc1d903a549489f4a3c24ff..278945e3e08acc846782411b02307c09705d839b 100644 (file)
@@ -1721,21 +1721,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
  * Returns false if the frame couldn't be transmitted but was queued instead.
  */
 static bool __ieee80211_tx(struct ieee80211_local *local,
-                          struct sk_buff_head *skbs, int led_len,
-                          struct sta_info *sta, bool txpending)
+                          struct sk_buff_head *skbs, struct sta_info *sta,
+                          bool txpending)
 {
        struct ieee80211_tx_info *info;
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_vif *vif;
        struct sk_buff *skb;
        bool result;
-       __le16 fc;
 
        if (WARN_ON(skb_queue_empty(skbs)))
                return true;
 
        skb = skb_peek(skbs);
-       fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
        info = IEEE80211_SKB_CB(skb);
        sdata = vif_to_sdata(info->control.vif);
        if (sta && !sta->uploaded)
@@ -1769,8 +1767,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
 
        result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);
 
-       ieee80211_tpt_led_trig_tx(local, fc, led_len);
-
        WARN_ON_ONCE(!skb_queue_empty(skbs));
 
        return result;
@@ -1920,7 +1916,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx_result res_prepare;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        bool result = true;
-       int led_len;
 
        if (unlikely(skb->len < 10)) {
                dev_kfree_skb(skb);
@@ -1928,7 +1923,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
        }
 
        /* initialises tx */
-       led_len = skb->len;
        res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
 
        if (unlikely(res_prepare == TX_DROP)) {
@@ -1951,8 +1945,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                return true;
 
        if (!invoke_tx_handlers_late(&tx))
-               result = __ieee80211_tx(local, &tx.skbs, led_len,
-                                       tx.sta, txpending);
+               result = __ieee80211_tx(local, &tx.skbs, tx.sta, txpending);
 
        return result;
 }
@@ -4175,6 +4168,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct sk_buff *next;
+       int len = skb->len;
 
        if (unlikely(skb->len < ETH_HLEN)) {
                kfree_skb(skb);
@@ -4221,10 +4215,8 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                }
        } else {
                /* we cannot process non-linear frames on this path */
-               if (skb_linearize(skb)) {
-                       kfree_skb(skb);
-                       goto out;
-               }
+               if (skb_linearize(skb))
+                       goto out_free;
 
                /* the frame could be fragmented, software-encrypted, and other
                 * things so we cannot really handle checksum offload with it -
@@ -4258,7 +4250,10 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        goto out;
  out_free:
        kfree_skb(skb);
+       len = 0;
  out:
+       if (len)
+               ieee80211_tpt_led_trig_tx(local, len);
        rcu_read_unlock();
 }
 
@@ -4396,8 +4391,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 }
 
 static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
-                             struct sk_buff *skb, int led_len,
-                             struct sta_info *sta,
+                             struct sk_buff *skb, struct sta_info *sta,
                              bool txpending)
 {
        struct ieee80211_local *local = sdata->local;
@@ -4410,6 +4404,8 @@ static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
        if (sta)
                sk_pacing_shift_update(skb->sk, local->hw.tx_sk_pacing_shift);
 
+       ieee80211_tpt_led_trig_tx(local, skb->len);
+
        if (ieee80211_queue_skb(local, sdata, sta, skb))
                return true;
 
@@ -4498,7 +4494,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
        if (key)
                info->control.hw_key = &key->conf;
 
-       ieee80211_tx_8023(sdata, skb, skb->len, sta, false);
+       ieee80211_tx_8023(sdata, skb, sta, false);
 
        return;
 
@@ -4637,7 +4633,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
                if (IS_ERR(sta) || (sta && !sta->uploaded))
                        sta = NULL;
 
-               result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true);
+               result = ieee80211_tx_8023(sdata, skb, sta, true);
        } else {
                struct sk_buff_head skbs;
 
@@ -4647,7 +4643,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
                hdr = (struct ieee80211_hdr *)skb->data;
                sta = sta_info_get(sdata, hdr->addr1);
 
-               result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
+               result = __ieee80211_tx(local, &skbs, sta, true);
        }
 
        return result;
index 39fa2a50385d89131c065845ae5f6155cd5c7205..43df2f0c5db9c52b2484196946d2008b22ff3cf0 100644 (file)
@@ -796,7 +796,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
 
        sdata = rcu_dereference_check(local->monitor_sdata,
                                      lockdep_is_held(&local->iflist_mtx) ||
-                                     lockdep_rtnl_is_held());
+                                     lockdep_is_held(&local->hw.wiphy->mtx));
        if (sdata &&
            (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
             sdata->flags & IEEE80211_SDATA_IN_DRIVER))
@@ -2381,7 +2381,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                   IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
 
        /* add interfaces */
-       sdata = rtnl_dereference(local->monitor_sdata);
+       sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
        if (sdata) {
                /* in HW restart it exists already */
                WARN_ON(local->resuming);
@@ -2426,7 +2426,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                WARN_ON(drv_add_chanctx(local, ctx));
                mutex_unlock(&local->chanctx_mtx);
 
-               sdata = rtnl_dereference(local->monitor_sdata);
+               sdata = wiphy_dereference(local->hw.wiphy,
+                                         local->monitor_sdata);
                if (sdata && ieee80211_sdata_running(sdata))
                        ieee80211_assign_chanctx(local, sdata);
        }
index 9ea6004abe1bea8508022e4d48c199c8f2294eda..62c6733e079232aae264b21129101b6bee1ac59b 100644 (file)
@@ -143,7 +143,6 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
 u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
                             struct sta_info *sta, struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mac80211_qos_map *qos_map;
        bool qos;
 
@@ -156,7 +155,7 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
        else
                qos = false;
 
-       if (!qos || (info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
+       if (!qos) {
                skb->priority = 0; /* required for correct WPA/11i MIC */
                return IEEE80211_AC_BE;
        }
index 46c44823edb7dd03a10804f737ae281ebb04b1ba..cdf09c2a7007a764f61c0cbc6ee977afe9bcd8e4 100644 (file)
@@ -952,7 +952,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
 }
 
 static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
-                            unsigned int daddr_extent)
+                            unsigned int daddr_extent, unsigned char type)
 {
        struct net *net = dev_net(mdev->dev);
        struct mctp_route *rt, *tmp;
@@ -969,7 +969,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
 
        list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
                if (rt->dev == mdev &&
-                   rt->min == daddr_start && rt->max == daddr_end) {
+                   rt->min == daddr_start && rt->max == daddr_end &&
+                   rt->type == type) {
                        list_del_rcu(&rt->list);
                        /* TODO: immediate RTM_DELROUTE */
                        mctp_route_release(rt);
@@ -987,7 +988,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
 
 int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
 {
-       return mctp_route_remove(mdev, addr, 0);
+       return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
 }
 
 /* removes all entries for a given device */
@@ -1195,7 +1196,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (rtm->rtm_type != RTN_UNICAST)
                return -EINVAL;
 
-       rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
+       rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
        return rc;
 }
 
index cc6b8803aa9d0a7bec28f01daef8d458bd90fe46..7b7918702592a738452ec34159f83f57c33b5f20 100644 (file)
@@ -12,7 +12,7 @@
 static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
                                    struct net_device *ndev)
 {
-       kfree(skb);
+       kfree_skb(skb);
        return NETDEV_TX_OK;
 }
 
index ffeb2df8be7ae8da3d00b71bfdc90694db7f7f29..0c7bde1c14a6a879a3263fae3390720c796a3610 100644 (file)
@@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
                goto err;
 
        /* Find the output device */
-       out_dev = rcu_dereference(nh->nh_dev);
+       out_dev = nh->nh_dev;
        if (!mpls_output_possible(out_dev))
                goto tx_err;
 
@@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
            (dev->addr_len != nh->nh_via_alen))
                goto errout;
 
-       RCU_INIT_POINTER(nh->nh_dev, dev);
+       nh->nh_dev = dev;
 
        if (!(dev->flags & IFF_UP)) {
                nh->nh_flags |= RTNH_F_DEAD;
@@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
        kfree(mdev);
 }
 
-static void mpls_ifdown(struct net_device *dev, int event)
+static int mpls_ifdown(struct net_device *dev, int event)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = dev_net(dev);
-       u8 alive, deleted;
        unsigned index;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
        for (index = 0; index < net->mpls.platform_labels; index++) {
                struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+               bool nh_del = false;
+               u8 alive = 0;
 
                if (!rt)
                        continue;
 
-               alive = 0;
-               deleted = 0;
+               if (event == NETDEV_UNREGISTER) {
+                       u8 deleted = 0;
+
+                       for_nexthops(rt) {
+                               if (!nh->nh_dev || nh->nh_dev == dev)
+                                       deleted++;
+                               if (nh->nh_dev == dev)
+                                       nh_del = true;
+                       } endfor_nexthops(rt);
+
+                       /* if there are no more nexthops, delete the route */
+                       if (deleted == rt->rt_nhn) {
+                               mpls_route_update(net, index, NULL, NULL);
+                               continue;
+                       }
+
+                       if (nh_del) {
+                               size_t size = sizeof(*rt) + rt->rt_nhn *
+                                       rt->rt_nh_size;
+                               struct mpls_route *orig = rt;
+
+                               rt = kmalloc(size, GFP_KERNEL);
+                               if (!rt)
+                                       return -ENOMEM;
+                               memcpy(rt, orig, size);
+                       }
+               }
+
                change_nexthops(rt) {
                        unsigned int nh_flags = nh->nh_flags;
 
-                       if (rtnl_dereference(nh->nh_dev) != dev)
+                       if (nh->nh_dev != dev)
                                goto next;
 
                        switch (event) {
@@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event)
                                break;
                        }
                        if (event == NETDEV_UNREGISTER)
-                               RCU_INIT_POINTER(nh->nh_dev, NULL);
+                               nh->nh_dev = NULL;
 
                        if (nh->nh_flags != nh_flags)
                                WRITE_ONCE(nh->nh_flags, nh_flags);
 next:
                        if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
                                alive++;
-                       if (!rtnl_dereference(nh->nh_dev))
-                               deleted++;
                } endfor_nexthops(rt);
 
                WRITE_ONCE(rt->rt_nhn_alive, alive);
 
-               /* if there are no more nexthops, delete the route */
-               if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
-                       mpls_route_update(net, index, NULL, NULL);
+               if (nh_del)
+                       mpls_route_update(net, index, rt, NULL);
        }
+
+       return 0;
 }
 
 static void mpls_ifup(struct net_device *dev, unsigned int flags)
@@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
                alive = 0;
                change_nexthops(rt) {
                        unsigned int nh_flags = nh->nh_flags;
-                       struct net_device *nh_dev =
-                               rtnl_dereference(nh->nh_dev);
 
                        if (!(nh_flags & flags)) {
                                alive++;
                                continue;
                        }
-                       if (nh_dev != dev)
+                       if (nh->nh_dev != dev)
                                continue;
                        alive++;
                        nh_flags &= ~flags;
@@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                return NOTIFY_OK;
 
        switch (event) {
+               int err;
+
        case NETDEV_DOWN:
-               mpls_ifdown(dev, event);
+               err = mpls_ifdown(dev, event);
+               if (err)
+                       return notifier_from_errno(err);
                break;
        case NETDEV_UP:
                flags = dev_get_flags(dev);
@@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                break;
        case NETDEV_CHANGE:
                flags = dev_get_flags(dev);
-               if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+               if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
                        mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
-               else
-                       mpls_ifdown(dev, event);
+               } else {
+                       err = mpls_ifdown(dev, event);
+                       if (err)
+                               return notifier_from_errno(err);
+               }
                break;
        case NETDEV_UNREGISTER:
-               mpls_ifdown(dev, event);
+               err = mpls_ifdown(dev, event);
+               if (err)
+                       return notifier_from_errno(err);
                mdev = mpls_dev_get(dev);
                if (mdev) {
                        mpls_dev_sysctl_unregister(dev, mdev);
@@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_CHANGENAME:
                mdev = mpls_dev_get(dev);
                if (mdev) {
-                       int err;
-
                        mpls_dev_sysctl_unregister(dev, mdev);
                        err = mpls_dev_sysctl_register(dev, mdev);
                        if (err)
@@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
                    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
                                nh->nh_via_alen))
                        goto nla_put_failure;
-               dev = rtnl_dereference(nh->nh_dev);
+               dev = nh->nh_dev;
                if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
                        goto nla_put_failure;
                if (nh->nh_flags & RTNH_F_LINKDOWN)
@@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
                        goto nla_put_failure;
 
                for_nexthops(rt) {
-                       dev = rtnl_dereference(nh->nh_dev);
+                       dev = nh->nh_dev;
                        if (!dev)
                                continue;
 
@@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 static bool mpls_rt_uses_dev(struct mpls_route *rt,
                             const struct net_device *dev)
 {
-       struct net_device *nh_dev;
-
        if (rt->rt_nhn == 1) {
                struct mpls_nh *nh = rt->rt_nh;
 
-               nh_dev = rtnl_dereference(nh->nh_dev);
-               if (dev == nh_dev)
+               if (nh->nh_dev == dev)
                        return true;
        } else {
                for_nexthops(rt) {
-                       nh_dev = rtnl_dereference(nh->nh_dev);
-                       if (nh_dev == dev)
+                       if (nh->nh_dev == dev)
                                return true;
                } endfor_nexthops(rt);
        }
@@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
                size_t nhsize = 0;
 
                for_nexthops(rt) {
-                       if (!rtnl_dereference(nh->nh_dev))
+                       if (!nh->nh_dev)
                                continue;
                        nhsize += nla_total_size(sizeof(struct rtnexthop));
                        /* RTA_VIA */
@@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
            nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
                        nh->nh_via_alen))
                goto nla_put_failure;
-       dev = rtnl_dereference(nh->nh_dev);
+       dev = nh->nh_dev;
        if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
                goto nla_put_failure;
 
@@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
                if (IS_ERR(rt0))
                        goto nort0;
-               RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
+               rt0->rt_nh->nh_dev = lo;
                rt0->rt_protocol = RTPROT_KERNEL;
                rt0->rt_payload_type = MPT_IPV4;
                rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
@@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
                if (IS_ERR(rt2))
                        goto nort2;
-               RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
+               rt2->rt_nh->nh_dev = lo;
                rt2->rt_protocol = RTPROT_KERNEL;
                rt2->rt_payload_type = MPT_IPV6;
                rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
index 838cdfc10e47d6efd51a7f89c674458f845e6a44..893df00b77b62ec8a6c5a85dd37679a2210fce73 100644 (file)
@@ -87,7 +87,7 @@ enum mpls_payload_type {
 };
 
 struct mpls_nh { /* next hop label forwarding entry */
-       struct net_device __rcu *nh_dev;
+       struct net_device       *nh_dev;
 
        /* nh_flags is accessed under RCU in the packet path; it is
         * modified handling netdev events with rtnl lock held
index 7c3420afb1a013e585318bf055a3db520bd11202..fe98e4f475baa4bcc0fb814be3bf1d74575f13e1 100644 (file)
@@ -422,28 +422,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
        return false;
 }
 
-/* MP_JOIN client subflow must wait for 4th ack before sending any data:
- * TCP can't schedule delack timer before the subflow is fully established.
- * MPTCP uses the delack timer to do 3rd ack retransmissions
- */
-static void schedule_3rdack_retransmission(struct sock *sk)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-       unsigned long timeout;
-
-       /* reschedule with a timeout above RTT, as we must look only for drop */
-       if (tp->srtt_us)
-               timeout = tp->srtt_us << 1;
-       else
-               timeout = TCP_TIMEOUT_INIT;
-
-       WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
-       icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
-       icsk->icsk_ack.timeout = timeout;
-       sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
-}
-
 static void clear_3rdack_retransmission(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -526,7 +504,15 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
                *size = TCPOLEN_MPTCP_MPJ_ACK;
                pr_debug("subflow=%p", subflow);
 
-               schedule_3rdack_retransmission(sk);
+               /* we can use the full delegate action helper only from BH context
+                * If we are in process context - sk is flushing the backlog at
+                * socket lock release time - just set the appropriate flag, will
+                * be handled by the release callback
+                */
+               if (sock_owned_by_user(sk))
+                       set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
+               else
+                       mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
                return true;
        }
        return false;
index b7e32e316738b88d4b9f907f584b12785e396dae..c82a76d2d0bfeb3761d7eee60e6c0936cc8d14bd 100644 (file)
@@ -1596,7 +1596,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
                        if (!xmit_ssk)
                                goto out;
                        if (xmit_ssk != ssk) {
-                               mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+                               mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
+                                                      MPTCP_DELEGATE_SEND);
                                goto out;
                        }
 
@@ -2943,7 +2944,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
                if (xmit_ssk == ssk)
                        __mptcp_subflow_push_pending(sk, ssk);
                else if (xmit_ssk)
-                       mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+                       mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
        } else {
                set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
        }
@@ -2993,18 +2994,50 @@ static void mptcp_release_cb(struct sock *sk)
        __mptcp_update_rmem(sk);
 }
 
+/* MP_JOIN client subflow must wait for 4th ack before sending any data:
+ * TCP can't schedule delack timer before the subflow is fully established.
+ * MPTCP uses the delack timer to do 3rd ack retransmissions
+ */
+static void schedule_3rdack_retransmission(struct sock *ssk)
+{
+       struct inet_connection_sock *icsk = inet_csk(ssk);
+       struct tcp_sock *tp = tcp_sk(ssk);
+       unsigned long timeout;
+
+       if (mptcp_subflow_ctx(ssk)->fully_established)
+               return;
+
+       /* reschedule with a timeout above RTT, as we must look only for drop */
+       if (tp->srtt_us)
+               timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
+       else
+               timeout = TCP_TIMEOUT_INIT;
+       timeout += jiffies;
+
+       WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
+       icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+       icsk->icsk_ack.timeout = timeout;
+       sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
+}
+
 void mptcp_subflow_process_delegated(struct sock *ssk)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;
 
-       mptcp_data_lock(sk);
-       if (!sock_owned_by_user(sk))
-               __mptcp_subflow_push_pending(sk, ssk);
-       else
-               set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
-       mptcp_data_unlock(sk);
-       mptcp_subflow_delegated_done(subflow);
+       if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+               mptcp_data_lock(sk);
+               if (!sock_owned_by_user(sk))
+                       __mptcp_subflow_push_pending(sk, ssk);
+               else
+                       set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+               mptcp_data_unlock(sk);
+               mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
+       }
+       if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
+               schedule_3rdack_retransmission(ssk);
+               mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
+       }
 }
 
 static int mptcp_hash(struct sock *sk)
index 67a61ac48b20a92d3ea7b27b818164e935e0d041..d87cc040352e3c319b84d03fc2deda84a0be18d9 100644 (file)
@@ -387,6 +387,7 @@ struct mptcp_delegated_action {
 DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
 
 #define MPTCP_DELEGATE_SEND            0
+#define MPTCP_DELEGATE_ACK             1
 
 /* MPTCP subflow context */
 struct mptcp_subflow_context {
@@ -492,23 +493,23 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
 
 void mptcp_subflow_process_delegated(struct sock *ssk);
 
-static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
 {
        struct mptcp_delegated_action *delegated;
        bool schedule;
 
+       /* the caller held the subflow bh socket lock */
+       lockdep_assert_in_softirq();
+
        /* The implied barrier pairs with mptcp_subflow_delegated_done(), and
         * ensures the below list check sees list updates done prior to status
         * bit changes
         */
-       if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+       if (!test_and_set_bit(action, &subflow->delegated_status)) {
                /* still on delegated list from previous scheduling */
                if (!list_empty(&subflow->delegated_node))
                        return;
 
-               /* the caller held the subflow bh socket lock */
-               lockdep_assert_in_softirq();
-
                delegated = this_cpu_ptr(&mptcp_delegated_actions);
                schedule = list_empty(&delegated->head);
                list_add_tail(&subflow->delegated_node, &delegated->head);
@@ -533,16 +534,16 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
 
 static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
 {
-       return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+       return !!READ_ONCE(subflow->delegated_status);
 }
 
-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
 {
        /* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
         * touching the status bit
         */
        smp_wmb();
-       clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+       clear_bit(action, &subflow->delegated_status);
 }
 
 int mptcp_is_enabled(const struct net *net);
index ba9ae482141b0f4be035085a6f9427cf581cf169..dda8b76b77988ab422afce64470e924a90124809 100644 (file)
@@ -18,6 +18,8 @@
 #include "internal.h"
 #include "ncsi-pkt.h"
 
+static const int padding_bytes = 26;
+
 u32 ncsi_calculate_checksum(unsigned char *data, int len)
 {
        u32 checksum = 0;
@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
 {
        struct ncsi_cmd_oem_pkt *cmd;
        unsigned int len;
+       int payload;
+       /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
+        * requires payload to be padded with 0 to
+        * 32-bit boundary before the checksum field.
+        * Ensure the padding bytes are accounted for in
+        * skb allocation
+        */
 
+       payload = ALIGN(nca->payload, 4);
        len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
-       if (nca->payload < 26)
-               len += 26;
-       else
-               len += nca->payload;
+       len += max(payload, padding_bytes);
 
        cmd = skb_put_zero(skb, len);
        memcpy(&cmd->mfr_id, nca->data, nca->payload);
@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
        struct net_device *dev = nd->dev;
        int hlen = LL_RESERVED_SPACE(dev);
        int tlen = dev->needed_tailroom;
+       int payload;
        int len = hlen + tlen;
        struct sk_buff *skb;
        struct ncsi_request *nr;
@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
                return NULL;
 
        /* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
+        * Payload needs padding so that the checksum field following payload is
+        * aligned to 32-bit boundary.
         * The packet needs padding if its payload is less than 26 bytes to
         * meet 64 bytes minimal ethernet frame length.
         */
        len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
-       if (nca->payload < 26)
-               len += 26;
-       else
-               len += nca->payload;
+       payload = ALIGN(nca->payload, 4);
+       len += max(payload, padding_bytes);
 
        /* Allocate skb */
        skb = alloc_skb(len, GFP_ATOMIC);
index e93c937a8bf026248001200ee674e04214c56468..51ad557a525b56564aff6cdf4b94590150d997a6 100644 (file)
@@ -1919,7 +1919,6 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
        struct ip_vs_proto_data *pd;
        struct ip_vs_conn *cp;
        int ret, pkts;
-       int conn_reuse_mode;
        struct sock *sk;
        int af = state->pf;
 
@@ -1997,15 +1996,16 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
        cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
                             ipvs, af, skb, &iph);
 
-       conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
-       if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+       if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+               int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
                bool old_ct = false, resched = false;
 
                if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
                    unlikely(!atomic_read(&cp->dest->weight))) {
                        resched = true;
                        old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
-               } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
+               } else if (conn_reuse_mode &&
+                          is_new_conn_expected(cp, conn_reuse_mode)) {
                        old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
                        if (!atomic_read(&cp->n_control)) {
                                resched = true;
index 770a63103c7a4240b8559a97f707588d569beba8..4712a90a1820ce00146ca0080aa6ce46c826331c 100644 (file)
@@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
-               s32 timeout = ct->timeout - nfct_time_stamp;
+               s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
 
                tstamp->stop = ktime_get_real_ns();
                if (timeout < 0)
@@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
        }
 
        /* We want the clashing entry to go away real soon: 1 second timeout. */
-       loser_ct->timeout = nfct_time_stamp + HZ;
+       WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
 
        /* IPS_NAT_CLASH removes the entry automatically on the first
         * reply.  Also prevents UDP tracker from moving the entry to
@@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net,
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
-       ct->timeout = 0;
+       WRITE_ONCE(ct->timeout, 0);
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset, 0,
               offsetof(struct nf_conn, proto) -
index f1e5443fe7c74cde3f5f0a1a01bcbe530bedb75c..81d03acf68d4df95c65642d89aa03a418fd8fc9f 100644 (file)
@@ -1011,11 +1011,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
                                                   CTA_TUPLE_REPLY,
                                                   filter->family,
                                                   &filter->zone,
-                                                  filter->orig_flags);
-               if (err < 0) {
-                       err = -EINVAL;
+                                                  filter->reply_flags);
+               if (err < 0)
                        goto err_filter;
-               }
        }
 
        return filter;
@@ -2000,7 +1998,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
 
        if (timeout > INT_MAX)
                timeout = INT_MAX;
-       ct->timeout = nfct_time_stamp + (u32)timeout;
+       WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
 
        if (test_bit(IPS_DYING_BIT, &ct->status))
                return -ETIME;
index 87a7388b6c8942273eb12447d959c8565cb3b606..ed37bb9b4e5886cd7681606ad9d423ec5575f9be 100644 (file)
@@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        if (timeout < 0)
                timeout = 0;
 
-       if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
-               ct->timeout = nfct_time_stamp + timeout;
+       if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
+               WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
 }
 
 static void flow_offload_fixup_ct_state(struct nf_conn *ct)
index d6bf1b2cd541b7302bdb7284f954d91af386e7a1..b561e0a44a45f36392611d6ed4b94c34f920e29f 100644 (file)
@@ -65,11 +65,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
                       sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.src, &in6addr_any,
                           sizeof(struct in6_addr)))
-                       memset(&key->enc_ipv6.src, 0xff,
+                       memset(&mask->enc_ipv6.src, 0xff,
                               sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
                           sizeof(struct in6_addr)))
-                       memset(&key->enc_ipv6.dst, 0xff,
+                       memset(&mask->enc_ipv6.dst, 0xff,
                               sizeof(struct in6_addr));
                enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
                key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
index 4acc4b8e9fe5a0fde7305afda92e6ac01c59ac02..5837e8efc9c201ca64c0aa03e60d82c4858f1559 100644 (file)
@@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        struct net_device *indev;
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
-       enum ip_conntrack_info ctinfo;
+       enum ip_conntrack_info ctinfo = 0;
        struct nfnl_ct_hook *nfnl_ct;
        bool csum_verify;
        char *secdata = NULL;
index af4ee874a067c69c295492584ab87c72b2caf23b..dbe1f2e7dd9ed620b1ab0c99c214178e6dbc6a16 100644 (file)
@@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 
        tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
        if (!tcph)
-               return;
+               goto err;
 
        opt = (u8 *)tcph;
        for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
@@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
                        continue;
 
                if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
-                       return;
+                       goto err;
 
                if (skb_ensure_writable(pkt->skb,
                                        nft_thoff(pkt) + i + priv->len))
-                       return;
+                       goto err;
 
                tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
                                              &tcphdr_len);
                if (!tcph)
-                       return;
+                       goto err;
 
                offset = i + priv->offset;
 
@@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 
                return;
        }
+       return;
+err:
+       regs->verdict.code = NFT_BREAK;
 }
 
 static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
index cbfe4e4a4ad7af188861791ba82d7da06415db33..bd689938a2e0c78810895d5bd06377fe1a0853d7 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/icmpv6.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
-#include <linux/ip.h>
 #include <net/sctp/checksum.h>
 
 static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
index e517663e0cd175f0421b4878bbe4f3f9815acd65..6f4116e729581fd1f3bb418b124908cf38159afc 100644 (file)
@@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
                        NFT_PIPAPO_AVX2_BUCKET_LOAD8(4,  lt, 4, pkt[4], bsize);
 
                        NFT_PIPAPO_AVX2_AND(5, 0, 1);
-                       NFT_PIPAPO_AVX2_BUCKET_LOAD8(6,  lt, 6, pkt[5], bsize);
+                       NFT_PIPAPO_AVX2_BUCKET_LOAD8(6,  lt, 5, pkt[5], bsize);
                        NFT_PIPAPO_AVX2_AND(7, 2, 3);
 
                        /* Stall */
index 2f7cf5ecebf4f3b66f817e6e3d17e5114924189f..0f8bb0bf558f97a304860133bf781003005fa028 100644 (file)
@@ -85,9 +85,9 @@ static ssize_t idletimer_tg_show(struct device *dev,
        mutex_unlock(&list_mutex);
 
        if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
-               return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
+               return sysfs_emit(buf, "%ld\n", time_diff);
 
-       return snprintf(buf, PAGE_SIZE, "0\n");
+       return sysfs_emit(buf, "0\n");
 }
 
 static void idletimer_tg_work(struct work_struct *work)
index 4c575324a98528bec4188acf27eecc2f98ae5e0a..9eba2e6483851db2313a093d3ac17deb4c45b7b6 100644 (file)
@@ -1852,6 +1852,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;
 
+       if (len == 0) {
+               pr_warn_once("Zero length message leads to an empty skb\n");
+               return -ENODATA;
+       }
+
        err = scm_send(sock, msg, &scm, true);
        if (err < 0)
                return err;
index 3c645c1d99c9b63f3519d53e717f2c878c4efcb6..dc7a2404efdf9c7c17e1cf25a39df0b5d3afc76c 100644 (file)
@@ -94,13 +94,13 @@ int nfc_dev_up(struct nfc_dev *dev)
 
        device_lock(&dev->dev);
 
-       if (dev->rfkill && rfkill_blocked(dev->rfkill)) {
-               rc = -ERFKILL;
+       if (!device_is_registered(&dev->dev)) {
+               rc = -ENODEV;
                goto error;
        }
 
-       if (!device_is_registered(&dev->dev)) {
-               rc = -ENODEV;
+       if (dev->rfkill && rfkill_blocked(dev->rfkill)) {
+               rc = -ERFKILL;
                goto error;
        }
 
@@ -1125,11 +1125,7 @@ int nfc_register_device(struct nfc_dev *dev)
        if (rc)
                pr_err("Could not register llcp device\n");
 
-       rc = nfc_genl_device_added(dev);
-       if (rc)
-               pr_debug("The userspace won't be notified that the device %s was added\n",
-                        dev_name(&dev->dev));
-
+       device_lock(&dev->dev);
        dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev,
                                   RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev);
        if (dev->rfkill) {
@@ -1138,6 +1134,12 @@ int nfc_register_device(struct nfc_dev *dev)
                        dev->rfkill = NULL;
                }
        }
+       device_unlock(&dev->dev);
+
+       rc = nfc_genl_device_added(dev);
+       if (rc)
+               pr_debug("The userspace won't be notified that the device %s was added\n",
+                        dev_name(&dev->dev));
 
        return 0;
 }
@@ -1154,10 +1156,17 @@ void nfc_unregister_device(struct nfc_dev *dev)
 
        pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
+       rc = nfc_genl_device_removed(dev);
+       if (rc)
+               pr_debug("The userspace won't be notified that the device %s "
+                        "was removed\n", dev_name(&dev->dev));
+
+       device_lock(&dev->dev);
        if (dev->rfkill) {
                rfkill_unregister(dev->rfkill);
                rfkill_destroy(dev->rfkill);
        }
+       device_unlock(&dev->dev);
 
        if (dev->ops->check_presence) {
                device_lock(&dev->dev);
@@ -1167,11 +1176,6 @@ void nfc_unregister_device(struct nfc_dev *dev)
                cancel_work_sync(&dev->check_pres_work);
        }
 
-       rc = nfc_genl_device_removed(dev);
-       if (rc)
-               pr_debug("The userspace won't be notified that the device %s "
-                        "was removed\n", dev_name(&dev->dev));
-
        nfc_llcp_unregister_device(dev);
 
        mutex_lock(&nfc_devlist_mutex);
index 6fd873aa86bee0e5b5fb219797d79aa611ae9296..d2537383a3e89d181f8281f1d9c3508da8bdc39f 100644 (file)
@@ -144,12 +144,15 @@ inline int nci_request(struct nci_dev *ndev,
 {
        int rc;
 
-       if (!test_bit(NCI_UP, &ndev->flags))
-               return -ENETDOWN;
-
        /* Serialize all requests */
        mutex_lock(&ndev->req_lock);
-       rc = __nci_request(ndev, req, opt, timeout);
+       /* check the state after obtaing the lock against any races
+        * from nci_close_device when the device gets removed.
+        */
+       if (test_bit(NCI_UP, &ndev->flags))
+               rc = __nci_request(ndev, req, opt, timeout);
+       else
+               rc = -ENETDOWN;
        mutex_unlock(&ndev->req_lock);
 
        return rc;
@@ -473,6 +476,11 @@ static int nci_open_device(struct nci_dev *ndev)
 
        mutex_lock(&ndev->req_lock);
 
+       if (test_bit(NCI_UNREG, &ndev->flags)) {
+               rc = -ENODEV;
+               goto done;
+       }
+
        if (test_bit(NCI_UP, &ndev->flags)) {
                rc = -EALREADY;
                goto done;
@@ -545,6 +553,10 @@ done:
 static int nci_close_device(struct nci_dev *ndev)
 {
        nci_req_cancel(ndev, ENODEV);
+
+       /* This mutex needs to be held as a barrier for
+        * caller nci_unregister_device
+        */
        mutex_lock(&ndev->req_lock);
 
        if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
@@ -582,8 +594,8 @@ static int nci_close_device(struct nci_dev *ndev)
 
        del_timer_sync(&ndev->cmd_timer);
 
-       /* Clear flags */
-       ndev->flags = 0;
+       /* Clear flags except NCI_UNREG */
+       ndev->flags &= BIT(NCI_UNREG);
 
        mutex_unlock(&ndev->req_lock);
 
@@ -1266,6 +1278,12 @@ void nci_unregister_device(struct nci_dev *ndev)
 {
        struct nci_conn_info *conn_info, *n;
 
+       /* This set_bit is not protected with specialized barrier,
+        * However, it is fine because the mutex_lock(&ndev->req_lock);
+        * in nci_close_device() will help to emit one.
+        */
+       set_bit(NCI_UNREG, &ndev->flags);
+
        nci_close_device(ndev);
 
        destroy_workqueue(ndev->cmd_wq);
index 334f63c9529efaf6170c82a0ec5bd7805d5f44da..f184b0db79d4026418ffa4fbf80bd6b38bdd3d0a 100644 (file)
@@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_device_iter_exit(iter);
-       kfree(iter);
+       if (iter) {
+               nfc_device_iter_exit(iter);
+               kfree(iter);
+       }
 
        return 0;
 }
@@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_device_iter_exit(iter);
-       kfree(iter);
+       if (iter) {
+               nfc_device_iter_exit(iter);
+               kfree(iter);
+       }
 
        return 0;
 }
index abf19c0e3ba0bfcf0396df2d78b937e288b84ab0..5327d130c4b5691e788bbbcb990a349d714ad8d4 100644 (file)
@@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock)
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
-               sk->sk_sndbuf = rtn->rcvbuf_size;
+               sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
index dbea0bfee48e9e4df50fda68ade4624761f44d33..8120138dac01810854c8376acdec90e3c13dd4a8 100644 (file)
@@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
        return bundle;
 }
 
+static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
+{
+       rxrpc_put_peer(bundle->params.peer);
+       kfree(bundle);
+}
+
 void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
 {
        unsigned int d = bundle->debug_id;
        unsigned int u = atomic_dec_return(&bundle->usage);
 
        _debug("PUT B=%x %u", d, u);
-       if (u == 0) {
-               rxrpc_put_peer(bundle->params.peer);
-               kfree(bundle);
-       }
+       if (u == 0)
+               rxrpc_free_bundle(bundle);
 }
 
 /*
@@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        return candidate;
 
 found_bundle_free:
-       kfree(candidate);
+       rxrpc_free_bundle(candidate);
 found_bundle:
        rxrpc_get_bundle(bundle);
        spin_unlock(&local->client_bundles_lock);
index 68396d05205252177ea9525b43972f1598480e12..0298fe2ad6d323b377b46d5dc70c8d472d899966 100644 (file)
@@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
        return peer;
 }
 
+static void rxrpc_free_peer(struct rxrpc_peer *peer)
+{
+       rxrpc_put_local(peer->local);
+       kfree_rcu(peer, rcu);
+}
+
 /*
  * Set up a new incoming peer.  There shouldn't be any other matching peers
  * since we've already done a search in the list from the non-reentrant context
@@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
                spin_unlock_bh(&rxnet->peer_hash_lock);
 
                if (peer)
-                       kfree(candidate);
+                       rxrpc_free_peer(candidate);
                else
                        peer = candidate;
        }
@@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
        list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
-       rxrpc_put_local(peer->local);
-       kfree_rcu(peer, rcu);
+       rxrpc_free_peer(peer);
 }
 
 /*
@@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
        if (n == 0) {
                hash_del_rcu(&peer->hash_link);
                list_del_init(&peer->keepalive_link);
-               rxrpc_put_local(peer->local);
-               kfree_rcu(peer, rcu);
+               rxrpc_free_peer(peer);
        }
 }
 
index d64b0eeccbe4df30b2715981b1ba394708cf6a8b..efc963ab995a333ab21ec3d00884097c6a10a2b4 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/if_arp.h>
 #include <net/net_namespace.h>
 #include <net/netlink.h>
+#include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
 #include <linux/tc_act/tc_mirred.h>
@@ -228,6 +229,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
        bool want_ingress;
        bool is_redirect;
        bool expects_nh;
+       bool at_ingress;
        int m_eaction;
        int mac_len;
        bool at_nh;
@@ -263,7 +265,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
         * ingress - that covers the TC S/W datapath.
         */
        is_redirect = tcf_mirred_is_act_redirect(m_eaction);
-       use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
+       at_ingress = skb_at_tc_ingress(skb);
+       use_reinsert = at_ingress && is_redirect &&
                       tcf_mirred_can_reinsert(retval);
        if (!use_reinsert) {
                skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -271,10 +274,12 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                        goto out;
        }
 
+       want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+
        /* All mirred/redirected skbs should clear previous ct info */
        nf_reset_ct(skb2);
-
-       want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+       if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
+               skb_dst_drop(skb2);
 
        expects_nh = want_ingress || !m_mac_header_xmit;
        at_nh = skb->data == skb_network_header(skb);
index 0eae9ff5edf6ff1eb4604246b936168dd19ca2ad..e007fc75ef2feffdda6435031d548ff38fb1a790 100644 (file)
@@ -665,12 +665,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
                        q->classes[i].deficit = quanta[i];
                }
        }
+       for (i = q->nbands; i < oldbands; i++) {
+               qdisc_tree_flush_backlog(q->classes[i].qdisc);
+               if (i >= q->nstrict)
+                       list_del(&q->classes[i].alist);
+       }
        q->nstrict = nstrict;
        memcpy(q->prio2band, priomap, sizeof(priomap));
 
-       for (i = q->nbands; i < oldbands; i++)
-               qdisc_tree_flush_backlog(q->classes[i].qdisc);
-
        for (i = 0; i < q->nbands; i++)
                q->classes[i].quantum = quanta[i];
 
index 830f3559f727ad471b595b2f5b04788827da89b0..d6aba6edd16e5eab120a57c316fcb06a5d5f3442 100644 (file)
@@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
        struct fq_pie_sched_data *q = qdisc_priv(sch);
 
        tcf_block_put(q->block);
+       q->p_params.tupdate = 0;
        del_timer_sync(&q->adapt_timer);
        kvfree(q->flows);
 }
index 59284da9116d787b95704be7fda6d62e351e36aa..230072f9ec48e3bf3cd43f9aa7fafa4a096ac402 100644 (file)
@@ -566,6 +566,10 @@ static void smc_stat_fallback(struct smc_sock *smc)
 
 static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 {
+       wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
+       wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
+       unsigned long flags;
+
        smc->use_fallback = true;
        smc->fallback_rsn = reason_code;
        smc_stat_fallback(smc);
@@ -575,6 +579,16 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
                smc->clcsock->file->private_data = smc->clcsock;
                smc->clcsock->wq.fasync_list =
                        smc->sk.sk_socket->wq.fasync_list;
+
+               /* There may be some entries remaining in
+                * smc socket->wq, which should be removed
+                * to clcsocket->wq during the fallback.
+                */
+               spin_lock_irqsave(&smc_wait->lock, flags);
+               spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
+               list_splice_init(&smc_wait->head, &clc_wait->head);
+               spin_unlock(&clc_wait->lock);
+               spin_unlock_irqrestore(&smc_wait->lock, flags);
        }
 }
 
@@ -2120,8 +2134,10 @@ static int smc_listen(struct socket *sock, int backlog)
        smc->clcsock->sk->sk_user_data =
                (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
        rc = kernel_listen(smc->clcsock, backlog);
-       if (rc)
+       if (rc) {
+               smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
                goto out;
+       }
        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = SMC_LISTEN;
@@ -2354,8 +2370,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
 static int smc_shutdown(struct socket *sock, int how)
 {
        struct sock *sk = sock->sk;
+       bool do_shutdown = true;
        struct smc_sock *smc;
        int rc = -EINVAL;
+       int old_state;
        int rc1 = 0;
 
        smc = smc_sk(sk);
@@ -2382,7 +2400,11 @@ static int smc_shutdown(struct socket *sock, int how)
        }
        switch (how) {
        case SHUT_RDWR:         /* shutdown in both directions */
+               old_state = sk->sk_state;
                rc = smc_close_active(smc);
+               if (old_state == SMC_ACTIVE &&
+                   sk->sk_state == SMC_PEERCLOSEWAIT1)
+                       do_shutdown = false;
                break;
        case SHUT_WR:
                rc = smc_close_shutdown_write(smc);
@@ -2392,7 +2414,7 @@ static int smc_shutdown(struct socket *sock, int how)
                /* nothing more to do because peer is not involved */
                break;
        }
-       if (smc->clcsock)
+       if (do_shutdown && smc->clcsock)
                rc1 = kernel_sock_shutdown(smc->clcsock, how);
        /* map sock_shutdown_cmd constants to sk_shutdown value range */
        sk->sk_shutdown |= how + 1;
index 0f9ffba07d26858cfd07c53567e14700884553d4..292e4d904ab6e4afbba2cef421bce27cae5af364 100644 (file)
@@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc)
        int old_state;
        long timeout;
        int rc = 0;
+       int rc1 = 0;
 
        timeout = current->flags & PF_EXITING ?
                  0 : sock_flag(sk, SOCK_LINGER) ?
@@ -228,6 +229,15 @@ again:
                        /* send close request */
                        rc = smc_close_final(conn);
                        sk->sk_state = SMC_PEERCLOSEWAIT1;
+
+                       /* actively shutdown clcsock before peer close it,
+                        * prevent peer from entering TIME_WAIT state.
+                        */
+                       if (smc->clcsock && smc->clcsock->sk) {
+                               rc1 = kernel_sock_shutdown(smc->clcsock,
+                                                          SHUT_RDWR);
+                               rc = rc ? rc : rc1;
+                       }
                } else {
                        /* peer event has changed the state */
                        goto again;
@@ -354,9 +364,9 @@ static void smc_close_passive_work(struct work_struct *work)
        if (rxflags->peer_conn_abort) {
                /* peer has not received all data */
                smc_close_passive_abort_received(smc);
-               release_sock(&smc->sk);
+               release_sock(sk);
                cancel_delayed_work_sync(&conn->tx_work);
-               lock_sock(&smc->sk);
+               lock_sock(sk);
                goto wakeup;
        }
 
index 49b8ba3bb6835e88c2e58a6abbabb3ea25da6906..387d28b2f8dd055a3b3ceea6024ffb231f115877 100644 (file)
@@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
 void smc_lgr_cleanup_early(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
-       struct list_head *lgr_list;
        spinlock_t *lgr_lock;
 
        if (!lgr)
                return;
 
        smc_conn_free(conn);
-       lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+       smc_lgr_list_head(lgr, &lgr_lock);
        spin_lock_bh(lgr_lock);
        /* do not use this link group for new connections */
-       if (!list_empty(lgr_list))
-               list_del_init(lgr_list);
+       if (!list_empty(&lgr->list))
+               list_del_init(&lgr->list);
        spin_unlock_bh(lgr_lock);
        __smc_lgr_terminate(lgr, true);
 }
@@ -708,13 +707,14 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr)
        int i;
 
        while (1) {
+again:
                link_id = ++lgr->next_link_id;
                if (!link_id)   /* skip zero as link_id */
                        link_id = ++lgr->next_link_id;
                for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                        if (smc_link_usable(&lgr->lnk[i]) &&
                            lgr->lnk[i].link_id == link_id)
-                               continue;
+                               goto again;
                }
                break;
        }
@@ -1671,14 +1671,26 @@ static void smc_link_down_work(struct work_struct *work)
        mutex_unlock(&lgr->llc_conf_mutex);
 }
 
-/* Determine vlan of internal TCP socket.
- * @vlan_id: address to store the determined vlan id into
- */
+static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
+                                 struct netdev_nested_priv *priv)
+{
+       unsigned short *vlan_id = (unsigned short *)priv->data;
+
+       if (is_vlan_dev(lower_dev)) {
+               *vlan_id = vlan_dev_vlan_id(lower_dev);
+               return 1;
+       }
+
+       return 0;
+}
+
+/* Determine vlan of internal TCP socket. */
 int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
 {
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
+       struct netdev_nested_priv priv;
        struct net_device *ndev;
-       int i, nest_lvl, rc = 0;
+       int rc = 0;
 
        ini->vlan_id = 0;
        if (!dst) {
@@ -1696,20 +1708,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
                goto out_rel;
        }
 
+       priv.data = (void *)&ini->vlan_id;
        rtnl_lock();
-       nest_lvl = ndev->lower_level;
-       for (i = 0; i < nest_lvl; i++) {
-               struct list_head *lower = &ndev->adj_list.lower;
-
-               if (list_empty(lower))
-                       break;
-               lower = lower->next;
-               ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
-               if (is_vlan_dev(ndev)) {
-                       ini->vlan_id = vlan_dev_vlan_id(ndev);
-                       break;
-               }
-       }
+       netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
        rtnl_unlock();
 
 out_rel:
index ae48c9c84ee17af956c5ae7a0ff4e628d8fce71c..d8ee06a9650a1697abc4f23b01f54373f49b2a32 100644 (file)
@@ -1720,15 +1720,15 @@ static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key xs_key[2];
-static struct lock_class_key xs_slock_key[2];
+static struct lock_class_key xs_key[3];
+static struct lock_class_key xs_slock_key[3];
 
 static inline void xs_reclassify_socketu(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
        sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
-               &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
+               &xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]);
 }
 
 static inline void xs_reclassify_socket4(struct socket *sock)
@@ -1736,7 +1736,7 @@ static inline void xs_reclassify_socket4(struct socket *sock)
        struct sock *sk = sock->sk;
 
        sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
-               &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
+               &xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]);
 }
 
 static inline void xs_reclassify_socket6(struct socket *sock)
@@ -1744,7 +1744,7 @@ static inline void xs_reclassify_socket6(struct socket *sock)
        struct sock *sk = sock->sk;
 
        sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
-               &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
+               &xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]);
 }
 
 static inline void xs_reclassify_socket(int family, struct socket *sock)
index dc60c32bb70df978feab7940dc4c71a7f729689d..b4d9419a015b1b984119ffd85c546ab6d5f3fdcc 100644 (file)
@@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
                return -EEXIST;
 
        /* Allocate a new AEAD */
-       tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
        if (unlikely(!tmp))
                return -ENOMEM;
 
@@ -597,6 +597,10 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
        tmp->cloned = NULL;
        tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
        tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
+       if (!tmp->key) {
+               tipc_aead_free(&tmp->rcu);
+               return -ENOMEM;
+       }
        memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
        atomic_set(&tmp->users, 0);
        atomic64_set(&tmp->seqno, 0);
@@ -1470,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
                return -EEXIST;
 
        /* Allocate crypto */
-       c = kzalloc(sizeof(*c), GFP_ATOMIC);
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
 
@@ -1484,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
        }
 
        /* Allocate statistic structure */
-       c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+       c->stats = alloc_percpu(struct tipc_crypto_stats);
        if (!c->stats) {
                if (c->wq)
                        destroy_workqueue(c->wq);
@@ -2457,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
        }
 
        /* Lets duplicate it first */
-       skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
+       skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
        rcu_read_unlock();
 
        /* Now, generate new key, initiate & distribute it */
index 1b7a487c88419779536908b9a3e85cc7233a3778..09ae8448f394f738f434cf521503a43450615c95 100644 (file)
@@ -1298,8 +1298,11 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                return false;
 #ifdef CONFIG_TIPC_CRYPTO
        case MSG_CRYPTO:
-               tipc_crypto_msg_rcv(l->net, skb);
-               return true;
+               if (TIPC_SKB_CB(skb)->decrypted) {
+                       tipc_crypto_msg_rcv(l->net, skb);
+                       return true;
+               }
+               fallthrough;
 #endif
        default:
                pr_warn("Dropping received illegal msg type\n");
index acfba9f1ba72fa188882fce7db94fb061757969d..6bc2879ba637bf4c4c2d7338fd217810b2bb8485 100644 (file)
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex);
 static const struct proto *saved_tcpv4_prot;
 static DEFINE_MUTEX(tcpv4_prot_mutex);
 static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
-static struct proto_ops tls_sw_proto_ops;
+static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
 static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
                         const struct proto *base);
 
@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 
        WRITE_ONCE(sk->sk_prot,
                   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+       WRITE_ONCE(sk->sk_socket->ops,
+                  &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
 }
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -669,8 +671,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
        if (tx) {
                ctx->sk_write_space = sk->sk_write_space;
                sk->sk_write_space = tls_write_space;
-       } else {
-               sk->sk_socket->ops = &tls_sw_proto_ops;
        }
        goto out;
 
@@ -728,6 +728,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
        return ctx;
 }
 
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+                           const struct proto_ops *base)
+{
+       ops[TLS_BASE][TLS_BASE] = *base;
+
+       ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+       ops[TLS_SW  ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+
+       ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
+       ops[TLS_BASE][TLS_SW  ].splice_read     = tls_sw_splice_read;
+
+       ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
+       ops[TLS_SW  ][TLS_SW  ].splice_read     = tls_sw_splice_read;
+
+#ifdef CONFIG_TLS_DEVICE
+       ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+       ops[TLS_HW  ][TLS_BASE].sendpage_locked = NULL;
+
+       ops[TLS_HW  ][TLS_SW  ] = ops[TLS_BASE][TLS_SW  ];
+       ops[TLS_HW  ][TLS_SW  ].sendpage_locked = NULL;
+
+       ops[TLS_BASE][TLS_HW  ] = ops[TLS_BASE][TLS_SW  ];
+
+       ops[TLS_SW  ][TLS_HW  ] = ops[TLS_SW  ][TLS_SW  ];
+
+       ops[TLS_HW  ][TLS_HW  ] = ops[TLS_HW  ][TLS_SW  ];
+       ops[TLS_HW  ][TLS_HW  ].sendpage_locked = NULL;
+#endif
+#ifdef CONFIG_TLS_TOE
+       ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+#endif
+}
+
 static void tls_build_proto(struct sock *sk)
 {
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
@@ -739,6 +772,8 @@ static void tls_build_proto(struct sock *sk)
                mutex_lock(&tcpv6_prot_mutex);
                if (likely(prot != saved_tcpv6_prot)) {
                        build_protos(tls_prots[TLSV6], prot);
+                       build_proto_ops(tls_proto_ops[TLSV6],
+                                       sk->sk_socket->ops);
                        smp_store_release(&saved_tcpv6_prot, prot);
                }
                mutex_unlock(&tcpv6_prot_mutex);
@@ -749,6 +784,8 @@ static void tls_build_proto(struct sock *sk)
                mutex_lock(&tcpv4_prot_mutex);
                if (likely(prot != saved_tcpv4_prot)) {
                        build_protos(tls_prots[TLSV4], prot);
+                       build_proto_ops(tls_proto_ops[TLSV4],
+                                       sk->sk_socket->ops);
                        smp_store_release(&saved_tcpv4_prot, prot);
                }
                mutex_unlock(&tcpv4_prot_mutex);
@@ -959,10 +996,6 @@ static int __init tls_register(void)
        if (err)
                return err;
 
-       tls_sw_proto_ops = inet_stream_ops;
-       tls_sw_proto_ops.splice_read = tls_sw_splice_read;
-       tls_sw_proto_ops.sendpage_locked   = tls_sw_sendpage_locked;
-
        tls_device_init();
        tcp_register_ulp(&tcp_tls_ulp_ops);
 
index d81564078557b93e39c1e6e3507249651448df56..dfe623a4e72f48bb12a68abcfb3081c45a7716f5 100644 (file)
@@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk,
        memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
               prot->iv_size + prot->salt_size);
 
-       xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
+       xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
 
        sge->offset += prot->prepend_size;
        sge->length -= prot->prepend_size;
@@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
        else
                memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
 
-       xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
+       xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
 
        /* Prepare AAD */
        tls_make_aad(aad, rxm->full_len - prot->overhead_size +
@@ -2005,6 +2005,7 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        ssize_t copied = 0;
+       bool from_queue;
        int err = 0;
        long timeo;
        int chunk;
@@ -2014,25 +2015,28 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
 
        timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
 
-       skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
-       if (!skb)
-               goto splice_read_end;
-
-       if (!ctx->decrypted) {
-               err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
-
-               /* splice does not support reading control messages */
-               if (ctx->control != TLS_RECORD_TYPE_DATA) {
-                       err = -EINVAL;
+       from_queue = !skb_queue_empty(&ctx->rx_list);
+       if (from_queue) {
+               skb = __skb_dequeue(&ctx->rx_list);
+       } else {
+               skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
+                                   &err);
+               if (!skb)
                        goto splice_read_end;
-               }
 
+               err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
                if (err < 0) {
                        tls_err_abort(sk, -EBADMSG);
                        goto splice_read_end;
                }
-               ctx->decrypted = 1;
        }
+
+       /* splice does not support reading control messages */
+       if (ctx->control != TLS_RECORD_TYPE_DATA) {
+               err = -EINVAL;
+               goto splice_read_end;
+       }
+
        rxm = strp_msg(skb);
 
        chunk = min_t(unsigned int, rxm->full_len, len);
@@ -2040,7 +2044,17 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
        if (copied < 0)
                goto splice_read_end;
 
-       tls_sw_advance_skb(sk, skb, copied);
+       if (!from_queue) {
+               ctx->recv_pkt = NULL;
+               __strp_unpause(&ctx->strp);
+       }
+       if (chunk < rxm->full_len) {
+               __skb_queue_head(&ctx->rx_list, skb);
+               rxm->offset += len;
+               rxm->full_len -= len;
+       } else {
+               consume_skb(skb);
+       }
 
 splice_read_end:
        release_sock(sk);
index 78e08e82c08c423ff2112fcef9e27995a6d39984..b0bfc78e421ceef27a4466ceac6be09e7a50476a 100644 (file)
@@ -2882,9 +2882,6 @@ static int unix_shutdown(struct socket *sock, int mode)
 
        unix_state_lock(sk);
        sk->sk_shutdown |= mode;
-       if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
-           mode == SHUTDOWN_MASK)
-               sk->sk_state = TCP_CLOSE;
        other = unix_peer(sk);
        if (other)
                sock_hold(other);
index 81232b73df8f999b348bce66a4b41da9325461da..a27b3b5fa210f45771913127b219d546d3a672f4 100644 (file)
@@ -936,33 +936,37 @@ nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
        [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
 };
 
-int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
-                             struct cfg80211_registered_device **rdev,
-                             struct wireless_dev **wdev)
+static int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
+                                    struct cfg80211_registered_device **rdev,
+                                    struct wireless_dev **wdev,
+                                    struct nlattr **attrbuf)
 {
        int err;
 
        if (!cb->args[0]) {
-               struct nlattr **attrbuf;
+               struct nlattr **attrbuf_free = NULL;
 
-               attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf),
-                                 GFP_KERNEL);
-               if (!attrbuf)
-                       return -ENOMEM;
+               if (!attrbuf) {
+                       attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf),
+                                         GFP_KERNEL);
+                       if (!attrbuf)
+                               return -ENOMEM;
+                       attrbuf_free = attrbuf;
+               }
 
                err = nlmsg_parse_deprecated(cb->nlh,
                                             GENL_HDRLEN + nl80211_fam.hdrsize,
                                             attrbuf, nl80211_fam.maxattr,
                                             nl80211_policy, NULL);
                if (err) {
-                       kfree(attrbuf);
+                       kfree(attrbuf_free);
                        return err;
                }
 
                rtnl_lock();
                *wdev = __cfg80211_wdev_from_attrs(NULL, sock_net(cb->skb->sk),
                                                   attrbuf);
-               kfree(attrbuf);
+               kfree(attrbuf_free);
                if (IS_ERR(*wdev)) {
                        rtnl_unlock();
                        return PTR_ERR(*wdev);
@@ -6197,7 +6201,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
        int sta_idx = cb->args[2];
        int err;
 
-       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
        if (err)
                return err;
        /* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -7092,7 +7096,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        int path_idx = cb->args[2];
        int err;
 
-       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
        if (err)
                return err;
        /* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -7292,7 +7296,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
        int path_idx = cb->args[2];
        int err;
 
-       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
        if (err)
                return err;
        /* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -9718,7 +9722,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
        int start = cb->args[2], idx = 0;
        int err;
 
-       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+       err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
        if (err)
                return err;
        /* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -9851,7 +9855,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
        if (!attrbuf)
                return -ENOMEM;
 
-       res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+       res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, attrbuf);
        if (res) {
                kfree(attrbuf);
                return res;
index a3f387770f1bf065e088af09c116bac31e973a6f..d642e3be4ee78cdfe26b30d7a3fbc9363a4a1c66 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Portions of this file
- * Copyright (C) 2018, 2020 Intel Corporation
+ * Copyright (C) 2018, 2020-2021 Intel Corporation
  */
 #ifndef __NET_WIRELESS_NL80211_H
 #define __NET_WIRELESS_NL80211_H
@@ -22,10 +22,6 @@ static inline u64 wdev_id(struct wireless_dev *wdev)
               ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
 }
 
-int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
-                             struct cfg80211_registered_device **rdev,
-                             struct wireless_dev **wdev);
-
 int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
                          struct genl_info *info,
                          struct cfg80211_chan_def *chandef);
index 5ff1f8726faf8d944ba24968e6dc9eaca5f38a0f..41ea65deb6e1f1b653b0c18d3af86fc1b62c2680 100644 (file)
@@ -1046,6 +1046,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 
                switch (otype) {
                case NL80211_IFTYPE_AP:
+               case NL80211_IFTYPE_P2P_GO:
                        cfg80211_stop_ap(rdev, dev, true);
                        break;
                case NL80211_IFTYPE_ADHOC:
index 90c4e1e819d38b6bced115d1a2ccaef5c2b1530b..bc4ad48ea4f097649e70e52aacf038fdba46cefb 100644 (file)
@@ -500,7 +500,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
                pool->free_list_cnt--;
                xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
                                        free_list_node);
-               list_del(&xskb->free_list_node);
+               list_del_init(&xskb->free_list_node);
        }
 
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
@@ -568,7 +568,7 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
        i = nb_entries;
        while (i--) {
                xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
-               list_del(&xskb->free_list_node);
+               list_del_init(&xskb->free_list_node);
 
                *xdp = &xskb->xdp;
                xdp++;
@@ -615,6 +615,9 @@ EXPORT_SYMBOL(xp_can_alloc);
 
 void xp_free(struct xdp_buff_xsk *xskb)
 {
+       if (!list_empty(&xskb->free_list_node))
+               return;
+
        xskb->pool->free_list_cnt++;
        list_add(&xskb->free_list_node, &xskb->pool->free_list);
 }
index bec3528aa2de1369619940999d300b1b9c1849d2..43d2e9aa557f20bee4db33e863b45ca968b043b7 100644 (file)
@@ -31,6 +31,15 @@ config SAMPLE_FTRACE_DIRECT
          This builds an ftrace direct function example
          that hooks to wake_up_process and prints the parameters.
 
+config SAMPLE_FTRACE_DIRECT_MULTI
+       tristate "Build register_ftrace_direct_multi() example"
+       depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
+       depends on HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+       help
+         This builds an ftrace direct function example
+         that hooks to wake_up_process and schedule, and prints
+         the function addresses.
+
 config SAMPLE_TRACE_ARRAY
         tristate "Build sample module for kernel access to Ftrace instancess"
        depends on EVENT_TRACING && m
@@ -237,5 +246,5 @@ endif # SAMPLES
 config HAVE_SAMPLE_FTRACE_DIRECT
        bool
 
-config HAVE_SAMPLE_FTRACE_MULTI_DIRECT
+config HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        bool
index b7b98307c2b4051cb84e6bd0c38c3b6dc21fb864..4bcd6b93bffaf80a45e471cd0f7850c56da7f901 100644 (file)
@@ -22,7 +22,7 @@ subdir-$(CONFIG_SAMPLE_TIMER)         += timers
 obj-$(CONFIG_SAMPLE_TRACE_EVENTS)      += trace_events/
 obj-$(CONFIG_SAMPLE_TRACE_PRINTK)      += trace_printk/
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT)     += ftrace/
-obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace/
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace/
 obj-$(CONFIG_SAMPLE_TRACE_ARRAY)       += ftrace/
 subdir-$(CONFIG_SAMPLE_UHID)           += uhid
 obj-$(CONFIG_VIDEO_PCI_SKELETON)       += v4l/
index 722b3fadb46750ead89df8131b744aba293037de..1752a46a2b05673bb72027ec67e057e39c49ea7c 100644 (file)
@@ -9,8 +9,6 @@
  * Include file for sample Host Bandwidth Manager (HBM) BPF programs
  */
 #define KBUILD_MODNAME "foo"
-#include <stddef.h>
-#include <stdbool.h>
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
index d84e6949007cc9a209775fb98e04e60d9b0c1e9f..a81704d3317ba1f01b03966d184d4dda6050e36e 100644 (file)
@@ -309,7 +309,6 @@ int main(int argc, char **argv)
        const char *mprog_filename = NULL, *mprog_name = NULL;
        struct xdp_redirect_cpu *skel;
        struct bpf_map_info info = {};
-       char ifname_buf[IF_NAMESIZE];
        struct bpf_cpumap_val value;
        __u32 infosz = sizeof(info);
        int ret = EXIT_FAIL_OPTION;
@@ -390,10 +389,10 @@ int main(int argc, char **argv)
                case 'd':
                        if (strlen(optarg) >= IF_NAMESIZE) {
                                fprintf(stderr, "-d/--dev name too long\n");
+                               usage(argv, long_options, __doc__, mask, true, skel->obj);
                                goto end_cpu;
                        }
-                       safe_strncpy(ifname_buf, optarg, strlen(ifname_buf));
-                       ifindex = if_nametoindex(ifname_buf);
+                       ifindex = if_nametoindex(optarg);
                        if (!ifindex)
                                ifindex = strtoul(optarg, NULL, 0);
                        if (!ifindex) {
index b32d8217819902fe2f87d739cc826086a060f826..8740838e7767929d0a9bcea961246113ba583914 100644 (file)
@@ -120,7 +120,10 @@ struct sample_output {
                __u64 xmit;
        } totals;
        struct {
-               __u64 pps;
+               union {
+                       __u64 pps;
+                       __u64 num;
+               };
                __u64 drop;
                __u64 err;
        } rx_cnt;
@@ -1322,7 +1325,7 @@ int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic,
 
 static void sample_summary_print(void)
 {
-       double period = sample_out.rx_cnt.pps;
+       double num = sample_out.rx_cnt.num;
 
        if (sample_out.totals.rx) {
                double pkts = sample_out.totals.rx;
@@ -1330,7 +1333,7 @@ static void sample_summary_print(void)
                print_always("  Packets received    : %'-10llu\n",
                             sample_out.totals.rx);
                print_always("  Average packets/s   : %'-10.0f\n",
-                            sample_round(pkts / period));
+                            sample_round(pkts / num));
        }
        if (sample_out.totals.redir) {
                double pkts = sample_out.totals.redir;
@@ -1338,7 +1341,7 @@ static void sample_summary_print(void)
                print_always("  Packets redirected  : %'-10llu\n",
                             sample_out.totals.redir);
                print_always("  Average redir/s     : %'-10.0f\n",
-                            sample_round(pkts / period));
+                            sample_round(pkts / num));
        }
        if (sample_out.totals.drop)
                print_always("  Rx dropped          : %'-10llu\n",
@@ -1355,7 +1358,7 @@ static void sample_summary_print(void)
                print_always("  Packets transmitted : %'-10llu\n",
                             sample_out.totals.xmit);
                print_always("  Average transmit/s  : %'-10.0f\n",
-                            sample_round(pkts / period));
+                            sample_round(pkts / num));
        }
 }
 
@@ -1422,7 +1425,7 @@ static int sample_stats_collect(struct stats_record *rec)
        return 0;
 }
 
-static void sample_summary_update(struct sample_output *out, int interval)
+static void sample_summary_update(struct sample_output *out)
 {
        sample_out.totals.rx += out->totals.rx;
        sample_out.totals.redir += out->totals.redir;
@@ -1430,12 +1433,11 @@ static void sample_summary_update(struct sample_output *out, int interval)
        sample_out.totals.drop_xmit += out->totals.drop_xmit;
        sample_out.totals.err += out->totals.err;
        sample_out.totals.xmit += out->totals.xmit;
-       sample_out.rx_cnt.pps += interval;
+       sample_out.rx_cnt.num++;
 }
 
 static void sample_stats_print(int mask, struct stats_record *cur,
-                              struct stats_record *prev, char *prog_name,
-                              int interval)
+                              struct stats_record *prev, char *prog_name)
 {
        struct sample_output out = {};
 
@@ -1452,7 +1454,7 @@ static void sample_stats_print(int mask, struct stats_record *cur,
        else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
                stats_get_devmap_xmit_multi(cur, prev, 0, &out,
                                            mask & SAMPLE_DEVMAP_XMIT_CNT);
-       sample_summary_update(&out, interval);
+       sample_summary_update(&out);
 
        stats_print(prog_name, mask, cur, prev, &out);
 }
@@ -1495,7 +1497,7 @@ static void swap(struct stats_record **a, struct stats_record **b)
 }
 
 static int sample_timer_cb(int timerfd, struct stats_record **rec,
-                          struct stats_record **prev, int interval)
+                          struct stats_record **prev)
 {
        char line[64] = "Summary";
        int ret;
@@ -1524,7 +1526,7 @@ static int sample_timer_cb(int timerfd, struct stats_record **rec,
                snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?");
        }
 
-       sample_stats_print(sample_mask, *rec, *prev, line, interval);
+       sample_stats_print(sample_mask, *rec, *prev, line);
        return 0;
 }
 
@@ -1579,7 +1581,7 @@ int sample_run(int interval, void (*post_cb)(void *), void *ctx)
                if (pfd[0].revents & POLLIN)
                        ret = sample_signal_cb();
                else if (pfd[1].revents & POLLIN)
-                       ret = sample_timer_cb(timerfd, &rec, &prev, interval);
+                       ret = sample_timer_cb(timerfd, &rec, &prev);
 
                if (ret)
                        break;
index e8a3f8520a44e43eec906daafacb364db27421c3..faf8cdb79c5f48ae87d41008b079e607b9bcb68a 100644 (file)
@@ -3,7 +3,8 @@
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
-obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o
 
 CFLAGS_sample-trace-array.o := -I$(src)
 obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
new file mode 100644 (file)
index 0000000..91bc42a
--- /dev/null
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+void my_direct_func1(unsigned long ip)
+{
+       trace_printk("my direct func1 ip %lx\n", ip);
+}
+
+void my_direct_func2(unsigned long ip)
+{
+       trace_printk("my direct func2 ip %lx\n", ip);
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+#ifdef CONFIG_X86_64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:"
+"      pushq %rbp\n"
+"      movq %rsp, %rbp\n"
+"      pushq %rdi\n"
+"      movq 8(%rbp), %rdi\n"
+"      call my_direct_func1\n"
+"      popq %rdi\n"
+"      leave\n"
+"      ret\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+"      .type           my_tramp2, @function\n"
+"\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:"
+"      pushq %rbp\n"
+"      movq %rsp, %rbp\n"
+"      pushq %rdi\n"
+"      movq 8(%rbp), %rdi\n"
+"      call my_direct_func2\n"
+"      popq %rdi\n"
+"      leave\n"
+"      ret\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"       .pushsection    .text, \"ax\", @progbits\n"
+"       .type           my_tramp1, @function\n"
+"       .globl          my_tramp1\n"
+"   my_tramp1:"
+"       lgr             %r1,%r15\n"
+"       stmg            %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       stg             %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       aghi            %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"       stg             %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"       lgr             %r2,%r0\n"
+"       brasl           %r14,my_direct_func1\n"
+"       aghi            %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"       lmg             %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       lg              %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       lgr             %r1,%r0\n"
+"       br              %r1\n"
+"       .size           my_tramp1, .-my_tramp1\n"
+"\n"
+"       .type           my_tramp2, @function\n"
+"       .globl          my_tramp2\n"
+"   my_tramp2:"
+"       lgr             %r1,%r15\n"
+"       stmg            %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       stg             %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       aghi            %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"       stg             %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"       lgr             %r2,%r0\n"
+"       brasl           %r14,my_direct_func2\n"
+"       aghi            %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"       lmg             %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       lg              %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       lgr             %r1,%r0\n"
+"       br              %r1\n"
+"       .size           my_tramp2, .-my_tramp2\n"
+"       .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+       (unsigned long)my_tramp1,
+       (unsigned long)my_tramp2,
+};
+
+static struct ftrace_ops direct;
+
+static int simple_thread(void *arg)
+{
+       static int t;
+       int ret = 0;
+
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(2 * HZ);
+
+               if (ret)
+                       continue;
+               t ^= 1;
+               ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+               if (!ret)
+                       my_tramp = tramps[t];
+               WARN_ON_ONCE(ret);
+       }
+
+       return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_multi_init(void)
+{
+       int ret;
+
+       ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+       ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
+
+       ret = register_ftrace_direct_multi(&direct, my_tramp);
+
+       if (!ret)
+               simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+       return ret;
+}
+
+static void __exit ftrace_direct_multi_exit(void)
+{
+       kthread_stop(simple_tsk);
+       unregister_ftrace_direct_multi(&direct, my_tramp);
+}
+
+module_init(ftrace_direct_multi_init);
+module_exit(ftrace_direct_multi_exit);
+
+MODULE_AUTHOR("Jiri Olsa");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_LICENSE("GPL");
index b6d7806b400e4966212146fe45f844c8be8ba8d9..2fafc9afcbf0fcf31df7eaebd8fd0c19fa44fd28 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/mm.h> /* for handle_mm_fault() */
 #include <linux/ftrace.h>
 #include <linux/sched/stat.h>
+#include <asm/asm-offsets.h>
 
 extern void my_direct_func(unsigned long ip);
 
@@ -14,6 +15,8 @@ void my_direct_func(unsigned long ip)
 
 extern void my_tramp(void *);
 
+#ifdef CONFIG_X86_64
+
 asm (
 "      .pushsection    .text, \"ax\", @progbits\n"
 "      .type           my_tramp, @function\n"
@@ -31,6 +34,33 @@ asm (
 "      .popsection\n"
 );
 
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
+"   my_tramp:"
+"      lgr             %r1,%r15\n"
+"      stmg            %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"      stg             %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"      aghi            %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"      stg             %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"      lgr             %r2,%r0\n"
+"      brasl           %r14,my_direct_func\n"
+"      aghi            %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"      lmg             %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"      lg              %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"      lgr             %r1,%r0\n"
+"      br              %r1\n"
+"      .size           my_tramp, .-my_tramp\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
 static struct ftrace_ops direct;
 
 static int __init ftrace_direct_multi_init(void)
index cc3625617a0ef58becdca4e298f8c80d34d274e6..c0d3bcb99138097ba17417386189aa60f8019006 100644 (file)
@@ -259,5 +259,8 @@ int main(void)
        DEVID_FIELD(dfl_device_id, type);
        DEVID_FIELD(dfl_device_id, feature_id);
 
+       DEVID(ishtp_device_id);
+       DEVID_FIELD(ishtp_device_id, guid);
+
        return 0;
 }
index 49aba862073e017557bc736637f13c62a80944e8..5258247d78ac29765337e7cf4d09afeb05e1e0d4 100644 (file)
@@ -115,6 +115,17 @@ static inline void add_uuid(char *str, uuid_le uuid)
                uuid.b[12], uuid.b[13], uuid.b[14], uuid.b[15]);
 }
 
+static inline void add_guid(char *str, guid_t guid)
+{
+       int len = strlen(str);
+
+       sprintf(str + len, "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
+               guid.b[3], guid.b[2], guid.b[1], guid.b[0],
+               guid.b[5], guid.b[4], guid.b[7], guid.b[6],
+               guid.b[8], guid.b[9], guid.b[10], guid.b[11],
+               guid.b[12], guid.b[13], guid.b[14], guid.b[15]);
+}
+
 /**
  * Check that sizeof(device_id type) are consistent with size of section
  * in .o file. If in-consistent then userspace and kernel does not agree
@@ -1380,6 +1391,18 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
        return 1;
 }
 
+/* Looks like: ishtp:{guid} */
+static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+{
+       DEF_FIELD(symval, ishtp_device_id, guid);
+
+       strcpy(alias, ISHTP_MODULE_PREFIX "{");
+       add_guid(alias, guid);
+       strcat(alias, "}");
+
+       return 1;
+}
+
 static int do_auxiliary_entry(const char *filename, void *symval, char *alias)
 {
        DEF_FIELD_ADDR(symval, auxiliary_device_id, name);
@@ -1499,6 +1522,7 @@ static const struct devtable devtable[] = {
        {"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
        {"ssam", SIZE_ssam_device_id, do_ssam_entry},
        {"dfl", SIZE_dfl_device_id, do_dfl_entry},
+       {"ishtp", SIZE_ishtp_device_id, do_ishtp_entry},
 };
 
 /* Create MODULE_ALIAS() statements.
index 727c3b484bd34f1246e87a51314e3110cf3713bc..0ae4e4e57a401ebb36f09ba59a4e7b60b9c67846 100644 (file)
@@ -31,13 +31,20 @@ static u32 hashtab_compute_size(u32 nel)
 
 int hashtab_init(struct hashtab *h, u32 nel_hint)
 {
-       h->size = hashtab_compute_size(nel_hint);
+       u32 size = hashtab_compute_size(nel_hint);
+
+       /* should already be zeroed, but better be safe */
        h->nel = 0;
-       if (!h->size)
-               return 0;
+       h->size = 0;
+       h->htable = NULL;
 
-       h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL);
-       return h->htable ? 0 : -ENOMEM;
+       if (size) {
+               h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL);
+               if (!h->htable)
+                       return -ENOMEM;
+               h->size = size;
+       }
+       return 0;
 }
 
 int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
index 470dabc60aa0e23e9daea43314dc637283903fa5..edff063e088d28266677da124443ae6960588282 100644 (file)
@@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
                                  struct snd_ctl_elem_value *data,
                                  int type, int count)
 {
+       struct snd_ctl_elem_value32 __user *data32 = userdata;
        int i, size;
 
        if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
@@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
                if (copy_to_user(valuep, data->value.bytes.data, size))
                        return -EFAULT;
        }
+       if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
+               return -EFAULT;
        return 0;
 }
 
index 82a818734a5f769268872f0f2d895f616d5a8c4e..20a0a4771b9a830f5750f84230a49ee71215a08e 100644 (file)
@@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
  *
  * Return the maximum value for field PAR.
  */
-static unsigned int
+static int
 snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
                           snd_pcm_hw_param_t var, int *dir)
 {
@@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
                                   struct snd_pcm_hw_params *oss_params,
                                   struct snd_pcm_hw_params *slave_params)
 {
-       size_t s;
-       size_t oss_buffer_size, oss_period_size, oss_periods;
-       size_t min_period_size, max_period_size;
+       ssize_t s;
+       ssize_t oss_buffer_size;
+       ssize_t oss_period_size, oss_periods;
+       ssize_t min_period_size, max_period_size;
        struct snd_pcm_runtime *runtime = substream->runtime;
        size_t oss_frame_size;
 
        oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
                         params_channels(oss_params) / 8;
 
+       oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
+                                                    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+                                                    NULL);
+       if (oss_buffer_size <= 0)
+               return -EINVAL;
        oss_buffer_size = snd_pcm_plug_client_size(substream,
-                                                  snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
-       if (!oss_buffer_size)
+                                                  oss_buffer_size * oss_frame_size);
+       if (oss_buffer_size <= 0)
                return -EINVAL;
        oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
        if (atomic_read(&substream->mmap_count)) {
@@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
        min_period_size = snd_pcm_plug_client_size(substream,
                                                   snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-       if (min_period_size) {
+       if (min_period_size > 0) {
                min_period_size *= oss_frame_size;
                min_period_size = roundup_pow_of_two(min_period_size);
                if (oss_period_size < min_period_size)
@@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
        max_period_size = snd_pcm_plug_client_size(substream,
                                                   snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-       if (max_period_size) {
+       if (max_period_size > 0) {
                max_period_size *= oss_frame_size;
                max_period_size = rounddown_pow_of_two(max_period_size);
                if (oss_period_size > max_period_size)
@@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
                oss_periods = substream->oss.setup.periods;
 
        s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
-       if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+       if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
                s = runtime->oss.maxfrags;
        if (oss_periods > s)
                oss_periods = s;
@@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
                err = -EINVAL;
                goto failure;
        }
-       choose_rate(substream, sparams, runtime->oss.rate);
-       snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
+
+       err = choose_rate(substream, sparams, runtime->oss.rate);
+       if (err < 0)
+               goto failure;
+       err = snd_pcm_hw_param_near(substream, sparams,
+                                   SNDRV_PCM_HW_PARAM_CHANNELS,
+                                   runtime->oss.channels, NULL);
+       if (err < 0)
+               goto failure;
 
        format = snd_pcm_oss_format_from(runtime->oss.format);
 
@@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
        if (runtime->oss.subdivision || runtime->oss.fragshift)
                return -EINVAL;
        fragshift = val & 0xffff;
-       if (fragshift >= 31)
+       if (fragshift >= 25) /* should be large enough */
                return -EINVAL;
        runtime->oss.fragshift = fragshift;
        runtime->oss.maxfrags = (val >> 16) & 0xffff;
index b9ac9e9e45a4847f67f792509084436e3059c33c..4208fa8a4db5bc10787d797b04ead71572b6f503 100644 (file)
@@ -252,6 +252,11 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x02c8,
        },
+       {
+               .flags = FLAG_SOF,
+               .device = 0x02c8,
+               .codec_hid = "ESSX8336",
+       },
 /* Cometlake-H */
        {
                .flags = FLAG_SOF,
@@ -276,6 +281,11 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x06c8,
        },
+               {
+               .flags = FLAG_SOF,
+               .device = 0x06c8,
+               .codec_hid = "ESSX8336",
+       },
 #endif
 
 /* Icelake */
@@ -299,6 +309,15 @@ static const struct config_entry config_table[] = {
        },
 #endif
 
+/* JasperLake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+       {
+               .flags = FLAG_SOF,
+               .device = 0x4dc8,
+               .codec_hid = "ESSX8336",
+       },
+#endif
+
 /* Tigerlake */
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
        {
index ea20236f35dbc210b2042897bf19319f26f93c7b..9a678b5cf2855fabf163cbf4d69821fe3f08f098 100644 (file)
@@ -3218,7 +3218,6 @@ static int snd_cmipci_probe(struct pci_dev *pci,
 {
        static int dev;
        struct snd_card *card;
-       struct cmipci *cm;
        int err;
 
        if (dev >= SNDRV_CARDS)
@@ -3229,10 +3228,9 @@ static int snd_cmipci_probe(struct pci_dev *pci,
        }
 
        err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
-                               sizeof(*cm), &card);
+                               sizeof(struct cmipci), &card);
        if (err < 0)
                return err;
-       cm = card->private_data;
        
        switch (pci->device) {
        case PCI_DEVICE_ID_CMEDIA_CM8738:
index da6e6350ceafad545aa4e0ef29591ffd2d9e6c4d..d074727c3e21d3edf25f934b259e441a83738b1f 100644 (file)
 
 #define BLANK_SLOT             4094
 
-static int amixer_master(struct rsc *rsc)
+static void amixer_master(struct rsc *rsc)
 {
        rsc->conj = 0;
-       return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
+       rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
 }
 
-static int amixer_next_conj(struct rsc *rsc)
+static void amixer_next_conj(struct rsc *rsc)
 {
        rsc->conj++;
-       return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
 }
 
 static int amixer_index(const struct rsc *rsc)
@@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
 
 /* SUM resource management */
 
-static int sum_master(struct rsc *rsc)
+static void sum_master(struct rsc *rsc)
 {
        rsc->conj = 0;
-       return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
+       rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
 }
 
-static int sum_next_conj(struct rsc *rsc)
+static void sum_next_conj(struct rsc *rsc)
 {
        rsc->conj++;
-       return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
 }
 
 static int sum_index(const struct rsc *rsc)
index f589da04534244e8752e65f090007ecd6d36db91..7fc720046ce293564e513ad78fccda687f45aa38 100644 (file)
@@ -51,12 +51,12 @@ static const struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
        [SPDIFIO] = {.left = 0x05, .right = 0x85},
 };
 
-static int daio_master(struct rsc *rsc)
+static void daio_master(struct rsc *rsc)
 {
        /* Actually, this is not the resource index of DAIO.
         * For DAO, it is the input mapper index. And, for DAI,
         * it is the output time-slot index. */
-       return rsc->conj = rsc->idx;
+       rsc->conj = rsc->idx;
 }
 
 static int daio_index(const struct rsc *rsc)
@@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
        return rsc->conj;
 }
 
-static int daio_out_next_conj(struct rsc *rsc)
+static void daio_out_next_conj(struct rsc *rsc)
 {
-       return rsc->conj += 2;
+       rsc->conj += 2;
 }
 
-static int daio_in_next_conj_20k1(struct rsc *rsc)
+static void daio_in_next_conj_20k1(struct rsc *rsc)
 {
-       return rsc->conj += 0x200;
+       rsc->conj += 0x200;
 }
 
-static int daio_in_next_conj_20k2(struct rsc *rsc)
+static void daio_in_next_conj_20k2(struct rsc *rsc)
 {
-       return rsc->conj += 0x100;
+       rsc->conj += 0x100;
 }
 
 static const struct rsc_ops daio_out_rsc_ops = {
index 81ad269345182748a68f9b112c900d18095624c1..be1d3e61309ce2580ed440ce45c16a6813e090d3 100644 (file)
@@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
     return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
 }
 
-static int rsc_next_conj(struct rsc *rsc)
+static void rsc_next_conj(struct rsc *rsc)
 {
        unsigned int i;
        for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
                i++;
        rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
-       return rsc->conj;
 }
 
-static int rsc_master(struct rsc *rsc)
+static void rsc_master(struct rsc *rsc)
 {
-       return rsc->conj = rsc->idx;
+       rsc->conj = rsc->idx;
 }
 
 static const struct rsc_ops rsc_generic_ops = {
index fdbfd808816d373a8587cce9b840b84281a469d9..58553bda44f436316d29e0b7b14fcdd187ea57ff 100644 (file)
@@ -39,8 +39,8 @@ struct rsc {
 };
 
 struct rsc_ops {
-       int (*master)(struct rsc *rsc); /* Move to master resource */
-       int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
+       void (*master)(struct rsc *rsc); /* Move to master resource */
+       void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
        int (*index)(const struct rsc *rsc); /* Return the index of resource */
        /* Return the output slot number */
        int (*output_slot)(const struct rsc *rsc);
index bd4697b44233467c687033aeb7194ef85132443c..4a94b4708a77e55f9ee81315b1d0f725c8cc3a33 100644 (file)
@@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
 
 /* SRCIMP resource manager operations */
 
-static int srcimp_master(struct rsc *rsc)
+static void srcimp_master(struct rsc *rsc)
 {
        rsc->conj = 0;
-       return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
+       rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
 }
 
-static int srcimp_next_conj(struct rsc *rsc)
+static void srcimp_next_conj(struct rsc *rsc)
 {
        rsc->conj++;
-       return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
 }
 
 static int srcimp_index(const struct rsc *rsc)
index fe51163f2d82df9347be08a94a486503705779cd..1b46b599a5cff282dbdb02ea22202b941711d724 100644 (file)
@@ -335,7 +335,10 @@ enum {
                                        ((pci)->device == 0x0c0c) || \
                                        ((pci)->device == 0x0d0c) || \
                                        ((pci)->device == 0x160c) || \
-                                       ((pci)->device == 0x490d))
+                                       ((pci)->device == 0x490d) || \
+                                       ((pci)->device == 0x4f90) || \
+                                       ((pci)->device == 0x4f91) || \
+                                       ((pci)->device == 0x4f92))
 
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 
@@ -2473,6 +2476,13 @@ static const struct pci_device_id azx_ids[] = {
        /* DG1 */
        { PCI_DEVICE(0x8086, 0x490d),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* DG2 */
+       { PCI_DEVICE(0x8086, 0x4f90),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4f91),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4f92),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Alderlake-S */
        { PCI_DEVICE(0x8086, 0x7ad0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index ea8ab8b43337896ba3ebee6a89a94d63fedff2df..d22c96eb2f8fb7324f44335835dd36bd8576e234 100644 (file)
@@ -438,6 +438,15 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
 #define for_each_hda_codec_node(nid, codec) \
        for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++)
 
+/* Set the codec power_state flag to indicate to allow unsol event handling;
+ * see hda_codec_unsol_event() in hda_bind.c.  Calling this might confuse the
+ * state tracking, so use with care.
+ */
+static inline void snd_hda_codec_allow_unsol_events(struct hda_codec *codec)
+{
+       codec->core.dev.power.power_state = PMSG_ON;
+}
+
 /*
  * get widget capabilities
  */
index 31ff11ab868e1ac3c1aff15d20a52728884e2788..039b9f2f8e94700f097cc58dc3a02ace0fae2dcf 100644 (file)
@@ -750,6 +750,11 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
        if (cs42l42->full_scale_vol)
                cs8409_i2c_write(cs42l42, 0x2001, 0x01);
 
+       /* we have to explicitly allow unsol event handling even during the
+        * resume phase so that the jack event is processed properly
+        */
+       snd_hda_codec_allow_unsol_events(cs42l42->codec);
+
        cs42l42_enable_jack_detect(cs42l42);
 }
 
index 65d2c55399195a4c6b89afe3f62f1debbe8fbeed..415701bd10ac8ca23ba7a505dea0b0d7ccbbb0da 100644 (file)
@@ -4380,10 +4380,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",     patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",  patch_i915_tgl_hdmi),
-HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index 2f1727faec698c37322c086a2399d623f6bce35e..3599f4c85ebf748a17ea4fc9432df0e9aa251300 100644 (file)
@@ -6503,22 +6503,47 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
 /* for alc285_fixup_ideapad_s740_coef() */
 #include "ideapad_s740_helper.c"
 
-static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
-                                                           const struct hda_fixup *fix,
-                                                           int action)
+static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = {
+       WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000),
+       WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000),
+       WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089),
+       {}
+};
+
+static void alc256_fixup_set_coef_defaults(struct hda_codec *codec,
+                                          const struct hda_fixup *fix,
+                                          int action)
 {
        /*
-       * A certain other OS sets these coeffs to different values. On at least one TongFang
-       * barebone these settings might survive even a cold reboot. So to restore a clean slate the
-       * values are explicitly reset to default here. Without this, the external microphone is
-       * always in a plugged-in state, while the internal microphone is always in an unplugged
-       * state, breaking the ability to use the internal microphone.
-       */
-       alc_write_coef_idx(codec, 0x24, 0x0000);
-       alc_write_coef_idx(codec, 0x26, 0x0000);
-       alc_write_coef_idx(codec, 0x29, 0x3000);
-       alc_write_coef_idx(codec, 0x37, 0xfe05);
-       alc_write_coef_idx(codec, 0x45, 0x5089);
+        * A certain other OS sets these coeffs to different values. On at least
+        * one TongFang barebone these settings might survive even a cold
+        * reboot. So to restore a clean slate the values are explicitly reset
+        * to default here. Without this, the external microphone is always in a
+        * plugged-in state, while the internal microphone is always in an
+        * unplugged state, breaking the ability to use the internal microphone.
+        */
+       alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs);
+}
+
+static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
+       WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06),
+       WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074),
+       WRITE_COEF(0x49, 0x0149),
+       {}
+};
+
+static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
+                                      const struct hda_fixup *fix,
+                                      int action)
+{
+       /*
+        * The audio jack input and output is not detected on the ASRock NUC Box
+        * 1100 series when cold booting without this fix. Warm rebooting from a
+        * certain other OS makes the audio functional, as COEF settings are
+        * preserved in this case. This fix sets these altered COEF values as
+        * the default.
+        */
+       alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
 }
 
 enum {
@@ -6738,8 +6763,9 @@ enum {
        ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
        ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
        ALC287_FIXUP_13S_GEN2_SPEAKERS,
-       ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
+       ALC256_FIXUP_SET_COEF_DEFAULTS,
        ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+       ALC233_FIXUP_NO_AUDIO_JACK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8443,9 +8469,9 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE,
        },
-       [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+       [ALC256_FIXUP_SET_COEF_DEFAULTS] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+               .v.func = alc256_fixup_set_coef_defaults,
        },
        [ALC245_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
@@ -8460,6 +8486,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
        },
+       [ALC233_FIXUP_NO_AUDIO_JACK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc233_fixup_no_audio_jack,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8639,6 +8669,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
@@ -8894,6 +8925,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+       SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
        SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
        SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
        SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
@@ -8901,7 +8933,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
-       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
+       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -10203,6 +10235,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
        }
 }
 
+static void alc897_hp_automute_hook(struct hda_codec *codec,
+                                        struct hda_jack_callback *jack)
+{
+       struct alc_spec *spec = codec->spec;
+       int vref;
+
+       snd_hda_gen_hp_automute(codec, jack);
+       vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+       snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+                           vref);
+}
+
+static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+                                    const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+       }
+}
+
 static const struct coef_fw alc668_coefs[] = {
        WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03,    0x0),
        WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06,    0x0), WRITE_COEF(0x07, 0x0f80),
@@ -10283,6 +10336,8 @@ enum {
        ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
        ALC668_FIXUP_HEADSET_MIC,
        ALC668_FIXUP_MIC_DET_COEF,
+       ALC897_FIXUP_LENOVO_HEADSET_MIC,
+       ALC897_FIXUP_HEADSET_MIC_PIN,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -10689,6 +10744,19 @@ static const struct hda_fixup alc662_fixups[] = {
                        {}
                },
        },
+       [ALC897_FIXUP_LENOVO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc897_fixup_lenovo_headset_mic,
+       },
+       [ALC897_FIXUP_HEADSET_MIC_PIN] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x03a11050 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10733,6 +10801,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
index 957eeb6fb8e379d7fa8b821120d52ac5d4d0726c..7e9a9a9d8ddd94fc9624b6a213fcb1e6acbd9296 100644 (file)
@@ -146,10 +146,11 @@ static int snd_acp6x_probe(struct pci_dev *pci,
 {
        struct acp6x_dev_data *adata;
        struct platform_device_info pdevinfo[ACP6x_DEVS];
-       int ret, index;
+       int index = 0;
        int val = 0x00;
        u32 addr;
        unsigned int irqflags;
+       int ret;
 
        irqflags = IRQF_SHARED;
        /* Yellow Carp device check */
index 90a921f726c3bdb398ff15bd7e52a673d76b5d8d..3fa99741779af3d90216bf004349435f6b0d4c12 100644 (file)
@@ -42,34 +42,6 @@ static const struct spi_device_id cs35l41_id_spi[] = {
 
 MODULE_DEVICE_TABLE(spi, cs35l41_id_spi);
 
-static void cs35l41_spi_otp_setup(struct cs35l41_private *cs35l41,
-                                 bool is_pre_setup, unsigned int *freq)
-{
-       struct spi_device *spi;
-       u32 orig_spi_freq;
-
-       spi = to_spi_device(cs35l41->dev);
-
-       if (!spi) {
-               dev_err(cs35l41->dev, "%s: No SPI device\n", __func__);
-               return;
-       }
-
-       if (is_pre_setup) {
-               orig_spi_freq = spi->max_speed_hz;
-               if (orig_spi_freq > CS35L41_SPI_MAX_FREQ_OTP) {
-                       spi->max_speed_hz = CS35L41_SPI_MAX_FREQ_OTP;
-                       spi_setup(spi);
-               }
-               *freq = orig_spi_freq;
-       } else {
-               if (spi->max_speed_hz != *freq) {
-                       spi->max_speed_hz = *freq;
-                       spi_setup(spi);
-               }
-       }
-}
-
 static int cs35l41_spi_probe(struct spi_device *spi)
 {
        const struct regmap_config *regmap_config = &cs35l41_regmap_spi;
@@ -81,6 +53,9 @@ static int cs35l41_spi_probe(struct spi_device *spi)
        if (!cs35l41)
                return -ENOMEM;
 
+       spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;
+       spi_setup(spi);
+
        spi_set_drvdata(spi, cs35l41);
        cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config);
        if (IS_ERR(cs35l41->regmap)) {
@@ -91,7 +66,6 @@ static int cs35l41_spi_probe(struct spi_device *spi)
 
        cs35l41->dev = &spi->dev;
        cs35l41->irq = spi->irq;
-       cs35l41->otp_setup = cs35l41_spi_otp_setup;
 
        return cs35l41_probe(cs35l41, pdata);
 }
index 94ed21d7676fb0242030d7e2f09313328a84f4b6..9c4d481f7614c6a61edb065cfd45b7603afc9c0f 100644 (file)
@@ -302,7 +302,6 @@ static int cs35l41_otp_unpack(void *data)
        const struct cs35l41_otp_packed_element_t *otp_map;
        struct cs35l41_private *cs35l41 = data;
        int bit_offset, word_offset, ret, i;
-       unsigned int orig_spi_freq;
        unsigned int bit_sum = 8;
        u32 otp_val, otp_id_reg;
        u32 *otp_mem;
@@ -326,9 +325,6 @@ static int cs35l41_otp_unpack(void *data)
                goto err_otp_unpack;
        }
 
-       if (cs35l41->otp_setup)
-               cs35l41->otp_setup(cs35l41, true, &orig_spi_freq);
-
        ret = regmap_bulk_read(cs35l41->regmap, CS35L41_OTP_MEM0, otp_mem,
                               CS35L41_OTP_SIZE_WORDS);
        if (ret < 0) {
@@ -336,9 +332,6 @@ static int cs35l41_otp_unpack(void *data)
                goto err_otp_unpack;
        }
 
-       if (cs35l41->otp_setup)
-               cs35l41->otp_setup(cs35l41, false, &orig_spi_freq);
-
        otp_map = otp_map_match->map;
 
        bit_offset = otp_map_match->bit_offset;
@@ -612,6 +605,12 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = {
        SND_SOC_DAPM_AIF_OUT("ASPTX3", NULL, 0, CS35L41_SP_ENABLES, 2, 0),
        SND_SOC_DAPM_AIF_OUT("ASPTX4", NULL, 0, CS35L41_SP_ENABLES, 3, 0),
 
+       SND_SOC_DAPM_SIGGEN("VSENSE"),
+       SND_SOC_DAPM_SIGGEN("ISENSE"),
+       SND_SOC_DAPM_SIGGEN("VP"),
+       SND_SOC_DAPM_SIGGEN("VBST"),
+       SND_SOC_DAPM_SIGGEN("TEMP"),
+
        SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L41_PWR_CTRL2, 12, 0),
        SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L41_PWR_CTRL2, 13, 0),
        SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L41_PWR_CTRL2, 8, 0),
@@ -623,12 +622,6 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = {
                               cs35l41_main_amp_event,
                               SND_SOC_DAPM_POST_PMD |  SND_SOC_DAPM_POST_PMU),
 
-       SND_SOC_DAPM_INPUT("VP"),
-       SND_SOC_DAPM_INPUT("VBST"),
-       SND_SOC_DAPM_INPUT("ISENSE"),
-       SND_SOC_DAPM_INPUT("VSENSE"),
-       SND_SOC_DAPM_INPUT("TEMP"),
-
        SND_SOC_DAPM_MUX("ASP TX1 Source", SND_SOC_NOPM, 0, 0, &asp_tx1_mux),
        SND_SOC_DAPM_MUX("ASP TX2 Source", SND_SOC_NOPM, 0, 0, &asp_tx2_mux),
        SND_SOC_DAPM_MUX("ASP TX3 Source", SND_SOC_NOPM, 0, 0, &asp_tx3_mux),
@@ -674,8 +667,8 @@ static const struct snd_soc_dapm_route cs35l41_audio_map[] = {
        {"VMON ADC", NULL, "VSENSE"},
        {"IMON ADC", NULL, "ISENSE"},
        {"VPMON ADC", NULL, "VP"},
-       {"TEMPMON ADC", NULL, "TEMP"},
        {"VBSTMON ADC", NULL, "VBST"},
+       {"TEMPMON ADC", NULL, "TEMP"},
 
        {"ASPRX1", NULL, "AMP Playback"},
        {"ASPRX2", NULL, "AMP Playback"},
index 6cffe8a55beb1b9789f16f06c0ef0f94250f401c..48485b08a6f1fdb5dde5c14d20fba978d90268a7 100644 (file)
 #define CS35L41_FS2_WINDOW_MASK                0x00FFF800
 #define CS35L41_FS2_WINDOW_SHIFT       12
 
-#define CS35L41_SPI_MAX_FREQ_OTP       4000000
+#define CS35L41_SPI_MAX_FREQ           4000000
 
 #define CS35L41_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 #define CS35L41_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
@@ -764,8 +764,6 @@ struct cs35l41_private {
        int irq;
        /* GPIO for /RST */
        struct gpio_desc *reset_gpio;
-       void (*otp_setup)(struct cs35l41_private *cs35l41, bool is_pre_setup,
-                         unsigned int *freq);
 };
 
 int cs35l41_probe(struct cs35l41_private *cs35l41,
index 2bed5cf229be721f12bcdc5c3bd5f383520ac243..aec5127260fd4e00f0dd4167ab8e940fad7e9db2 100644 (file)
@@ -2188,7 +2188,7 @@ static int rx_macro_config_classh(struct snd_soc_component *component,
                snd_soc_component_update_bits(component,
                                CDC_RX_CLSH_DECAY_CTRL,
                                CDC_RX_CLSH_DECAY_RATE_MASK, 0x0);
-               snd_soc_component_update_bits(component,
+               snd_soc_component_write_field(component,
                                CDC_RX_RX1_RX_PATH_CFG0,
                                CDC_RX_RXn_CLSH_EN_MASK, 0x1);
                break;
index 943d7d933e81b3325e3ae09a25df7c1ada8e422d..03f24edfe4f6492b86fe8fc97a3ae362b6083e50 100644 (file)
@@ -539,3 +539,4 @@ module_platform_driver(rk817_codec_driver);
 MODULE_DESCRIPTION("ASoC RK817 codec driver");
 MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rk817-codec");
index 297af7ff824ce54ce6bfc87adc54a116b4039111..b62301a6281f912b3794f43f1aff10b10a11315b 100644 (file)
@@ -1311,13 +1311,54 @@ static int rt1011_r0_load_info(struct snd_kcontrol *kcontrol,
        .put = rt1011_r0_load_mode_put \
 }
 
-static const char * const rt1011_i2s_ref_texts[] = {
-       "Left Channel", "Right Channel"
+static const char * const rt1011_i2s_ref[] = {
+       "None", "Left Channel", "Right Channel"
 };
 
-static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum,
-                           RT1011_TDM1_SET_1, 7,
-                           rt1011_i2s_ref_texts);
+static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum, 0, 0,
+       rt1011_i2s_ref);
+
+static int rt1011_i2s_ref_put(struct snd_kcontrol *kcontrol,
+               struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component =
+               snd_soc_kcontrol_component(kcontrol);
+       struct rt1011_priv *rt1011 =
+               snd_soc_component_get_drvdata(component);
+
+       rt1011->i2s_ref = ucontrol->value.enumerated.item[0];
+       switch (rt1011->i2s_ref) {
+       case RT1011_I2S_REF_LEFT_CH:
+               regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240);
+               regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8);
+               regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x1022);
+               regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4);
+               break;
+       case RT1011_I2S_REF_RIGHT_CH:
+               regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240);
+               regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8);
+               regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x10a2);
+               regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4);
+               break;
+       default:
+               dev_info(component->dev, "I2S Reference: Do nothing\n");
+       }
+
+       return 0;
+}
+
+static int rt1011_i2s_ref_get(struct snd_kcontrol *kcontrol,
+               struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component =
+               snd_soc_kcontrol_component(kcontrol);
+       struct rt1011_priv *rt1011 =
+               snd_soc_component_get_drvdata(component);
+
+       ucontrol->value.enumerated.item[0] = rt1011->i2s_ref;
+
+       return 0;
+}
 
 static const struct snd_kcontrol_new rt1011_snd_controls[] = {
        /* I2S Data In Selection */
@@ -1358,7 +1399,8 @@ static const struct snd_kcontrol_new rt1011_snd_controls[] = {
        SOC_SINGLE("R0 Temperature", RT1011_STP_INITIAL_RESISTANCE_TEMP,
                2, 255, 0),
        /* I2S Reference */
-       SOC_ENUM("I2S Reference", rt1011_i2s_ref_enum),
+       SOC_ENUM_EXT("I2S Reference", rt1011_i2s_ref_enum,
+               rt1011_i2s_ref_get, rt1011_i2s_ref_put),
 };
 
 static int rt1011_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
@@ -2017,6 +2059,7 @@ static int rt1011_probe(struct snd_soc_component *component)
 
        schedule_work(&rt1011->cali_work);
 
+       rt1011->i2s_ref = 0;
        rt1011->bq_drc_params = devm_kcalloc(component->dev,
                RT1011_ADVMODE_NUM, sizeof(struct rt1011_bq_drc_params *),
                GFP_KERNEL);
index 68fadc15fa8c1c061523156dae87151f75745cc0..4d6e7492d99cd4c26116012d7fb8202a6c1118a0 100644 (file)
@@ -654,6 +654,12 @@ enum {
        RT1011_AIFS
 };
 
+enum {
+       RT1011_I2S_REF_NONE,
+       RT1011_I2S_REF_LEFT_CH,
+       RT1011_I2S_REF_RIGHT_CH,
+};
+
 /* BiQual & DRC related settings */
 #define RT1011_BQ_DRC_NUM 128
 struct rt1011_bq_drc_params {
@@ -692,6 +698,7 @@ struct rt1011_priv {
        unsigned int r0_reg, cali_done;
        unsigned int r0_calib, temperature_calib;
        int recv_spk_mode;
+       int i2s_ref;
 };
 
 #endif         /* end of _RT1011_H_ */
index 983347b65127b9a86d073aebe52aeb33da5a6436..20e0f90ea49863dddc81930ebe93adfcd130850d 100644 (file)
@@ -198,6 +198,7 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
        }
 
        mutex_init(&rt5682->calibrate_mutex);
+       mutex_init(&rt5682->jdet_mutex);
        rt5682_calibrate(rt5682);
 
        rt5682_apply_patch_list(rt5682, &i2c->dev);
index 78b4cb5fb6c8f2b4b3fe82db6c29457fc557213f..5224123d0d3bb900612fc89c99e6a5882424ab91 100644 (file)
@@ -48,6 +48,8 @@ static const struct reg_sequence patch_list[] = {
        {RT5682_SAR_IL_CMD_6, 0x0110},
        {RT5682_CHARGE_PUMP_1, 0x0210},
        {RT5682_HP_LOGIC_CTRL_2, 0x0007},
+       {RT5682_SAR_IL_CMD_2, 0xac00},
+       {RT5682_CBJ_CTRL_7, 0x0104},
 };
 
 void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
@@ -940,6 +942,10 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                snd_soc_component_update_bits(component,
                        RT5682_HP_CHARGE_PUMP_1,
                        RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
+               rt5682_enable_push_button_irq(component, false);
+               snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+                       RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
+               usleep_range(55000, 60000);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
 
@@ -1092,6 +1098,7 @@ void rt5682_jack_detect_handler(struct work_struct *work)
        while (!rt5682->component->card->instantiated)
                usleep_range(10000, 15000);
 
+       mutex_lock(&rt5682->jdet_mutex);
        mutex_lock(&rt5682->calibrate_mutex);
 
        val = snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL)
@@ -1165,6 +1172,7 @@ void rt5682_jack_detect_handler(struct work_struct *work)
        }
 
        mutex_unlock(&rt5682->calibrate_mutex);
+       mutex_unlock(&rt5682->jdet_mutex);
 }
 EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler);
 
@@ -1514,6 +1522,7 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_component *component =
                snd_soc_dapm_to_component(w->dapm);
+       struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
 
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
@@ -1525,12 +1534,17 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
                        RT5682_DEPOP_1, 0x60, 0x60);
                snd_soc_component_update_bits(component,
                        RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080);
+
+               mutex_lock(&rt5682->jdet_mutex);
+
                snd_soc_component_update_bits(component, RT5682_HP_CTRL_2,
                        RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN,
                        RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN);
                usleep_range(5000, 10000);
                snd_soc_component_update_bits(component, RT5682_CHARGE_PUMP_1,
                        RT5682_CP_SW_SIZE_MASK, RT5682_CP_SW_SIZE_L);
+
+               mutex_unlock(&rt5682->jdet_mutex);
                break;
 
        case SND_SOC_DAPM_POST_PMD:
@@ -2844,6 +2858,8 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
 
        for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) {
                struct clk_init_data init = { };
+               struct clk_parent_data parent_data;
+               const struct clk_hw *parent;
 
                dai_clk_hw = &rt5682->dai_clks_hw[i];
 
@@ -2851,17 +2867,17 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
                case RT5682_DAI_WCLK_IDX:
                        /* Make MCLK the parent of WCLK */
                        if (rt5682->mclk) {
-                               init.parent_data = &(struct clk_parent_data){
+                               parent_data = (struct clk_parent_data){
                                        .fw_name = "mclk",
                                };
+                               init.parent_data = &parent_data;
                                init.num_parents = 1;
                        }
                        break;
                case RT5682_DAI_BCLK_IDX:
                        /* Make WCLK the parent of BCLK */
-                       init.parent_hws = &(const struct clk_hw *){
-                               &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX]
-                       };
+                       parent = &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX];
+                       init.parent_hws = &parent;
                        init.num_parents = 1;
                        break;
                default:
@@ -2942,10 +2958,7 @@ static int rt5682_suspend(struct snd_soc_component *component)
 
        cancel_delayed_work_sync(&rt5682->jack_detect_work);
        cancel_delayed_work_sync(&rt5682->jd_check_work);
-       if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) {
-               snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
-                       RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
-                       RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG);
+       if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
                val = snd_soc_component_read(component,
                                RT5682_CBJ_CTRL_2) & RT5682_JACK_TYPE_MASK;
 
@@ -2967,10 +2980,17 @@ static int rt5682_suspend(struct snd_soc_component *component)
                /* enter SAR ADC power saving mode */
                snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
                        RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK |
-                       RT5682_SAR_BUTDET_RST_MASK | RT5682_SAR_SEL_MB1_MB2_MASK, 0);
+                       RT5682_SAR_SEL_MB1_MB2_MASK, 0);
+               usleep_range(5000, 6000);
+               snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+                       RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
+                       RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG);
+               usleep_range(10000, 12000);
                snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
-                       RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_BUTDET_RST_MASK,
-                       RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV | RT5682_SAR_BUTDET_RST_NORMAL);
+                       RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK,
+                       RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV);
+               snd_soc_component_update_bits(component, RT5682_HP_CHARGE_PUMP_1,
+                       RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
        }
 
        regcache_cache_only(rt5682->regmap, true);
@@ -2988,10 +3008,11 @@ static int rt5682_resume(struct snd_soc_component *component)
        regcache_cache_only(rt5682->regmap, false);
        regcache_sync(rt5682->regmap);
 
-       if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) {
+       if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
                snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
                        RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_SEL_MB1_MB2_MASK,
                        RT5682_SAR_BUTDET_POW_NORM | RT5682_SAR_SEL_MB1_MB2_AUTO);
+               usleep_range(5000, 6000);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
                        RT5682_CTRL_MB1_FSM | RT5682_CTRL_MB2_FSM);
@@ -2999,8 +3020,9 @@ static int rt5682_resume(struct snd_soc_component *component)
                        RT5682_PWR_CBJ, RT5682_PWR_CBJ);
        }
 
+       rt5682->jack_type = 0;
        mod_delayed_work(system_power_efficient_wq,
-               &rt5682->jack_detect_work, msecs_to_jiffies(250));
+               &rt5682->jack_detect_work, msecs_to_jiffies(0));
 
        return 0;
 }
index d93829c35585cd6a9effcc9058cf4c8f8a5b2284..c917c76200ea2b44a68948b99f676c2ab613dd18 100644 (file)
@@ -1463,6 +1463,7 @@ struct rt5682_priv {
 
        int jack_type;
        int irq_work_delay_time;
+       struct mutex jdet_mutex;
 };
 
 extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES];
index 470957fcad6b6bdf69a1565a94ee4c4c00acdc6d..d49a4f68566d21e4de7e43595ceb99be4872d9ac 100644 (file)
@@ -2693,6 +2693,8 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
 
        for (i = 0; i < RT5682S_DAI_NUM_CLKS; ++i) {
                struct clk_init_data init = { };
+               struct clk_parent_data parent_data;
+               const struct clk_hw *parent;
 
                dai_clk_hw = &rt5682s->dai_clks_hw[i];
 
@@ -2700,17 +2702,17 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
                case RT5682S_DAI_WCLK_IDX:
                        /* Make MCLK the parent of WCLK */
                        if (rt5682s->mclk) {
-                               init.parent_data = &(struct clk_parent_data){
+                               parent_data = (struct clk_parent_data){
                                        .fw_name = "mclk",
                                };
+                               init.parent_data = &parent_data;
                                init.num_parents = 1;
                        }
                        break;
                case RT5682S_DAI_BCLK_IDX:
                        /* Make WCLK the parent of BCLK */
-                       init.parent_hws = &(const struct clk_hw *){
-                               &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX]
-                       };
+                       parent = &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX];
+                       init.parent_hws = &parent;
                        init.num_parents = 1;
                        break;
                default:
index f9574980a40724a6f944fde8aee83de607aad453..7aa1772a915f322b7e3a0fce8fcfdba1a4458a2e 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <linux/bits.h>
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #define RT9120_REG_ERRRPT      0x10
 #define RT9120_REG_MSVOL       0x20
 #define RT9120_REG_SWRESET     0x40
+#define RT9120_REG_INTERCFG    0x63
 #define RT9120_REG_INTERNAL0   0x65
 #define RT9120_REG_INTERNAL1   0x69
 #define RT9120_REG_UVPOPT      0x6C
+#define RT9120_REG_DIGCFG      0xF8
 
 #define RT9120_VID_MASK                GENMASK(15, 8)
 #define RT9120_SWRST_MASK      BIT(7)
 #define RT9120_CFG_WORDLEN_24  24
 #define RT9120_CFG_WORDLEN_32  32
 #define RT9120_DVDD_UVSEL_MASK GENMASK(5, 4)
+#define RT9120_AUTOSYNC_MASK   BIT(6)
 
-#define RT9120_VENDOR_ID       0x4200
+#define RT9120_VENDOR_ID       0x42
+#define RT9120S_VENDOR_ID      0x43
 #define RT9120_RESET_WAITMS    20
 #define RT9120_CHIPON_WAITMS   20
 #define RT9120_AMPON_WAITMS    50
                                 SNDRV_PCM_FMTBIT_S24_LE |\
                                 SNDRV_PCM_FMTBIT_S32_LE)
 
+enum {
+       CHIP_IDX_RT9120 = 0,
+       CHIP_IDX_RT9120S,
+       CHIP_IDX_MAX
+};
+
 struct rt9120_data {
        struct device *dev;
        struct regmap *regmap;
+       int chip_idx;
 };
 
 /* 11bit [min,max,step] = [-103.9375dB, 24dB, 0.0625dB] */
@@ -149,8 +161,12 @@ static int rt9120_codec_probe(struct snd_soc_component *comp)
        snd_soc_component_init_regmap(comp, data->regmap);
 
        /* Internal setting */
-       snd_soc_component_write(comp, RT9120_REG_INTERNAL1, 0x03);
-       snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x69);
+       if (data->chip_idx == CHIP_IDX_RT9120S) {
+               snd_soc_component_write(comp, RT9120_REG_INTERCFG, 0xde);
+               snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x66);
+       } else
+               snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x04);
+
        return 0;
 }
 
@@ -201,8 +217,8 @@ static int rt9120_hw_params(struct snd_pcm_substream *substream,
                            struct snd_soc_dai *dai)
 {
        struct snd_soc_component *comp = dai->component;
-       unsigned int param_width, param_slot_width;
-       int width;
+       unsigned int param_width, param_slot_width, auto_sync;
+       int width, fs;
 
        switch (width = params_width(param)) {
        case 16:
@@ -240,6 +256,16 @@ static int rt9120_hw_params(struct snd_pcm_substream *substream,
 
        snd_soc_component_update_bits(comp, RT9120_REG_I2SWL,
                                      RT9120_AUDWL_MASK, param_slot_width);
+
+       fs = width * params_channels(param);
+       /* If fs is divided by 48, disable auto sync */
+       if (fs % 48 == 0)
+               auto_sync = 0;
+       else
+               auto_sync = RT9120_AUTOSYNC_MASK;
+
+       snd_soc_component_update_bits(comp, RT9120_REG_DIGCFG,
+                                     RT9120_AUTOSYNC_MASK, auto_sync);
        return 0;
 }
 
@@ -279,9 +305,11 @@ static const struct regmap_range rt9120_rd_yes_ranges[] = {
        regmap_reg_range(0x20, 0x27),
        regmap_reg_range(0x30, 0x38),
        regmap_reg_range(0x3A, 0x40),
+       regmap_reg_range(0x63, 0x63),
        regmap_reg_range(0x65, 0x65),
        regmap_reg_range(0x69, 0x69),
-       regmap_reg_range(0x6C, 0x6C)
+       regmap_reg_range(0x6C, 0x6C),
+       regmap_reg_range(0xF8, 0xF8)
 };
 
 static const struct regmap_access_table rt9120_rd_table = {
@@ -297,9 +325,11 @@ static const struct regmap_range rt9120_wr_yes_ranges[] = {
        regmap_reg_range(0x30, 0x38),
        regmap_reg_range(0x3A, 0x3D),
        regmap_reg_range(0x40, 0x40),
+       regmap_reg_range(0x63, 0x63),
        regmap_reg_range(0x65, 0x65),
        regmap_reg_range(0x69, 0x69),
-       regmap_reg_range(0x6C, 0x6C)
+       regmap_reg_range(0x6C, 0x6C),
+       regmap_reg_range(0xF8, 0xF8)
 };
 
 static const struct regmap_access_table rt9120_wr_table = {
@@ -370,7 +400,7 @@ static int rt9120_reg_write(void *context, unsigned int reg, unsigned int val)
 static const struct regmap_config rt9120_regmap_config = {
        .reg_bits = 8,
        .val_bits = 32,
-       .max_register = RT9120_REG_UVPOPT,
+       .max_register = RT9120_REG_DIGCFG,
 
        .reg_read = rt9120_reg_read,
        .reg_write = rt9120_reg_write,
@@ -388,8 +418,16 @@ static int rt9120_check_vendor_info(struct rt9120_data *data)
        if (ret)
                return ret;
 
-       if ((devid & RT9120_VID_MASK) != RT9120_VENDOR_ID) {
-               dev_err(data->dev, "DEVID not correct [0x%04x]\n", devid);
+       devid = FIELD_GET(RT9120_VID_MASK, devid);
+       switch (devid) {
+       case RT9120_VENDOR_ID:
+               data->chip_idx = CHIP_IDX_RT9120;
+               break;
+       case RT9120S_VENDOR_ID:
+               data->chip_idx = CHIP_IDX_RT9120S;
+               break;
+       default:
+               dev_err(data->dev, "DEVID not correct [0x%0x]\n", devid);
                return -ENODEV;
        }
 
index c496b359f2f40bbde61e34193f8d7e97659f2ff7..e63c6b723d76c44aa8c756ccf7c9f28b405f14f3 100644 (file)
@@ -1896,9 +1896,8 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream,
        }
 
        wcd->dai[dai->id].sconfig.rate = params_rate(params);
-       wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
 
-       return 0;
+       return wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
 }
 
 static int wcd934x_hw_free(struct snd_pcm_substream *substream,
@@ -3257,6 +3256,9 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
        int value = ucontrol->value.integer.value[0];
        int sel;
 
+       if (wcd->comp_enabled[comp] == value)
+               return 0;
+
        wcd->comp_enabled[comp] = value;
        sel = value ? WCD934X_HPH_GAIN_SRC_SEL_COMPANDER :
                WCD934X_HPH_GAIN_SRC_SEL_REGISTER;
@@ -3280,10 +3282,10 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
        case COMPANDER_8:
                break;
        default:
-               break;
+               return 0;
        }
 
-       return 0;
+       return 1;
 }
 
 static int wcd934x_rx_hph_mode_get(struct snd_kcontrol *kc,
@@ -3327,6 +3329,31 @@ static int slim_rx_mux_get(struct snd_kcontrol *kc,
        return 0;
 }
 
+static int slim_rx_mux_to_dai_id(int mux)
+{
+       int aif_id;
+
+       switch (mux) {
+       case 1:
+               aif_id = AIF1_PB;
+               break;
+       case 2:
+               aif_id = AIF2_PB;
+               break;
+       case 3:
+               aif_id = AIF3_PB;
+               break;
+       case 4:
+               aif_id = AIF4_PB;
+               break;
+       default:
+               aif_id = -1;
+               break;
+       }
+
+       return aif_id;
+}
+
 static int slim_rx_mux_put(struct snd_kcontrol *kc,
                           struct snd_ctl_elem_value *ucontrol)
 {
@@ -3334,43 +3361,59 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc,
        struct wcd934x_codec *wcd = dev_get_drvdata(w->dapm->dev);
        struct soc_enum *e = (struct soc_enum *)kc->private_value;
        struct snd_soc_dapm_update *update = NULL;
+       struct wcd934x_slim_ch *ch, *c;
        u32 port_id = w->shift;
+       bool found = false;
+       int mux_idx;
+       int prev_mux_idx = wcd->rx_port_value[port_id];
+       int aif_id;
 
-       if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0])
-               return 0;
+       mux_idx = ucontrol->value.enumerated.item[0];
 
-       wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+       if (mux_idx == prev_mux_idx)
+               return 0;
 
-       switch (wcd->rx_port_value[port_id]) {
+       switch(mux_idx) {
        case 0:
-               list_del_init(&wcd->rx_chs[port_id].list);
-               break;
-       case 1:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF1_PB].slim_ch_list);
-               break;
-       case 2:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF2_PB].slim_ch_list);
-               break;
-       case 3:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF3_PB].slim_ch_list);
+               aif_id = slim_rx_mux_to_dai_id(prev_mux_idx);
+               if (aif_id < 0)
+                       return 0;
+
+               list_for_each_entry_safe(ch, c, &wcd->dai[aif_id].slim_ch_list, list) {
+                       if (ch->port == port_id + WCD934X_RX_START) {
+                               found = true;
+                               list_del_init(&ch->list);
+                               break;
+                       }
+               }
+               if (!found)
+                       return 0;
+
                break;
-       case 4:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF4_PB].slim_ch_list);
+       case 1 ... 4:
+               aif_id = slim_rx_mux_to_dai_id(mux_idx);
+               if (aif_id < 0)
+                       return 0;
+
+               if (list_empty(&wcd->rx_chs[port_id].list)) {
+                       list_add_tail(&wcd->rx_chs[port_id].list,
+                                     &wcd->dai[aif_id].slim_ch_list);
+               } else {
+                       dev_err(wcd->dev ,"SLIM_RX%d PORT is busy\n", port_id);
+                       return 0;
+               }
                break;
+
        default:
-               dev_err(wcd->dev, "Unknown AIF %d\n",
-                       wcd->rx_port_value[port_id]);
+               dev_err(wcd->dev, "Unknown AIF %d\n", mux_idx);
                goto err;
        }
 
+       wcd->rx_port_value[port_id] = mux_idx;
        snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value[port_id],
                                      e, update);
 
-       return 0;
+       return 1;
 err:
        return -EINVAL;
 }
@@ -3816,6 +3859,7 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
        struct soc_mixer_control *mixer =
                        (struct soc_mixer_control *)kc->private_value;
        int enable = ucontrol->value.integer.value[0];
+       struct wcd934x_slim_ch *ch, *c;
        int dai_id = widget->shift;
        int port_id = mixer->shift;
 
@@ -3823,17 +3867,32 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
        if (enable == wcd->tx_port_value[port_id])
                return 0;
 
-       wcd->tx_port_value[port_id] = enable;
-
-       if (enable)
-               list_add_tail(&wcd->tx_chs[port_id].list,
-                             &wcd->dai[dai_id].slim_ch_list);
-       else
-               list_del_init(&wcd->tx_chs[port_id].list);
+       if (enable) {
+               if (list_empty(&wcd->tx_chs[port_id].list)) {
+                       list_add_tail(&wcd->tx_chs[port_id].list,
+                                     &wcd->dai[dai_id].slim_ch_list);
+               } else {
+                       dev_err(wcd->dev ,"SLIM_TX%d PORT is busy\n", port_id);
+                       return 0;
+               }
+        } else {
+               bool found = false;
+
+               list_for_each_entry_safe(ch, c, &wcd->dai[dai_id].slim_ch_list, list) {
+                       if (ch->port == port_id) {
+                               found = true;
+                               list_del_init(&wcd->tx_chs[port_id].list);
+                               break;
+                       }
+               }
+               if (!found)
+                       return 0;
+        }
 
+       wcd->tx_port_value[port_id] = enable;
        snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update);
 
-       return 0;
+       return 1;
 }
 
 static const struct snd_kcontrol_new aif1_slim_cap_mixer[] = {
index 52de7d14b13985970d44bb3f36203bfc9545d600..67151c7770c65064f55ad7378a6ba794f93bf7b7 100644 (file)
@@ -1174,6 +1174,9 @@ static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
        case WCD938X_DIGITAL_INTR_STATUS_0:
        case WCD938X_DIGITAL_INTR_STATUS_1:
        case WCD938X_DIGITAL_INTR_STATUS_2:
+       case WCD938X_DIGITAL_INTR_CLEAR_0:
+       case WCD938X_DIGITAL_INTR_CLEAR_1:
+       case WCD938X_DIGITAL_INTR_CLEAR_2:
        case WCD938X_DIGITAL_SWR_HM_TEST_0:
        case WCD938X_DIGITAL_SWR_HM_TEST_1:
        case WCD938X_DIGITAL_EFUSE_T_DATA_0:
index d4f0d72cbcc80696aba2caa861ad08ff0b5f409f..6cb01a8e08fb6abdeafaad588f6304bf6732f865 100644 (file)
@@ -617,8 +617,9 @@ static int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl)
        switch (cs_dsp->fw_ver) {
        case 0:
        case 1:
-               snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s %x",
-                        cs_dsp->name, region_name, cs_ctl->alg_region.alg);
+               ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
+                               "%s %s %x", cs_dsp->name, region_name,
+                               cs_ctl->alg_region.alg);
                break;
        case 2:
                ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
index 2da4a5fa7a18d85a5181f589b0be4adb9cca3030..564b78f3cdd0a1d8c7e769c1d2db2b9417cae488 100644 (file)
@@ -772,7 +772,8 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc,
 
                usleep_range(1000, 1010);
        }
-       return 0;
+
+       return 1;
 }
 
 static int wsa881x_get_port(struct snd_kcontrol *kcontrol,
@@ -816,15 +817,22 @@ static int wsa881x_set_port(struct snd_kcontrol *kcontrol,
                (struct soc_mixer_control *)kcontrol->private_value;
        int portidx = mixer->reg;
 
-       if (ucontrol->value.integer.value[0])
+       if (ucontrol->value.integer.value[0]) {
+               if (data->port_enable[portidx])
+                       return 0;
+
                data->port_enable[portidx] = true;
-       else
+       } else {
+               if (!data->port_enable[portidx])
+                       return 0;
+
                data->port_enable[portidx] = false;
+       }
 
        if (portidx == WSA881X_PORT_BOOST) /* Boost Switch */
                wsa881x_boost_ctrl(comp, data->port_enable[portidx]);
 
-       return 0;
+       return 1;
 }
 
 static const char * const smart_boost_lvl_text[] = {
index f10496206ceed0e3f292f5b8582fefe307a0e5f5..77219c3f8766cca350aa99ad2b7983e788fb3ee1 100644 (file)
@@ -248,6 +248,75 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
                                        SOF_BT_OFFLOAD_SSP(2) |
                                        SOF_SSP_BT_OFFLOAD_PRESENT),
        },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AF3"),
+               },
+               /* No Jack */
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       SOF_SDW_FOUR_SPK),
+       },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B00")
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       RT711_JD2 |
+                                       SOF_SDW_FOUR_SPK),
+       },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B01")
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       RT711_JD2 |
+                                       SOF_SDW_FOUR_SPK),
+       },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B11")
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       RT711_JD2 |
+                                       SOF_SDW_FOUR_SPK),
+       },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B12")
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       RT711_JD2 |
+                                       SOF_SDW_FOUR_SPK),
+       },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B13"),
+               },
+               /* No Jack */
+               .driver_data = (void *)SOF_SDW_TGL_HDMI,
+       },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B29"),
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       RT711_JD2 |
+                                       SOF_SDW_FOUR_SPK),
+       },
        {}
 };
 
index 06f503452aa50d4f33eafc033659b88219b74f29..b61a778a9d26b46ffc8fce8a7f7ac304e9c592eb 100644 (file)
@@ -74,6 +74,15 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
        }
 };
 
+static const struct snd_soc_acpi_adr_device rt711_sdca_2_adr[] = {
+       {
+               .adr = 0x000230025D071101ull,
+               .num_endpoints = 1,
+               .endpoints = &single_endpoint,
+               .name_prefix = "rt711"
+       }
+};
+
 static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
        {
                .adr = 0x000131025D131601ull, /* unique ID is set for some reason */
@@ -101,6 +110,24 @@ static const struct snd_soc_acpi_adr_device rt1316_3_group1_adr[] = {
        }
 };
 
+static const struct snd_soc_acpi_adr_device rt1316_0_group2_adr[] = {
+       {
+               .adr = 0x000031025D131601ull,
+               .num_endpoints = 1,
+               .endpoints = &spk_l_endpoint,
+               .name_prefix = "rt1316-1"
+       }
+};
+
+static const struct snd_soc_acpi_adr_device rt1316_1_group2_adr[] = {
+       {
+               .adr = 0x000130025D131601ull,
+               .num_endpoints = 1,
+               .endpoints = &spk_r_endpoint,
+               .name_prefix = "rt1316-2"
+       }
+};
+
 static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = {
        {
                .adr = 0x000230025D131601ull,
@@ -209,6 +236,63 @@ static const struct snd_soc_acpi_link_adr adl_sdca_3_in_1[] = {
        {}
 };
 
+static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link2_rt1316_link01_rt714_link3[] = {
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt711_sdca_2_adr),
+               .adr_d = rt711_sdca_2_adr,
+       },
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt1316_0_group2_adr),
+               .adr_d = rt1316_0_group2_adr,
+       },
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1316_1_group2_adr),
+               .adr_d = rt1316_1_group2_adr,
+       },
+       {
+               .mask = BIT(3),
+               .num_adr = ARRAY_SIZE(rt714_3_adr),
+               .adr_d = rt714_3_adr,
+       },
+       {}
+};
+
+static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link12_rt714_link0[] = {
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1316_1_group1_adr),
+               .adr_d = rt1316_1_group1_adr,
+       },
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt1316_2_group1_adr),
+               .adr_d = rt1316_2_group1_adr,
+       },
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt714_0_adr),
+               .adr_d = rt714_0_adr,
+       },
+       {}
+};
+
+static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link3[] = {
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt1316_2_single_adr),
+               .adr_d = rt1316_2_single_adr,
+       },
+       {
+               .mask = BIT(3),
+               .num_adr = ARRAY_SIZE(rt714_3_adr),
+               .adr_d = rt714_3_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link0[] = {
        {
                .mask = BIT(2),
@@ -339,6 +423,27 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
                .drv_name = "sof_sdw",
                .sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l13-rt714-l2.tplg",
        },
+       {
+               .link_mask = 0xF, /* 4 active links required */
+               .links = adl_sdw_rt711_link2_rt1316_link01_rt714_link3,
+               .drv_name = "sof_sdw",
+               .sof_fw_filename = "sof-adl.ri",
+               .sof_tplg_filename = "sof-adl-rt711-l2-rt1316-l01-rt714-l3.tplg",
+       },
+       {
+               .link_mask = 0xC, /* rt1316 on link2 & rt714 on link3 */
+               .links = adl_sdw_rt1316_link2_rt714_link3,
+               .drv_name = "sof_sdw",
+               .sof_fw_filename = "sof-adl.ri",
+               .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
+       },
+       {
+               .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
+               .links = adl_sdw_rt1316_link12_rt714_link0,
+               .drv_name = "sof_sdw",
+               .sof_fw_filename = "sof-adl.ri",
+               .sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg",
+       },
        {
                .link_mask = 0x5, /* 2 active links required */
                .links = adl_sdw_rt1316_link2_rt714_link0,
index b4eb0c97edf1c486e1608a54ae8e348823609c6d..4eebc79d4b486df60b07490e6f1cb19a8401f181 100644 (file)
@@ -81,6 +81,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
                .sof_fw_filename = "sof-cml.ri",
                .sof_tplg_filename = "sof-cml-da7219-max98390.tplg",
        },
+       {
+               .id = "ESSX8336",
+               .drv_name = "sof-essx8336",
+               .sof_fw_filename = "sof-cml.ri",
+               .sof_tplg_filename = "sof-cml-es8336.tplg",
+       },
        {},
 };
 EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
index 6350390414d4a29aebd41d5bd2c96e1dbac43059..31494930433f7fda5d2def43a64d170b2f9bc7a5 100644 (file)
@@ -1054,6 +1054,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
        int irq_id;
        struct mtk_base_afe *afe;
        struct mt8173_afe_private *afe_priv;
+       struct snd_soc_component *comp_pcm, *comp_hdmi;
 
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
        if (ret)
@@ -1142,23 +1143,55 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
        if (ret)
                goto err_pm_disable;
 
-       ret = devm_snd_soc_register_component(&pdev->dev,
-                                        &mt8173_afe_pcm_dai_component,
-                                        mt8173_afe_pcm_dais,
-                                        ARRAY_SIZE(mt8173_afe_pcm_dais));
+       comp_pcm = devm_kzalloc(&pdev->dev, sizeof(*comp_pcm), GFP_KERNEL);
+       if (!comp_pcm) {
+               ret = -ENOMEM;
+               goto err_pm_disable;
+       }
+
+       ret = snd_soc_component_initialize(comp_pcm,
+                                          &mt8173_afe_pcm_dai_component,
+                                          &pdev->dev);
        if (ret)
                goto err_pm_disable;
 
-       ret = devm_snd_soc_register_component(&pdev->dev,
-                                        &mt8173_afe_hdmi_dai_component,
-                                        mt8173_afe_hdmi_dais,
-                                        ARRAY_SIZE(mt8173_afe_hdmi_dais));
+#ifdef CONFIG_DEBUG_FS
+       comp_pcm->debugfs_prefix = "pcm";
+#endif
+
+       ret = snd_soc_add_component(comp_pcm,
+                                   mt8173_afe_pcm_dais,
+                                   ARRAY_SIZE(mt8173_afe_pcm_dais));
+       if (ret)
+               goto err_pm_disable;
+
+       comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL);
+       if (!comp_hdmi) {
+               ret = -ENOMEM;
+               goto err_pm_disable;
+       }
+
+       ret = snd_soc_component_initialize(comp_hdmi,
+                                          &mt8173_afe_hdmi_dai_component,
+                                          &pdev->dev);
        if (ret)
                goto err_pm_disable;
 
+#ifdef CONFIG_DEBUG_FS
+       comp_hdmi->debugfs_prefix = "hdmi";
+#endif
+
+       ret = snd_soc_add_component(comp_hdmi,
+                                   mt8173_afe_hdmi_dais,
+                                   ARRAY_SIZE(mt8173_afe_hdmi_dais));
+       if (ret)
+               goto err_cleanup_components;
+
        dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
        return 0;
 
+err_cleanup_components:
+       snd_soc_unregister_component(&pdev->dev);
 err_pm_disable:
        pm_runtime_disable(&pdev->dev);
        return ret;
@@ -1166,6 +1199,8 @@ err_pm_disable:
 
 static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev)
 {
+       snd_soc_unregister_component(&pdev->dev);
+
        pm_runtime_disable(&pdev->dev);
        if (!pm_runtime_status_suspended(&pdev->dev))
                mt8173_afe_runtime_suspend(&pdev->dev);
index c28ebf891cb05f77b50e88869dc75424edaad51e..2cbf679f5c74b365975dab4246d981298e90c1be 100644 (file)
@@ -30,15 +30,15 @@ static struct mt8173_rt5650_platform_data mt8173_rt5650_priv = {
 };
 
 static const struct snd_soc_dapm_widget mt8173_rt5650_widgets[] = {
-       SND_SOC_DAPM_SPK("Speaker", NULL),
+       SND_SOC_DAPM_SPK("Ext Spk", NULL),
        SND_SOC_DAPM_MIC("Int Mic", NULL),
        SND_SOC_DAPM_HP("Headphone", NULL),
        SND_SOC_DAPM_MIC("Headset Mic", NULL),
 };
 
 static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
-       {"Speaker", NULL, "SPOL"},
-       {"Speaker", NULL, "SPOR"},
+       {"Ext Spk", NULL, "SPOL"},
+       {"Ext Spk", NULL, "SPOR"},
        {"DMIC L1", NULL, "Int Mic"},
        {"DMIC R1", NULL, "Int Mic"},
        {"Headphone", NULL, "HPOL"},
@@ -48,7 +48,7 @@ static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
 };
 
 static const struct snd_kcontrol_new mt8173_rt5650_controls[] = {
-       SOC_DAPM_PIN_SWITCH("Speaker"),
+       SOC_DAPM_PIN_SWITCH("Ext Spk"),
        SOC_DAPM_PIN_SWITCH("Int Mic"),
        SOC_DAPM_PIN_SWITCH("Headphone"),
        SOC_DAPM_PIN_SWITCH("Headset Mic"),
index 4f693a2660b561a3c7c7d1bdc9425e39999c2e28..3ee8bfcd0121152489ed151124a55b11fa79b4ff 100644 (file)
@@ -550,6 +550,10 @@ struct audio_hw_clk_cfg {
        uint32_t clock_root;
 } __packed;
 
+struct audio_hw_clk_rel_cfg {
+       uint32_t clock_id;
+} __packed;
+
 #define PARAM_ID_HW_EP_POWER_MODE_CFG  0x8001176
 #define AR_HW_EP_POWER_MODE_0  0 /* default */
 #define AR_HW_EP_POWER_MODE_1  1 /* XO Shutdown allowed */
index 3d831b635524fbb5bf9146b95dffbd89ef90542c..72c5719f1d253ef535a4360e196e1baf832e98e5 100644 (file)
@@ -390,7 +390,7 @@ struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
        int ret = 0;
 
        if (port_id < 0) {
-               dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+               dev_err(dev, "Invalid port_id %d\n", port_id);
                return ERR_PTR(-EINVAL);
        }
 
@@ -508,7 +508,7 @@ int q6adm_matrix_map(struct device *dev, int path,
                int port_idx = payload_map.port_id[i];
 
                if (port_idx < 0) {
-                       dev_err(dev, "Invalid port_id 0x%x\n",
+                       dev_err(dev, "Invalid port_id %d\n",
                                payload_map.port_id[i]);
                        kfree(pkt);
                        return -EINVAL;
index 46f365528d50184a84d1a51f31012328f2181242..b74b67720ef437176b3919672f171c9e0d068ad4 100644 (file)
@@ -269,9 +269,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
 
        if (ret < 0) {
                dev_err(dev, "%s: q6asm_open_write failed\n", __func__);
-               q6asm_audio_client_free(prtd->audio_client);
-               prtd->audio_client = NULL;
-               return -ENOMEM;
+               goto open_err;
        }
 
        prtd->session_id = q6asm_get_session_id(prtd->audio_client);
@@ -279,7 +277,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
                              prtd->session_id, substream->stream);
        if (ret) {
                dev_err(dev, "%s: stream reg failed ret:%d\n", __func__, ret);
-               return ret;
+               goto routing_err;
        }
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -301,10 +299,19 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
        }
        if (ret < 0)
                dev_info(dev, "%s: CMD Format block failed\n", __func__);
+       else
+               prtd->state = Q6ASM_STREAM_RUNNING;
 
-       prtd->state = Q6ASM_STREAM_RUNNING;
+       return ret;
 
-       return 0;
+routing_err:
+       q6asm_cmd(prtd->audio_client, prtd->stream_id,  CMD_CLOSE);
+open_err:
+       q6asm_unmap_memory_regions(substream->stream, prtd->audio_client);
+       q6asm_audio_client_free(prtd->audio_client);
+       prtd->audio_client = NULL;
+
+       return ret;
 }
 
 static int q6asm_dai_trigger(struct snd_soc_component *component,
index 82c40f2d4e1df85df3a5247c3f98d02765407081..cda33ded29bed3581498572238b544c0e0e81161 100644 (file)
@@ -42,6 +42,12 @@ struct prm_cmd_request_rsc {
        struct audio_hw_clk_cfg clock_id;
 } __packed;
 
+struct prm_cmd_release_rsc {
+       struct apm_module_param_data param_data;
+       uint32_t num_clk_id;
+       struct audio_hw_clk_rel_cfg clock_id;
+} __packed;
+
 static int q6prm_send_cmd_sync(struct q6prm *prm, struct gpr_pkt *pkt, uint32_t rsp_opcode)
 {
        return audioreach_send_cmd_sync(prm->dev, prm->gdev, &prm->result, &prm->lock,
@@ -102,8 +108,8 @@ int q6prm_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id, uint32_
 }
 EXPORT_SYMBOL_GPL(q6prm_unvote_lpass_core_hw);
 
-int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
-                         unsigned int freq)
+static int q6prm_request_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+                                    unsigned int freq)
 {
        struct q6prm *prm = dev_get_drvdata(dev->parent);
        struct apm_module_param_data *param_data;
@@ -138,6 +144,49 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_
 
        return rc;
 }
+
+static int q6prm_release_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+                         unsigned int freq)
+{
+       struct q6prm *prm = dev_get_drvdata(dev->parent);
+       struct apm_module_param_data *param_data;
+       struct prm_cmd_release_rsc *rel;
+       gpr_device_t *gdev = prm->gdev;
+       struct gpr_pkt *pkt;
+       int rc;
+
+       pkt = audioreach_alloc_cmd_pkt(sizeof(*rel), PRM_CMD_RELEASE_HW_RSC, 0, gdev->svc.id,
+                                      GPR_PRM_MODULE_IID);
+       if (IS_ERR(pkt))
+               return PTR_ERR(pkt);
+
+       rel = (void *)pkt + GPR_HDR_SIZE + APM_CMD_HDR_SIZE;
+
+       param_data = &rel->param_data;
+
+       param_data->module_instance_id = GPR_PRM_MODULE_IID;
+       param_data->error_code = 0;
+       param_data->param_id = PARAM_ID_RSC_AUDIO_HW_CLK;
+       param_data->param_size = sizeof(*rel) - APM_MODULE_PARAM_DATA_SIZE;
+
+       rel->num_clk_id = 1;
+       rel->clock_id.clock_id = clk_id;
+
+       rc = q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_RELEASE_HW_RSC);
+
+       kfree(pkt);
+
+       return rc;
+}
+
+int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+                         unsigned int freq)
+{
+       if (freq)
+               return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+
+       return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+}
 EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock);
 
 static int prm_callback(struct gpr_resp_pkt *data, void *priv, int op)
index 3390ebef9549d2b4a7c206d72eaeb4d7da5fab59..928fd23e2c27271a49a2376ffdf86ad3e85fa82b 100644 (file)
@@ -372,6 +372,12 @@ int q6routing_stream_open(int fedai_id, int perf_mode,
        }
 
        session = &routing_data->sessions[stream_id - 1];
+       if (session->port_id < 0) {
+               dev_err(routing_data->dev, "Routing not setup for MultiMedia%d Session\n",
+                       session->fedai_id);
+               return -EINVAL;
+       }
+
        pdata = &routing_data->port_data[session->port_id];
 
        mutex_lock(&routing_data->lock);
@@ -492,9 +498,15 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
        struct session_data *session = &data->sessions[session_id];
 
        if (ucontrol->value.integer.value[0]) {
+               if (session->port_id == be_id)
+                       return 0;
+
                session->port_id = be_id;
                snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
        } else {
+               if (session->port_id == -1 || session->port_id != be_id)
+                       return 0;
+
                session->port_id = -1;
                snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
        }
index 17b9b287853a164ab2b2310ceb1186f3d441ef6b..5f9cb5c4c7f090780506e01b8a719d619bb797b1 100644 (file)
@@ -95,6 +95,7 @@ struct rk_i2s_tdm_dev {
        spinlock_t lock; /* xfer lock */
        bool has_playback;
        bool has_capture;
+       struct snd_soc_dai_driver *dai;
 };
 
 static int to_ch_num(unsigned int val)
@@ -1310,19 +1311,14 @@ static const struct of_device_id rockchip_i2s_tdm_match[] = {
        {},
 };
 
-static struct snd_soc_dai_driver i2s_tdm_dai = {
+static const struct snd_soc_dai_driver i2s_tdm_dai = {
        .probe = rockchip_i2s_tdm_dai_probe,
-       .playback = {
-               .stream_name  = "Playback",
-       },
-       .capture = {
-               .stream_name  = "Capture",
-       },
        .ops = &rockchip_i2s_tdm_dai_ops,
 };
 
-static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
+static int rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
 {
+       struct snd_soc_dai_driver *dai;
        struct property *dma_names;
        const char *dma_name;
        u64 formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
@@ -1337,19 +1333,33 @@ static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
                        i2s_tdm->has_capture = true;
        }
 
+       dai = devm_kmemdup(i2s_tdm->dev, &i2s_tdm_dai,
+                          sizeof(*dai), GFP_KERNEL);
+       if (!dai)
+               return -ENOMEM;
+
        if (i2s_tdm->has_playback) {
-               i2s_tdm_dai.playback.channels_min = 2;
-               i2s_tdm_dai.playback.channels_max = 8;
-               i2s_tdm_dai.playback.rates = SNDRV_PCM_RATE_8000_192000;
-               i2s_tdm_dai.playback.formats = formats;
+               dai->playback.stream_name  = "Playback";
+               dai->playback.channels_min = 2;
+               dai->playback.channels_max = 8;
+               dai->playback.rates = SNDRV_PCM_RATE_8000_192000;
+               dai->playback.formats = formats;
        }
 
        if (i2s_tdm->has_capture) {
-               i2s_tdm_dai.capture.channels_min = 2;
-               i2s_tdm_dai.capture.channels_max = 8;
-               i2s_tdm_dai.capture.rates = SNDRV_PCM_RATE_8000_192000;
-               i2s_tdm_dai.capture.formats = formats;
+               dai->capture.stream_name  = "Capture";
+               dai->capture.channels_min = 2;
+               dai->capture.channels_max = 8;
+               dai->capture.rates = SNDRV_PCM_RATE_8000_192000;
+               dai->capture.formats = formats;
        }
+
+       if (i2s_tdm->clk_trcm != TRCM_TXRX)
+               dai->symmetric_rate = 1;
+
+       i2s_tdm->dai = dai;
+
+       return 0;
 }
 
 static int rockchip_i2s_tdm_path_check(struct rk_i2s_tdm_dev *i2s_tdm,
@@ -1541,8 +1551,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
        spin_lock_init(&i2s_tdm->lock);
        i2s_tdm->soc_data = (struct rk_i2s_soc_data *)of_id->data;
 
-       rockchip_i2s_tdm_init_dai(i2s_tdm);
-
        i2s_tdm->frame_width = 64;
 
        i2s_tdm->clk_trcm = TRCM_TXRX;
@@ -1555,8 +1563,10 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
                }
                i2s_tdm->clk_trcm = TRCM_RX;
        }
-       if (i2s_tdm->clk_trcm != TRCM_TXRX)
-               i2s_tdm_dai.symmetric_rate = 1;
+
+       ret = rockchip_i2s_tdm_init_dai(i2s_tdm);
+       if (ret)
+               return ret;
 
        i2s_tdm->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
        if (IS_ERR(i2s_tdm->grf))
@@ -1678,7 +1688,7 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
 
        ret = devm_snd_soc_register_component(&pdev->dev,
                                              &rockchip_i2s_tdm_component,
-                                             &i2s_tdm_dai, 1);
+                                             i2s_tdm->dai, 1);
 
        if (ret) {
                dev_err(&pdev->dev, "Could not register DAI\n");
index 16c6e0265749bc88e7834dbae17656427b66be4a..03e0d4eca78156a60442b0e77a8260b046ed3690 100644 (file)
@@ -102,7 +102,7 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 
        if (dmaen->chan)
-               dmaengine_terminate_sync(dmaen->chan);
+               dmaengine_terminate_async(dmaen->chan);
 
        return 0;
 }
index 2ae99b49d3f5f6b72c5990f05432bf06c588d888..cbd7ea48837b246ddcface067b0eff746fec59da 100644 (file)
@@ -20,8 +20,10 @@ static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine)
 
        if (comp_ids) {
                for (i = 0; i < comp_ids->num_codecs; i++) {
-                       if (acpi_dev_present(comp_ids->codecs[i], NULL, -1))
+                       if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) {
+                               strscpy(machine->id, comp_ids->codecs[i], ACPI_ID_LEN);
                                return true;
+                       }
                }
        }
 
index 2892b0aba151c083b71900eae31e70d5fd60ef0f..b06c5682445c0313c6caf7e1874f99bc158da712 100644 (file)
@@ -2559,8 +2559,13 @@ static struct snd_soc_dapm_widget *dapm_find_widget(
        return NULL;
 }
 
-static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
-                               const char *pin, int status)
+/*
+ * set the DAPM pin status:
+ * returns 1 when the value has been updated, 0 when unchanged, or a negative
+ * error code; called from kcontrol put callback
+ */
+static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
+                                 const char *pin, int status)
 {
        struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
        int ret = 0;
@@ -2586,6 +2591,18 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
        return ret;
 }
 
+/*
+ * similar as __snd_soc_dapm_set_pin(), but returns 0 when successful;
+ * called from several API functions below
+ */
+static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
+                               const char *pin, int status)
+{
+       int ret = __snd_soc_dapm_set_pin(dapm, pin, status);
+
+       return ret < 0 ? ret : 0;
+}
+
 /**
  * snd_soc_dapm_sync_unlocked - scan and power dapm paths
  * @dapm: DAPM context
@@ -3589,10 +3606,10 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
        const char *pin = (const char *)kcontrol->private_value;
        int ret;
 
-       if (ucontrol->value.integer.value[0])
-               ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
-       else
-               ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
+       mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+       ret = __snd_soc_dapm_set_pin(&card->dapm, pin,
+                                    !!ucontrol->value.integer.value[0]);
+       mutex_unlock(&card->dapm_mutex);
 
        snd_soc_dapm_sync(&card->dapm);
        return ret;
index 557e22c5254c76d32e31be17544f2af79abed2a8..f5b9e66ac3b82b5eeeadcd03c6cf9846f48cef41 100644 (file)
@@ -2700,6 +2700,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
 /* remove dynamic controls from the component driver */
 int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
 {
+       struct snd_card *card = comp->card->snd_card;
        struct snd_soc_dobj *dobj, *next_dobj;
        int pass = SOC_TPLG_PASS_END;
 
@@ -2707,6 +2708,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
        while (pass >= SOC_TPLG_PASS_START) {
 
                /* remove mixer controls */
+               down_write(&card->controls_rwsem);
                list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
                        list) {
 
@@ -2745,6 +2747,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
                                break;
                        }
                }
+               up_write(&card->controls_rwsem);
                pass--;
        }
 
index 6bb4db87af0330abe6f0f94869e4ae6992f8dc37..041c54639c4d9a220607770daf84e365158eb7cb 100644 (file)
@@ -47,7 +47,7 @@ config SND_SOC_SOF_OF
          Say Y if you need this option. If unsure select "N".
 
 config SND_SOC_SOF_COMPRESS
-       tristate
+       bool
        select SND_SOC_COMPRESS
 
 config SND_SOC_SOF_DEBUG_PROBES
index 58bb89af4de1f787351601b7365eb7146a0d639c..bb1dfe4f6d40100c3d614447b987521ea77a8f05 100644 (file)
@@ -69,7 +69,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
 {
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        struct snd_soc_component *scomp = scontrol->scomp;
-       enum sof_ipc_ctrl_type ctrl_type;
+       u32 ipc_cmd;
        int ret;
 
        if (!scontrol->comp_data_dirty)
@@ -79,9 +79,9 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
                return;
 
        if (scontrol->cmd == SOF_CTRL_CMD_BINARY)
-               ctrl_type = SOF_IPC_COMP_GET_DATA;
+               ipc_cmd = SOF_IPC_COMP_GET_DATA;
        else
-               ctrl_type = SOF_IPC_COMP_GET_VALUE;
+               ipc_cmd = SOF_IPC_COMP_GET_VALUE;
 
        /* set the ABI header values */
        cdata->data->magic = SOF_ABI_MAGIC;
@@ -89,7 +89,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
 
        /* refresh the component data from DSP */
        scontrol->comp_data_dirty = false;
-       ret = snd_sof_ipc_set_get_comp_data(scontrol, ctrl_type,
+       ret = snd_sof_ipc_set_get_comp_data(scontrol, ipc_cmd,
                                            SOF_CTRL_TYPE_VALUE_CHAN_GET,
                                            scontrol->cmd, false);
        if (ret < 0) {
index 30025d3c16b6e925d7c0cd3790d086b2e3bc5be1..0862ff8b66273389414a8f1bf1c4ac8e73b29ed5 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/io.h>
 #include <sound/hdaudio.h>
 #include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
 #include "../sof-priv.h"
 #include "hda.h"
 
 #endif
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power)
+{
+       unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN);
+
+       if (link_power)
+               mask &= ~BIT(addr);
+       else
+               mask |= BIT(addr);
+
+       snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+
 static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
 {
        struct hdac_bus *bus = codec->bus;
@@ -41,6 +55,9 @@ static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
         */
        if (codec->addr == HDA_IDISP_ADDR && !enable)
                snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+       /* WAKEEN needs to be set for disabled links */
+       update_codec_wake_enable(bus, codec->addr, enable);
 }
 
 static const struct hdac_bus_ops bus_core_ops = {
index 6744318de612e567ceab15b22df953686222a50e..13cd96e6724a49a999ec743f9a5c24f9d11536f9 100644 (file)
@@ -22,6 +22,7 @@
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
 #define IDISP_VID_INTEL        0x80860000
+#define CODEC_PROBE_RETRIES 3
 
 /* load the legacy HDA codec driver */
 static int request_codec_module(struct hda_codec *codec)
@@ -121,12 +122,15 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
        u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
                (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
        u32 resp = -1;
-       int ret;
+       int ret, retry = 0;
+
+       do {
+               mutex_lock(&hbus->core.cmd_mutex);
+               snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+               snd_hdac_bus_get_response(&hbus->core, address, &resp);
+               mutex_unlock(&hbus->core.cmd_mutex);
+       } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES);
 
-       mutex_lock(&hbus->core.cmd_mutex);
-       snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
-       snd_hdac_bus_get_response(&hbus->core, address, &resp);
-       mutex_unlock(&hbus->core.cmd_mutex);
        if (resp == -1)
                return -EIO;
        dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
index 058baca2cd0e90cb197aa95b86c6692a413e192c..287dc0eb6686f52bf815cfba934d71cdd7ecb667 100644 (file)
@@ -622,8 +622,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
        hda_dsp_ipc_int_disable(sdev);
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-       if (runtime_suspend)
-               hda_codec_jack_wake_enable(sdev, true);
+       hda_codec_jack_wake_enable(sdev, runtime_suspend);
 
        /* power down all hda link */
        snd_hdac_ext_bus_link_power_down_all(bus);
index 883d78dd01b5a4ddba16367c504a93f8dbc8c38f..2c0d4d06ab364125a4a3a987c1000385905a7042 100644 (file)
@@ -58,6 +58,13 @@ int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w)
                return -EINVAL;
        }
 
+       /* DAI already configured, reset it before reconfiguring it */
+       if (sof_dai->configured) {
+               ret = hda_ctrl_dai_widget_free(w);
+               if (ret < 0)
+                       return ret;
+       }
+
        config = &sof_dai->dai_config[sof_dai->current_config];
 
        /*
@@ -810,6 +817,20 @@ skip_soundwire:
        return 0;
 }
 
+static void hda_check_for_state_change(struct snd_sof_dev *sdev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+       struct hdac_bus *bus = sof_to_bus(sdev);
+       unsigned int codec_mask;
+
+       codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+       if (codec_mask) {
+               hda_codec_jack_check(sdev);
+               snd_hdac_chip_writew(bus, STATESTS, codec_mask);
+       }
+#endif
+}
+
 static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
 {
        struct snd_sof_dev *sdev = context;
@@ -851,6 +872,8 @@ static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
        if (hda_sdw_check_wakeen_irq(sdev))
                hda_sdw_process_wakeen(sdev);
 
+       hda_check_for_state_change(sdev);
+
        /* enable GIE interrupt */
        snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
                                SOF_HDA_INTCTL,
index 6254bacad6eb7673aa0b90f57077790175705457..717f45a83445c129e1779d3f3851011b207517ef 100644 (file)
@@ -700,7 +700,7 @@ static int stm32_i2s_configure_clock(struct snd_soc_dai *cpu_dai,
                if (ret < 0)
                        return ret;
 
-               nb_bits = frame_len * ((cgfr & I2S_CGFR_CHLEN) + 1);
+               nb_bits = frame_len * (FIELD_GET(I2S_CGFR_CHLEN, cgfr) + 1);
                ret = stm32_i2s_calc_clk_div(i2s, i2s_clock_rate,
                                             (nb_bits * rate));
                if (ret)
index 8ee9a77bd83d375832de31094be44878093ecfdd..a74c980ee77539cd53dd8006eaea1487b8ecc653 100644 (file)
@@ -26,51 +26,162 @@ static const struct reg_default tegra186_dspk_reg_defaults[] = {
        { TEGRA186_DSPK_CODEC_CTRL,  0x03000000 },
 };
 
-static int tegra186_dspk_get_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_get_fifo_th(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
        struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
 
-       if (strstr(kcontrol->id.name, "FIFO Threshold"))
-               ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               ucontrol->value.integer.value[0] = dspk->osr_val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               ucontrol->value.integer.value[0] = dspk->lrsel;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               ucontrol->value.integer.value[0] = dspk->ch_sel;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               ucontrol->value.integer.value[0] = dspk->mono_to_stereo;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               ucontrol->value.integer.value[0] = dspk->stereo_to_mono;
+       ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
 
        return 0;
 }
 
-static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_put_fifo_th(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
        struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
-       int val = ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "FIFO Threshold"))
-               dspk->rx_fifo_th = val;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               dspk->osr_val = val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               dspk->lrsel = val;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               dspk->ch_sel = val;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               dspk->mono_to_stereo = val;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               dspk->stereo_to_mono = val;
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == dspk->rx_fifo_th)
+               return 0;
+
+       dspk->rx_fifo_th = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_osr_val(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->osr_val;
 
        return 0;
 }
 
+static int tegra186_dspk_put_osr_val(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->osr_val)
+               return 0;
+
+       dspk->osr_val = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->lrsel;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->lrsel)
+               return 0;
+
+       dspk->lrsel = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_ch_sel(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->ch_sel;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_ch_sel(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->ch_sel)
+               return 0;
+
+       dspk->ch_sel = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->mono_to_stereo;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->mono_to_stereo)
+               return 0;
+
+       dspk->mono_to_stereo = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->stereo_to_mono;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->stereo_to_mono)
+               return 0;
+
+       dspk->stereo_to_mono = value;
+
+       return 1;
+}
+
 static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev)
 {
        struct tegra186_dspk *dspk = dev_get_drvdata(dev);
@@ -279,17 +390,19 @@ static const struct soc_enum tegra186_dspk_lrsel_enum =
 static const struct snd_kcontrol_new tegrat186_dspk_controls[] = {
        SOC_SINGLE_EXT("FIFO Threshold", SND_SOC_NOPM, 0,
                       TEGRA186_DSPK_RX_FIFO_DEPTH - 1, 0,
-                      tegra186_dspk_get_control, tegra186_dspk_put_control),
+                      tegra186_dspk_get_fifo_th, tegra186_dspk_put_fifo_th),
        SOC_ENUM_EXT("OSR Value", tegra186_dspk_osr_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_osr_val, tegra186_dspk_put_osr_val),
        SOC_ENUM_EXT("LR Polarity Select", tegra186_dspk_lrsel_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_pol_sel, tegra186_dspk_put_pol_sel),
        SOC_ENUM_EXT("Channel Select", tegra186_dspk_ch_sel_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_ch_sel, tegra186_dspk_put_ch_sel),
        SOC_ENUM_EXT("Mono To Stereo", tegra186_dspk_mono_conv_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_mono_to_stereo,
+                    tegra186_dspk_put_mono_to_stereo),
        SOC_ENUM_EXT("Stereo To Mono", tegra186_dspk_stereo_conv_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_stereo_to_mono,
+                    tegra186_dspk_put_stereo_to_mono),
 };
 
 static const struct snd_soc_component_driver tegra186_dspk_cmpnt = {
index bcccdf3ddc528b2d2ccea759390c0564c1ea6c86..1a2e868a6220932f48f6eab1150523e1c9ee37dd 100644 (file)
@@ -424,46 +424,122 @@ static const struct snd_soc_dai_ops tegra_admaif_dai_ops = {
        .trigger        = tegra_admaif_trigger,
 };
 
-static int tegra_admaif_get_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+       ucontrol->value.enumerated.item[0] =
+               admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
+
+       return 0;
+}
+
+static int tegra210_admaif_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg])
+               return 0;
+
+       admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+       ucontrol->value.enumerated.item[0] =
+               admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
+
+       return 0;
+}
+
+static int tegra210_admaif_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
        struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg])
+               return 0;
+
+       admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
-       long *uctl_val = &ucontrol->value.integer.value[0];
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
 
-       if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               *uctl_val = admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               *uctl_val = admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               *uctl_val = admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               *uctl_val = admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
+       ucontrol->value.enumerated.item[0] =
+               admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
 
        return 0;
 }
 
-static int tegra_admaif_put_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
        struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg])
+               return 0;
+
+       admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
-       int value = ucontrol->value.integer.value[0];
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
 
-       if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+       ucontrol->value.enumerated.item[0] =
+               admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
 
        return 0;
 }
 
+static int tegra210_admaif_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg])
+               return 0;
+
+       admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
 static int tegra_admaif_dai_probe(struct snd_soc_dai *dai)
 {
        struct tegra_admaif *admaif = snd_soc_dai_get_drvdata(dai);
@@ -559,17 +635,21 @@ static const char * const tegra_admaif_mono_conv_text[] = {
 }
 
 #define TEGRA_ADMAIF_CIF_CTRL(reg)                                            \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,\
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,     \
+                       tegra210_admaif_pget_mono_to_stereo,                   \
+                       tegra210_admaif_pput_mono_to_stereo,                   \
                        tegra_admaif_mono_conv_text),                          \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,\
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,     \
+                       tegra210_admaif_pget_stereo_to_mono,                   \
+                       tegra210_admaif_pput_stereo_to_mono,                   \
                        tegra_admaif_stereo_conv_text),                        \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1,      \
+                       tegra210_admaif_cget_mono_to_stereo,                   \
+                       tegra210_admaif_cput_mono_to_stereo,                   \
                        tegra_admaif_mono_conv_text),                          \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1,      \
+                       tegra210_admaif_cget_stereo_to_mono,                   \
+                       tegra210_admaif_cput_stereo_to_mono,                   \
                        tegra_admaif_stereo_conv_text)
 
 static struct snd_kcontrol_new tegra210_admaif_controls[] = {
index d7c7849c2f92c999b8463b7715284e6555fb74b7..3785cade2d9a94b21a6e31cfdddd2019f616d351 100644 (file)
@@ -193,6 +193,9 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol,
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;;
 
+       if (value == bytes_map[mc->reg])
+               return 0;
+
        if (value >= 0 && value <= 255) {
                /* update byte map and enable slot */
                bytes_map[mc->reg] = value;
@@ -511,8 +514,8 @@ static int tegra210_adx_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_adx_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_adx_runtime_suspend,
                           tegra210_adx_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_adx_driver = {
index a1989eae2b525ad818245c56c588b8da0aa55997..388b815443c7d1dff33617667a592e91ab176153 100644 (file)
@@ -62,6 +62,7 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
        unsigned int *item = uctl->value.enumerated.item;
        unsigned int value = e->values[item[0]];
        unsigned int i, bit_pos, reg_idx = 0, reg_val = 0;
+       int change = 0;
 
        if (item[0] >= e->items)
                return -EINVAL;
@@ -86,12 +87,14 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
 
                /* Update widget power if state has changed */
                if (snd_soc_component_test_bits(cmpnt, update[i].reg,
-                                               update[i].mask, update[i].val))
-                       snd_soc_dapm_mux_update_power(dapm, kctl, item[0], e,
-                                                     &update[i]);
+                                               update[i].mask,
+                                               update[i].val))
+                       change |= snd_soc_dapm_mux_update_power(dapm, kctl,
+                                                               item[0], e,
+                                                               &update[i]);
        }
 
-       return 0;
+       return change;
 }
 
 static struct snd_soc_dai_driver tegra210_ahub_dais[] = {
index af9bddfc312073d70ded76d3deffb2d5bf637fe7..d064cc67fea66654499d9e73e629463de38807e0 100644 (file)
@@ -222,6 +222,9 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol,
        int reg = mc->reg;
        int value = ucontrol->value.integer.value[0];
 
+       if (value == bytes_map[reg])
+               return 0;
+
        if (value >= 0 && value <= 255) {
                /* Update byte map and enable slot */
                bytes_map[reg] = value;
@@ -580,8 +583,8 @@ static int tegra210_amx_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_amx_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_amx_runtime_suspend,
                           tegra210_amx_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_amx_driver = {
index b096478cd2ef0f65eb30239b1e781c0831ec8fad..db95794530f4678b322dabc1ab13dedb552acfea 100644 (file)
@@ -156,51 +156,162 @@ static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int tegra210_dmic_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.integer.value[0] = dmic->boost_gain;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == dmic->boost_gain)
+               return 0;
+
+       dmic->boost_gain = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->ch_select;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->ch_select)
+               return 0;
+
+       dmic->ch_select = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->mono_to_stereo)
+               return 0;
+
+       dmic->mono_to_stereo = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->stereo_to_mono)
+               return 0;
+
+       dmic->stereo_to_mono = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
 
-       if (strstr(kcontrol->id.name, "Boost Gain Volume"))
-               ucontrol->value.integer.value[0] = dmic->boost_gain;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               ucontrol->value.integer.value[0] = dmic->ch_select;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               ucontrol->value.integer.value[0] = dmic->mono_to_stereo;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               ucontrol->value.integer.value[0] = dmic->stereo_to_mono;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               ucontrol->value.integer.value[0] = dmic->osr_val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               ucontrol->value.integer.value[0] = dmic->lrsel;
+       ucontrol->value.enumerated.item[0] = dmic->osr_val;
 
        return 0;
 }
 
-static int tegra210_dmic_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
-       int value = ucontrol->value.integer.value[0];
+       unsigned int value = ucontrol->value.enumerated.item[0];
 
-       if (strstr(kcontrol->id.name, "Boost Gain Volume"))
-               dmic->boost_gain = value;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               dmic->ch_select = ucontrol->value.integer.value[0];
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               dmic->mono_to_stereo = value;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               dmic->stereo_to_mono = value;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               dmic->osr_val = value;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               dmic->lrsel = value;
+       if (value == dmic->osr_val)
+               return 0;
+
+       dmic->osr_val = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->lrsel;
 
        return 0;
 }
 
+static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->lrsel)
+               return 0;
+
+       dmic->lrsel = value;
+
+       return 1;
+}
+
 static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = {
        .hw_params      = tegra210_dmic_hw_params,
 };
@@ -287,19 +398,22 @@ static const struct soc_enum tegra210_dmic_lrsel_enum =
 
 static const struct snd_kcontrol_new tegra210_dmic_controls[] = {
        SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0,
-                      tegra210_dmic_get_control, tegra210_dmic_put_control),
+                      tegra210_dmic_get_boost_gain,
+                      tegra210_dmic_put_boost_gain),
        SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select),
        SOC_ENUM_EXT("Mono To Stereo",
-                    tegra210_dmic_mono_conv_enum, tegra210_dmic_get_control,
-                    tegra210_dmic_put_control),
+                    tegra210_dmic_mono_conv_enum,
+                    tegra210_dmic_get_mono_to_stereo,
+                    tegra210_dmic_put_mono_to_stereo),
        SOC_ENUM_EXT("Stereo To Mono",
-                    tegra210_dmic_stereo_conv_enum, tegra210_dmic_get_control,
-                    tegra210_dmic_put_control),
+                    tegra210_dmic_stereo_conv_enum,
+                    tegra210_dmic_get_stereo_to_mono,
+                    tegra210_dmic_put_stereo_to_mono),
        SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val),
        SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel),
 };
 
 static const struct snd_soc_component_driver tegra210_dmic_compnt = {
index 45f31ccb49d89ee2fc195e64e1e89a180c45f862..9552bbb939dd1e9343ad989a5bafb036a75db0e0 100644 (file)
@@ -302,85 +302,235 @@ static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai,
        return 0;
 }
 
-static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
-                                          unsigned int ratio)
+static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
 {
-       struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
 
-       i2s->bclk_ratio = ratio;
+       ucontrol->value.integer.value[0] = i2s->loopback;
 
        return 0;
 }
 
-static int tegra210_i2s_get_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->loopback)
+               return 0;
+
+       i2s->loopback = value;
+
+       regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK,
+                          i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+
+       return 1;
+}
+
+static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
-       long *uctl_val = &ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "Loopback"))
-               *uctl_val = i2s->loopback;
-       else if (strstr(kcontrol->id.name, "FSYNC Width"))
-               *uctl_val = i2s->fsync_width;
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               *uctl_val = i2s->stereo_to_mono[I2S_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               *uctl_val = i2s->mono_to_stereo[I2S_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               *uctl_val = i2s->stereo_to_mono[I2S_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               *uctl_val = i2s->mono_to_stereo[I2S_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback FIFO Threshold"))
-               *uctl_val = i2s->rx_fifo_th;
-       else if (strstr(kcontrol->id.name, "BCLK Ratio"))
-               *uctl_val = i2s->bclk_ratio;
+
+       ucontrol->value.integer.value[0] = i2s->fsync_width;
 
        return 0;
 }
 
-static int tegra210_i2s_put_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
        int value = ucontrol->value.integer.value[0];
 
-       if (strstr(kcontrol->id.name, "Loopback")) {
-               i2s->loopback = value;
+       if (value == i2s->fsync_width)
+               return 0;
 
-               regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
-                                  I2S_CTRL_LPBK_MASK,
-                                  i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+       i2s->fsync_width = value;
 
-       } else if (strstr(kcontrol->id.name, "FSYNC Width")) {
-               /*
-                * Frame sync width is used only for FSYNC modes and not
-                * applicable for LRCK modes. Reset value for this field is "0",
-                * which means the width is one bit clock wide.
-                * The width requirement may depend on the codec and in such
-                * cases mixer control is used to update custom values. A value
-                * of "N" here means, width is "N + 1" bit clock wide.
-                */
-               i2s->fsync_width = value;
-
-               regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
-                                  I2S_CTRL_FSYNC_WIDTH_MASK,
-                                  i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
-
-       } else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) {
-               i2s->stereo_to_mono[I2S_TX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) {
-               i2s->mono_to_stereo[I2S_TX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) {
-               i2s->stereo_to_mono[I2S_RX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) {
-               i2s->mono_to_stereo[I2S_RX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) {
-               i2s->rx_fifo_th = value;
-       } else if (strstr(kcontrol->id.name, "BCLK Ratio")) {
-               i2s->bclk_ratio = value;
-       }
+       /*
+        * Frame sync width is used only for FSYNC modes and not
+        * applicable for LRCK modes. Reset value for this field is "0",
+        * which means the width is one bit clock wide.
+        * The width requirement may depend on the codec and in such
+        * cases mixer control is used to update custom values. A value
+        * of "N" here means, width is "N + 1" bit clock wide.
+        */
+       regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
+                          I2S_CTRL_FSYNC_WIDTH_MASK,
+                          i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
+
+       return 1;
+}
+
+static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->stereo_to_mono[I2S_TX_PATH])
+               return 0;
+
+       i2s->stereo_to_mono[I2S_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->mono_to_stereo[I2S_TX_PATH])
+               return 0;
+
+       i2s->mono_to_stereo[I2S_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->stereo_to_mono[I2S_RX_PATH])
+               return 0;
+
+       i2s->stereo_to_mono[I2S_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->mono_to_stereo[I2S_RX_PATH])
+               return 0;
+
+       i2s->mono_to_stereo[I2S_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.integer.value[0] = i2s->rx_fifo_th;
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->rx_fifo_th)
+               return 0;
+
+       i2s->rx_fifo_th = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.integer.value[0] = i2s->bclk_ratio;
+
+       return 0;
+}
+
+static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->bclk_ratio)
+               return 0;
+
+       i2s->bclk_ratio = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
+                                          unsigned int ratio)
+{
+       struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+       i2s->bclk_ratio = ratio;
 
        return 0;
 }
@@ -598,22 +748,28 @@ static const struct soc_enum tegra210_i2s_stereo_conv_enum =
                        tegra210_i2s_stereo_conv_text);
 
 static const struct snd_kcontrol_new tegra210_i2s_controls[] = {
-       SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
-       SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
+       SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback,
+                      tegra210_i2s_put_loopback),
+       SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0,
+                      tegra210_i2s_get_fsync_width,
+                      tegra210_i2s_put_fsync_width),
        SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_cget_stereo_to_mono,
+                    tegra210_i2s_cput_stereo_to_mono),
        SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_cget_mono_to_stereo,
+                    tegra210_i2s_cput_mono_to_stereo),
        SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_pget_mono_to_stereo,
+                    tegra210_i2s_pput_mono_to_stereo),
        SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_pget_stereo_to_mono,
+                    tegra210_i2s_pput_stereo_to_mono),
        SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1,
-                      0, tegra210_i2s_get_control, tegra210_i2s_put_control),
-       SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
+                      0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th),
+       SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0,
+                      tegra210_i2s_get_bclk_ratio,
+                      tegra210_i2s_put_bclk_ratio),
 };
 
 static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = {
index 55e61776c565ad68cb24f86e59953e4bb804c69c..16e679a9565825c41bc3c2a0646aa67439efbfc0 100644 (file)
@@ -192,24 +192,24 @@ static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
-static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
-                                  struct snd_ctl_elem_value *ucontrol)
+static int tegra210_mixer_apply_gain(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol,
+                                    bool instant_gain)
 {
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt);
        unsigned int reg = mc->reg, id;
-       bool instant_gain = false;
        int err;
 
-       if (strstr(kcontrol->id.name, "Instant Gain Volume"))
-               instant_gain = true;
-
        /* Save gain value for specific MIXER input */
        id = (reg - TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_0) /
             TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_STRIDE;
 
+       if (mixer->gain_value[id] == ucontrol->value.integer.value[0])
+               return 0;
+
        mixer->gain_value[id] = ucontrol->value.integer.value[0];
 
        err = tegra210_mixer_configure_gain(cmpnt, id, instant_gain);
@@ -221,6 +221,18 @@ static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
        return 1;
 }
 
+static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
+                                  struct snd_ctl_elem_value *ucontrol)
+{
+       return tegra210_mixer_apply_gain(kcontrol, ucontrol, false);
+}
+
+static int tegra210_mixer_put_instant_gain(struct snd_kcontrol *kcontrol,
+                                          struct snd_ctl_elem_value *ucontrol)
+{
+       return tegra210_mixer_apply_gain(kcontrol, ucontrol, true);
+}
+
 static int tegra210_mixer_set_audio_cif(struct tegra210_mixer *mixer,
                                        struct snd_pcm_hw_params *params,
                                        unsigned int reg,
@@ -388,7 +400,7 @@ ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG);
        SOC_SINGLE_EXT("RX" #id " Instant Gain Volume",         \
                       MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0,    \
                       0x20000, 0, tegra210_mixer_get_gain,     \
-                      tegra210_mixer_put_gain),
+                      tegra210_mixer_put_instant_gain),
 
 /* Volume controls for all MIXER inputs */
 static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = {
@@ -654,8 +666,8 @@ static int tegra210_mixer_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_mixer_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_mixer_runtime_suspend,
                           tegra210_mixer_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_mixer_driver = {
index 7b9c7006e4197e5fc3537b448de461285b019a2a..acf59328dcb6dedd5c9f324b7d0b1248152a08d2 100644 (file)
@@ -136,7 +136,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
        unsigned int value;
-       u8 mute_mask;
+       u8 new_mask, old_mask;
        int err;
 
        pm_runtime_get_sync(cmpnt->dev);
@@ -148,15 +148,23 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
        if (err < 0)
                goto end;
 
-       mute_mask = ucontrol->value.integer.value[0];
+       regmap_read(mvc->regmap, TEGRA210_MVC_CTRL, &value);
+
+       old_mask = (value >> TEGRA210_MVC_MUTE_SHIFT) & TEGRA210_MUTE_MASK_EN;
+       new_mask = ucontrol->value.integer.value[0];
+
+       if (new_mask == old_mask) {
+               err = 0;
+               goto end;
+       }
 
        err = regmap_update_bits(mvc->regmap, mc->reg,
                                 TEGRA210_MVC_MUTE_MASK,
-                                mute_mask << TEGRA210_MVC_MUTE_SHIFT);
+                                new_mask << TEGRA210_MVC_MUTE_SHIFT);
        if (err < 0)
                goto end;
 
-       return 1;
+       err = 1;
 
 end:
        pm_runtime_put(cmpnt->dev);
@@ -195,7 +203,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
        unsigned int reg = mc->reg;
        unsigned int value;
        u8 chan;
-       int err;
+       int err, old_volume;
 
        pm_runtime_get_sync(cmpnt->dev);
 
@@ -207,10 +215,16 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
                goto end;
 
        chan = (reg - TEGRA210_MVC_TARGET_VOL) / REG_SIZE;
+       old_volume = mvc->volume[chan];
 
        tegra210_mvc_conv_vol(mvc, chan,
                              ucontrol->value.integer.value[0]);
 
+       if (mvc->volume[chan] == old_volume) {
+               err = 0;
+               goto end;
+       }
+
        /* Configure init volume same as target volume */
        regmap_write(mvc->regmap,
                TEGRA210_MVC_REG_OFFSET(TEGRA210_MVC_INIT_VOL, chan),
@@ -222,7 +236,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
                           TEGRA210_MVC_VOLUME_SWITCH_MASK,
                           TEGRA210_MVC_VOLUME_SWITCH_TRIGGER);
 
-       return 1;
+       err = 1;
 
 end:
        pm_runtime_put(cmpnt->dev);
@@ -275,7 +289,7 @@ static int tegra210_mvc_get_curve_type(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
 
-       ucontrol->value.integer.value[0] = mvc->curve_type;
+       ucontrol->value.enumerated.item[0] = mvc->curve_type;
 
        return 0;
 }
@@ -285,7 +299,7 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
-       int value;
+       unsigned int value;
 
        regmap_read(mvc->regmap, TEGRA210_MVC_ENABLE, &value);
        if (value & TEGRA210_MVC_EN) {
@@ -294,10 +308,10 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
                return -EINVAL;
        }
 
-       if (mvc->curve_type == ucontrol->value.integer.value[0])
+       if (mvc->curve_type == ucontrol->value.enumerated.item[0])
                return 0;
 
-       mvc->curve_type = ucontrol->value.integer.value[0];
+       mvc->curve_type = ucontrol->value.enumerated.item[0];
 
        tegra210_mvc_reset_vol_settings(mvc, cmpnt->dev);
 
@@ -625,8 +639,8 @@ static int tegra210_mvc_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_mvc_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_mvc_runtime_suspend,
                           tegra210_mvc_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_mvc_driver = {
index dc477ee1b82cd06041bed5ca4940a21d856f1398..368f077e7bee750e92b36b8dd10a30f0abb2a72e 100644 (file)
@@ -3244,46 +3244,107 @@ static int tegra210_sfc_init(struct snd_soc_dapm_widget *w,
        return tegra210_sfc_write_coeff_ram(cmpnt);
 }
 
-static int tegra210_sfc_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iget_stereo_to_mono(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
 
-       if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
-               ucontrol->value.integer.value[0] =
-                       sfc->stereo_to_mono[SFC_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
-               ucontrol->value.integer.value[0] =
-                       sfc->mono_to_stereo[SFC_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
-               ucontrol->value.integer.value[0] =
-                       sfc->stereo_to_mono[SFC_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
-               ucontrol->value.integer.value[0] =
-                       sfc->mono_to_stereo[SFC_TX_PATH];
+       ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_RX_PATH];
 
        return 0;
 }
 
-static int tegra210_sfc_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iput_stereo_to_mono(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
-       int value = ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
-               sfc->stereo_to_mono[SFC_RX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
-               sfc->mono_to_stereo[SFC_RX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
-               sfc->stereo_to_mono[SFC_TX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
-               sfc->mono_to_stereo[SFC_TX_PATH] = value;
-       else
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->stereo_to_mono[SFC_RX_PATH])
+               return 0;
+
+       sfc->stereo_to_mono[SFC_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_iget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_iput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->mono_to_stereo[SFC_RX_PATH])
                return 0;
 
+       sfc->mono_to_stereo[SFC_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_oget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_oput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->stereo_to_mono[SFC_TX_PATH])
+               return 0;
+
+       sfc->stereo_to_mono[SFC_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_oget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_oput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->mono_to_stereo[SFC_TX_PATH])
+               return 0;
+
+       sfc->mono_to_stereo[SFC_TX_PATH] = value;
+
        return 1;
 }
 
@@ -3384,13 +3445,17 @@ static const struct soc_enum tegra210_sfc_mono_conv_enum =
 
 static const struct snd_kcontrol_new tegra210_sfc_controls[] = {
        SOC_ENUM_EXT("Input Stereo To Mono", tegra210_sfc_stereo_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_iget_stereo_to_mono,
+                    tegra210_sfc_iput_stereo_to_mono),
        SOC_ENUM_EXT("Input Mono To Stereo", tegra210_sfc_mono_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_iget_mono_to_stereo,
+                    tegra210_sfc_iput_mono_to_stereo),
        SOC_ENUM_EXT("Output Stereo To Mono", tegra210_sfc_stereo_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_oget_stereo_to_mono,
+                    tegra210_sfc_oput_stereo_to_mono),
        SOC_ENUM_EXT("Output Mono To Stereo", tegra210_sfc_mono_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_oget_mono_to_stereo,
+                    tegra210_sfc_oput_mono_to_stereo),
 };
 
 static const struct snd_soc_component_driver tegra210_sfc_cmpnt = {
@@ -3529,8 +3594,8 @@ static int tegra210_sfc_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_sfc_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_sfc_runtime_suspend,
                           tegra210_sfc_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_sfc_driver = {
index d489c1de3baec1b84d87bf28ca6a849e0e1ab37c..823b6b8de942d70a58eee846e0dca680899af630 100644 (file)
@@ -3016,11 +3016,11 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = {
 
 
 static const struct snd_djm_device snd_djm_devices[] = {
-       SND_DJM_DEVICE(250mk2),
-       SND_DJM_DEVICE(750),
-       SND_DJM_DEVICE(750mk2),
-       SND_DJM_DEVICE(850),
-       SND_DJM_DEVICE(900nxs2)
+       [SND_DJM_250MK2_IDX] = SND_DJM_DEVICE(250mk2),
+       [SND_DJM_750_IDX] = SND_DJM_DEVICE(750),
+       [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+       [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+       [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
 };
 
 
index 95ec8eec1bb046f2bc1448fc31e363c331126c39..cec6e91afea2403cc4fd56580c07a01291be3b26 100644 (file)
@@ -581,6 +581,12 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        return 0;
 }
 
+/* free-wheeling mode? (e.g. dmix) */
+static int in_free_wheeling_mode(struct snd_pcm_runtime *runtime)
+{
+       return runtime->stop_threshold > runtime->buffer_size;
+}
+
 /* check whether early start is needed for playback stream */
 static int lowlatency_playback_available(struct snd_pcm_runtime *runtime,
                                         struct snd_usb_substream *subs)
@@ -592,8 +598,7 @@ static int lowlatency_playback_available(struct snd_pcm_runtime *runtime,
        /* disabled via module option? */
        if (!chip->lowlatency)
                return false;
-       /* free-wheeling mode? (e.g. dmix) */
-       if (runtime->stop_threshold > runtime->buffer_size)
+       if (in_free_wheeling_mode(runtime))
                return false;
        /* implicit feedback mode has own operation mode */
        if (snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint))
@@ -635,7 +640,8 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
        runtime->delay = 0;
 
        subs->lowlatency_playback = lowlatency_playback_available(runtime, subs);
-       if (!subs->lowlatency_playback)
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+           !subs->lowlatency_playback)
                ret = start_endpoints(subs);
 
  unlock:
@@ -1552,6 +1558,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
                                              subs);
                if (subs->lowlatency_playback &&
                    cmd == SNDRV_PCM_TRIGGER_START) {
+                       if (in_free_wheeling_mode(substream->runtime))
+                               subs->lowlatency_playback = false;
                        err = start_endpoints(subs);
                        if (err < 0) {
                                snd_usb_endpoint_set_callback(subs->data_endpoint,
index 2cb0a19be2b858883c25be4224da2c7febeb6a22..4041748c12e515c090b62f9918a0877ccd0d574c 100644 (file)
@@ -358,6 +358,7 @@ static struct xenbus_driver xen_driver = {
        .probe = xen_drv_probe,
        .remove = xen_drv_remove,
        .otherend_changed = sndback_changed,
+       .not_essential = true,
 };
 
 static int __init xen_drv_init(void)
index d0ce5cfd3ac1488d22460989a61d39e5db753800..d5b5f2ab87a0b76fa42ab5313146d0a622cd2162 100644 (file)
 #define X86_FEATURE_XSAVEC             (10*32+ 1) /* XSAVEC instruction */
 #define X86_FEATURE_XGETBV1            (10*32+ 2) /* XGETBV with ECX = 1 instruction */
 #define X86_FEATURE_XSAVES             (10*32+ 3) /* XSAVES/XRSTORS instructions */
+#define X86_FEATURE_XFD                        (10*32+ 4) /* "" eXtended Feature Disabling */
 
 /*
  * Extended auxiliary flags: Linux defined - for features scattered in various
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16                (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_AMX_TILE           (18*32+24) /* AMX tile Support */
 
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
index 2ef1f6513c68e30b473f15f905557ba83245d22c..5a776a08f78cb3b5c9e75e8b018abab051d08262 100644 (file)
@@ -504,4 +504,8 @@ struct kvm_pmu_event_filter {
 #define KVM_PMU_EVENT_ALLOW 0
 #define KVM_PMU_EVENT_DENY 1
 
+/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
+#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
+#define   KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
+
 #endif /* _ASM_X86_KVM_H */
index a59cb0ee609cd9e23b480a8371e64613135f0f01..73409e27be01f0834cb79fa6c8f3bc3ff65f4241 100644 (file)
@@ -83,6 +83,7 @@ struct btf_id {
                int      cnt;
        };
        int              addr_cnt;
+       bool             is_set;
        Elf64_Addr       addr[ADDR_CNT];
 };
 
@@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj)
                         * in symbol's size, together with 'cnt' field hence
                         * that - 1.
                         */
-                       if (id)
+                       if (id) {
                                id->cnt = sym.st_size / sizeof(int) - 1;
+                               id->is_set = true;
+                       }
                } else {
                        pr_err("FAILED unsupported prefix %s\n", prefix);
                        return -1;
@@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id)
        int *ptr = data->d_buf;
        int i;
 
-       if (!id->id) {
+       if (!id->id && !id->is_set)
                pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
-       }
 
        for (i = 0; i < id->addr_cnt; i++) {
                unsigned long addr = id->addr[i];
index bbd1150578f7aefa512bc34a7c18e55d2be2bdcc..8791d0e2762b91e626f185221c82594c1dbfecb4 100644 (file)
@@ -88,5 +88,4 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OU
 
 $(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
        $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT)   \
-                   LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT)                             \
-                   LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD)
+                   CC=$(HOSTCC) LD=$(HOSTLD)
index 45a9a59828c3c09d420e8e149930363cec9668c2..ae61f464043a11fbe78d7fcebb5f2266a1b291a7 100644 (file)
@@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC :=                  \
         numa_num_possible_cpus          \
         libperl                         \
         libpython                       \
-        libpython-version               \
         libslang                        \
         libslang-include-subdir         \
         libtraceevent                   \
index 0a3244ad967307cef0793d4f8df46eae96e48bd0..1480910c792e2cb3fc6f63f858cf089e18222d70 100644 (file)
@@ -32,7 +32,6 @@ FILES=                                          \
          test-numa_num_possible_cpus.bin        \
          test-libperl.bin                       \
          test-libpython.bin                     \
-         test-libpython-version.bin             \
          test-libslang.bin                      \
          test-libslang-include-subdir.bin       \
          test-libtraceevent.bin                 \
@@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin:
 $(OUTPUT)test-libpython.bin:
        $(BUILD) $(FLAGS_PYTHON_EMBED)
 
-$(OUTPUT)test-libpython-version.bin:
-       $(BUILD)
-
 $(OUTPUT)test-libbfd.bin:
        $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
 
index 9204395272912c136b23270744ac813205d6bcbd..5ffafb967b6e4952d4b4b3b023d9207291abb167 100644 (file)
 # include "test-libpython.c"
 #undef main
 
-#define main main_test_libpython_version
-# include "test-libpython-version.c"
-#undef main
-
 #define main main_test_libperl
 # include "test-libperl.c"
 #undef main
 int main(int argc, char *argv[])
 {
        main_test_libpython();
-       main_test_libpython_version();
        main_test_libperl();
        main_test_hello();
        main_test_libelf();
@@ -200,7 +195,6 @@ int main(int argc, char *argv[])
        main_test_timerfd();
        main_test_stackprotector_all();
        main_test_libdw_dwarf_unwind();
-       main_test_sync_compare_and_swap(argc, argv);
        main_test_zlib();
        main_test_pthread_attr_setaffinity_np();
        main_test_pthread_barrier();
diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
deleted file mode 100644 (file)
index 47714b9..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <Python.h>
-
-#if PY_VERSION_HEX >= 0x03000000
-       #error
-#endif
-
-int main(void)
-{
-       return 0;
-}
diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h
deleted file mode 100644 (file)
index 72d595c..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
-#define _LIBLOCKDEP_DEBUG_LOCKS_H_
-
-#include <stddef.h>
-#include <linux/compiler.h>
-#include <asm/bug.h>
-
-#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
-
-extern bool debug_locks;
-extern bool debug_locks_silent;
-
-#endif
diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h
deleted file mode 100644 (file)
index b25580b..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
-#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
-
-#define SOFTIRQ_BITS   0UL
-#define HARDIRQ_BITS   0UL
-#define SOFTIRQ_SHIFT  0UL
-#define HARDIRQ_SHIFT  0UL
-#define hardirq_count()        0UL
-#define softirq_count()        0UL
-
-#endif
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
deleted file mode 100644 (file)
index 501262a..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-
-# define lockdep_hardirq_context()     0
-# define lockdep_softirq_context(p)    0
-# define lockdep_hardirqs_enabled()    0
-# define lockdep_softirqs_enabled(p)   0
-# define lockdep_hardirq_enter()       do { } while (0)
-# define lockdep_hardirq_exit()                do { } while (0)
-# define lockdep_softirq_enter()       do { } while (0)
-# define lockdep_softirq_exit()                do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-
-# define stop_critical_timings() do { } while (0)
-# define start_critical_timings() do { } while (0)
-
-#define raw_local_irq_disable() do { } while (0)
-#define raw_local_irq_enable() do { } while (0)
-#define raw_local_irq_save(flags) ((flags) = 0)
-#define raw_local_irq_restore(flags) ((void)(flags))
-#define raw_local_save_flags(flags) ((flags) = 0)
-#define raw_irqs_disabled_flags(flags) ((void)(flags))
-#define raw_irqs_disabled() 0
-#define raw_safe_halt()
-
-#define local_irq_enable() do { } while (0)
-#define local_irq_disable() do { } while (0)
-#define local_irq_save(flags) ((flags) = 0)
-#define local_irq_restore(flags) ((void)(flags))
-#define local_save_flags(flags)        ((flags) = 0)
-#define irqs_disabled() (1)
-#define irqs_disabled_flags(flags) ((void)(flags), 0)
-#define safe_halt() do { } while (0)
-
-#define trace_lock_release(x, y)
-#define trace_lock_acquire(a, b, c, d, e, f, g)
-
-#endif
index a7e54a08fb54c41b7b5da231119caab66d5431cc..3e8df500cfbd41d4139906348dfb259fb4641fab 100644 (file)
@@ -7,6 +7,7 @@
 #include <assert.h>
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
+#include <linux/math.h>
 #include <endian.h>
 #include <byteswap.h>
 
@@ -14,8 +15,6 @@
 #define UINT_MAX       (~0U)
 #endif
 
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-
 #define PERF_ALIGN(x, a)       __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
 #define __PERF_ALIGN_MASK(x, mask)     (((x)+(mask))&~(mask))
 
        _min1 < _min2 ? _min1 : _min2; })
 #endif
 
-#ifndef roundup
-#define roundup(x, y) (                                \
-{                                                      \
-       const typeof(y) __y = y;                       \
-       (((x) + (__y - 1)) / __y) * __y;               \
-}                                                      \
-)
-#endif
-
 #ifndef BUG_ON
 #ifdef NDEBUG
 #define BUG_ON(cond) do { if (cond) {} } while (0)
@@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
 #define current_gfp_context(k) 0
 #define synchronize_rcu()
 
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
deleted file mode 100644 (file)
index e569972..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LOCKDEP_H_
-#define _LIBLOCKDEP_LOCKDEP_H_
-
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#include <string.h>
-#include <limits.h>
-#include <linux/utsname.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/kern_levels.h>
-#include <linux/err.h>
-#include <linux/rcu.h>
-#include <linux/list.h>
-#include <linux/hardirq.h>
-#include <unistd.h>
-
-#define MAX_LOCK_DEPTH 63UL
-
-#define asmlinkage
-#define __visible
-
-#include "../../../include/linux/lockdep.h"
-
-struct task_struct {
-       u64 curr_chain_key;
-       int lockdep_depth;
-       unsigned int lockdep_recursion;
-       struct held_lock held_locks[MAX_LOCK_DEPTH];
-       gfp_t lockdep_reclaim_gfp;
-       int pid;
-       int state;
-       char comm[17];
-};
-
-#define TASK_RUNNING 0
-
-extern struct task_struct *__curr(void);
-
-#define current (__curr())
-
-static inline int debug_locks_off(void)
-{
-       return 1;
-}
-
-#define task_pid_nr(tsk) ((tsk)->pid)
-
-#define KSYM_NAME_LEN 128
-#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
-#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define pr_warn pr_err
-#define pr_cont pr_err
-
-#define list_del_rcu list_del
-
-#define atomic_t unsigned long
-#define atomic_inc(x) ((*(x))++)
-
-#define print_tainted() ""
-#define static_obj(x) 1
-
-#define debug_show_all_locks()
-extern void debug_check_no_locks_held(void);
-
-static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
-{
-       return false;
-}
-
-#endif
diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h
new file mode 100644 (file)
index 0000000..4e7af99
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _TOOLS_MATH_H
+#define _TOOLS_MATH_H
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#ifndef roundup
+#define roundup(x, y) (                                \
+{                                                      \
+       const typeof(y) __y = y;                       \
+       (((x) + (__y - 1)) / __y) * __y;               \
+}                                                      \
+)
+#endif
+
+#endif
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
deleted file mode 100644 (file)
index 8b3b03b..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
-#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
-
-#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
index c934572d935cc116ba4b7607706973e0b4a32439..622266b197d0dd33c10b8da6a8fb105e47ad323e 100644 (file)
@@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
        return true;
 }
 
-#include <linux/lockdep.h>
-
 #endif
diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h
deleted file mode 100644 (file)
index ae343ac..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
-#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
-
-#include <execinfo.h>
-
-struct stack_trace {
-       unsigned int nr_entries, max_entries;
-       unsigned long *entries;
-       int skip;
-};
-
-static inline void print_stack_trace(struct stack_trace *trace, int spaces)
-{
-       backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
-}
-
-#define save_stack_trace(trace)        \
-       ((trace)->nr_entries =  \
-               backtrace((void **)(trace)->entries, (trace)->max_entries))
-
-static inline int dump_stack(void)
-{
-       void *array[64];
-       size_t size;
-
-       size = backtrace(array, 64);
-       backtrace_symbols_fd(array, size, 1);
-
-       return 0;
-}
-
-#endif
index b3610fdd1feec345dee8df36bce3c9c4895c98a0..eebd3894fe89a0dfda36d79e1a291743220db613 100644 (file)
@@ -7,24 +7,23 @@
 
 /* This struct should be in sync with struct rtnl_link_stats64 */
 struct rtnl_link_stats {
-       __u32   rx_packets;             /* total packets received       */
-       __u32   tx_packets;             /* total packets transmitted    */
-       __u32   rx_bytes;               /* total bytes received         */
-       __u32   tx_bytes;               /* total bytes transmitted      */
-       __u32   rx_errors;              /* bad packets received         */
-       __u32   tx_errors;              /* packet transmit problems     */
-       __u32   rx_dropped;             /* no space in linux buffers    */
-       __u32   tx_dropped;             /* no space available in linux  */
-       __u32   multicast;              /* multicast packets received   */
+       __u32   rx_packets;
+       __u32   tx_packets;
+       __u32   rx_bytes;
+       __u32   tx_bytes;
+       __u32   rx_errors;
+       __u32   tx_errors;
+       __u32   rx_dropped;
+       __u32   tx_dropped;
+       __u32   multicast;
        __u32   collisions;
-
        /* detailed rx_errors: */
        __u32   rx_length_errors;
-       __u32   rx_over_errors;         /* receiver ring buff overflow  */
-       __u32   rx_crc_errors;          /* recved pkt with crc error    */
-       __u32   rx_frame_errors;        /* recv'd frame alignment error */
-       __u32   rx_fifo_errors;         /* recv'r fifo overrun          */
-       __u32   rx_missed_errors;       /* receiver missed packet       */
+       __u32   rx_over_errors;
+       __u32   rx_crc_errors;
+       __u32   rx_frame_errors;
+       __u32   rx_fifo_errors;
+       __u32   rx_missed_errors;
 
        /* detailed tx_errors */
        __u32   tx_aborted_errors;
@@ -37,29 +36,201 @@ struct rtnl_link_stats {
        __u32   rx_compressed;
        __u32   tx_compressed;
 
-       __u32   rx_nohandler;           /* dropped, no handler found    */
+       __u32   rx_nohandler;
 };
 
-/* The main device statistics structure */
+/**
+ * struct rtnl_link_stats64 - The main device statistics structure.
+ *
+ * @rx_packets: Number of good packets received by the interface.
+ *   For hardware interfaces counts all good packets received from the device
+ *   by the host, including packets which host had to drop at various stages
+ *   of processing (even in the driver).
+ *
+ * @tx_packets: Number of packets successfully transmitted.
+ *   For hardware interfaces counts packets which host was able to successfully
+ *   hand over to the device, which does not necessarily mean that packets
+ *   had been successfully transmitted out of the device, only that device
+ *   acknowledged it copied them out of host memory.
+ *
+ * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
+ *
+ *   For IEEE 802.3 devices should count the length of Ethernet Frames
+ *   excluding the FCS.
+ *
+ * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
+ *
+ *   For IEEE 802.3 devices should count the length of Ethernet Frames
+ *   excluding the FCS.
+ *
+ * @rx_errors: Total number of bad packets received on this network device.
+ *   This counter must include events counted by @rx_length_errors,
+ *   @rx_crc_errors, @rx_frame_errors and other errors not otherwise
+ *   counted.
+ *
+ * @tx_errors: Total number of transmit problems.
+ *   This counter must include events counter by @tx_aborted_errors,
+ *   @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
+ *   @tx_window_errors and other errors not otherwise counted.
+ *
+ * @rx_dropped: Number of packets received but not processed,
+ *   e.g. due to lack of resources or unsupported protocol.
+ *   For hardware interfaces this counter may include packets discarded
+ *   due to L2 address filtering but should not include packets dropped
+ *   by the device due to buffer exhaustion which are counted separately in
+ *   @rx_missed_errors (since procfs folds those two counters together).
+ *
+ * @tx_dropped: Number of packets dropped on their way to transmission,
+ *   e.g. due to lack of resources.
+ *
+ * @multicast: Multicast packets received.
+ *   For hardware interfaces this statistic is commonly calculated
+ *   at the device level (unlike @rx_packets) and therefore may include
+ *   packets which did not reach the host.
+ *
+ *   For IEEE 802.3 devices this counter may be equivalent to:
+ *
+ *    - 30.3.1.1.21 aMulticastFramesReceivedOK
+ *
+ * @collisions: Number of collisions during packet transmissions.
+ *
+ * @rx_length_errors: Number of packets dropped due to invalid length.
+ *   Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ *   For IEEE 802.3 devices this counter should be equivalent to a sum
+ *   of the following attributes:
+ *
+ *    - 30.3.1.1.23 aInRangeLengthErrors
+ *    - 30.3.1.1.24 aOutOfRangeLengthField
+ *    - 30.3.1.1.25 aFrameTooLongErrors
+ *
+ * @rx_over_errors: Receiver FIFO overflow event counter.
+ *
+ *   Historically the count of overflow events. Such events may be
+ *   reported in the receive descriptors or via interrupts, and may
+ *   not correspond one-to-one with dropped packets.
+ *
+ *   The recommended interpretation for high speed interfaces is -
+ *   number of packets dropped because they did not fit into buffers
+ *   provided by the host, e.g. packets larger than MTU or next buffer
+ *   in the ring was not available for a scatter transfer.
+ *
+ *   Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ *   This statistics was historically used interchangeably with
+ *   @rx_fifo_errors.
+ *
+ *   This statistic corresponds to hardware events and is not commonly used
+ *   on software devices.
+ *
+ * @rx_crc_errors: Number of packets received with a CRC error.
+ *   Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ *   For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ *    - 30.3.1.1.6 aFrameCheckSequenceErrors
+ *
+ * @rx_frame_errors: Receiver frame alignment errors.
+ *   Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ *   For IEEE 802.3 devices this counter should be equivalent to:
+ *
+ *    - 30.3.1.1.7 aAlignmentErrors
+ *
+ * @rx_fifo_errors: Receiver FIFO error counter.
+ *
+ *   Historically the count of overflow events. Those events may be
+ *   reported in the receive descriptors or via interrupts, and may
+ *   not correspond one-to-one with dropped packets.
+ *
+ *   This statistics was used interchangeably with @rx_over_errors.
+ *   Not recommended for use in drivers for high speed interfaces.
+ *
+ *   This statistic is used on software devices, e.g. to count software
+ *   packet queue overflow (can) or sequencing errors (GRE).
+ *
+ * @rx_missed_errors: Count of packets missed by the host.
+ *   Folded into the "drop" counter in `/proc/net/dev`.
+ *
+ *   Counts number of packets dropped by the device due to lack
+ *   of buffer space. This usually indicates that the host interface
+ *   is slower than the network interface, or host is not keeping up
+ *   with the receive packet rate.
+ *
+ *   This statistic corresponds to hardware events and is not used
+ *   on software devices.
+ *
+ * @tx_aborted_errors:
+ *   Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *   For IEEE 802.3 devices capable of half-duplex operation this counter
+ *   must be equivalent to:
+ *
+ *    - 30.3.1.1.11 aFramesAbortedDueToXSColls
+ *
+ *   High speed interfaces may use this counter as a general device
+ *   discard counter.
+ *
+ * @tx_carrier_errors: Number of frame transmission errors due to loss
+ *   of carrier during transmission.
+ *   Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ *   For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ *    - 30.3.1.1.13 aCarrierSenseErrors
+ *
+ * @tx_fifo_errors: Number of frame transmission errors due to device
+ *   FIFO underrun / underflow. This condition occurs when the device
+ *   begins transmission of a frame but is unable to deliver the
+ *   entire frame to the transmitter in time for transmission.
+ *   Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
+ *   old half-duplex Ethernet.
+ *   Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ *   For IEEE 802.3 devices possibly equivalent to:
+ *
+ *    - 30.3.2.1.4 aSQETestErrors
+ *
+ * @tx_window_errors: Number of frame transmission errors due
+ *   to late collisions (for Ethernet - after the first 64B of transmission).
+ *   Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ *   For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ *    - 30.3.1.1.10 aLateCollisions
+ *
+ * @rx_compressed: Number of correctly received compressed packets.
+ *   This counters is only meaningful for interfaces which support
+ *   packet compression (e.g. CSLIP, PPP).
+ *
+ * @tx_compressed: Number of transmitted compressed packets.
+ *   This counters is only meaningful for interfaces which support
+ *   packet compression (e.g. CSLIP, PPP).
+ *
+ * @rx_nohandler: Number of packets received on the interface
+ *   but dropped by the networking stack because the device is
+ *   not designated to receive packets (e.g. backup link in a bond).
+ */
 struct rtnl_link_stats64 {
-       __u64   rx_packets;             /* total packets received       */
-       __u64   tx_packets;             /* total packets transmitted    */
-       __u64   rx_bytes;               /* total bytes received         */
-       __u64   tx_bytes;               /* total bytes transmitted      */
-       __u64   rx_errors;              /* bad packets received         */
-       __u64   tx_errors;              /* packet transmit problems     */
-       __u64   rx_dropped;             /* no space in linux buffers    */
-       __u64   tx_dropped;             /* no space available in linux  */
-       __u64   multicast;              /* multicast packets received   */
+       __u64   rx_packets;
+       __u64   tx_packets;
+       __u64   rx_bytes;
+       __u64   tx_bytes;
+       __u64   rx_errors;
+       __u64   tx_errors;
+       __u64   rx_dropped;
+       __u64   tx_dropped;
+       __u64   multicast;
        __u64   collisions;
 
        /* detailed rx_errors: */
        __u64   rx_length_errors;
-       __u64   rx_over_errors;         /* receiver ring buff overflow  */
-       __u64   rx_crc_errors;          /* recved pkt with crc error    */
-       __u64   rx_frame_errors;        /* recv'd frame alignment error */
-       __u64   rx_fifo_errors;         /* recv'r fifo overrun          */
-       __u64   rx_missed_errors;       /* receiver missed packet       */
+       __u64   rx_over_errors;
+       __u64   rx_crc_errors;
+       __u64   rx_frame_errors;
+       __u64   rx_fifo_errors;
+       __u64   rx_missed_errors;
 
        /* detailed tx_errors */
        __u64   tx_aborted_errors;
@@ -71,8 +242,7 @@ struct rtnl_link_stats64 {
        /* for cslip etc */
        __u64   rx_compressed;
        __u64   tx_compressed;
-
-       __u64   rx_nohandler;           /* dropped, no handler found    */
+       __u64   rx_nohandler;
 };
 
 /* The struct should be in sync with struct ifmap */
@@ -170,12 +340,29 @@ enum {
        IFLA_PROP_LIST,
        IFLA_ALT_IFNAME, /* Alternative ifname */
        IFLA_PERM_ADDRESS,
+       IFLA_PROTO_DOWN_REASON,
+
+       /* device (sysfs) name as parent, used instead
+        * of IFLA_LINK where there's no parent netdev
+        */
+       IFLA_PARENT_DEV_NAME,
+       IFLA_PARENT_DEV_BUS_NAME,
+
        __IFLA_MAX
 };
 
 
 #define IFLA_MAX (__IFLA_MAX - 1)
 
+enum {
+       IFLA_PROTO_DOWN_REASON_UNSPEC,
+       IFLA_PROTO_DOWN_REASON_MASK,    /* u32, mask for reason bits */
+       IFLA_PROTO_DOWN_REASON_VALUE,   /* u32, reason bit value */
+
+       __IFLA_PROTO_DOWN_REASON_CNT,
+       IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
+};
+
 /* backwards compatibility for userspace */
 #ifndef __KERNEL__
 #define IFLA_RTA(r)  ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
@@ -293,6 +480,7 @@ enum {
        IFLA_BR_MCAST_MLD_VERSION,
        IFLA_BR_VLAN_STATS_PER_PORT,
        IFLA_BR_MULTI_BOOLOPT,
+       IFLA_BR_MCAST_QUERIER_STATE,
        __IFLA_BR_MAX,
 };
 
@@ -346,6 +534,8 @@ enum {
        IFLA_BRPORT_BACKUP_PORT,
        IFLA_BRPORT_MRP_RING_OPEN,
        IFLA_BRPORT_MRP_IN_OPEN,
+       IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+       IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -433,6 +623,7 @@ enum macvlan_macaddr_mode {
 };
 
 #define MACVLAN_FLAG_NOPROMISC 1
+#define MACVLAN_FLAG_NODST     2 /* skip dst macvlan if matching src macvlan */
 
 /* VRF section */
 enum {
@@ -597,6 +788,18 @@ enum ifla_geneve_df {
        GENEVE_DF_MAX = __GENEVE_DF_END - 1,
 };
 
+/* Bareudp section  */
+enum {
+       IFLA_BAREUDP_UNSPEC,
+       IFLA_BAREUDP_PORT,
+       IFLA_BAREUDP_ETHERTYPE,
+       IFLA_BAREUDP_SRCPORT_MIN,
+       IFLA_BAREUDP_MULTIPROTO_MODE,
+       __IFLA_BAREUDP_MAX
+};
+
+#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1)
+
 /* PPP section */
 enum {
        IFLA_PPP_UNSPEC,
@@ -899,7 +1102,14 @@ enum {
 #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
 
 
-/* HSR section */
+/* HSR/PRP section, both uses same interface */
+
+/* Different redundancy protocols for hsr device */
+enum {
+       HSR_PROTOCOL_HSR,
+       HSR_PROTOCOL_PRP,
+       HSR_PROTOCOL_MAX,
+};
 
 enum {
        IFLA_HSR_UNSPEC,
@@ -909,6 +1119,9 @@ enum {
        IFLA_HSR_SUPERVISION_ADDR,      /* Supervision frame multicast addr */
        IFLA_HSR_SEQ_NR,
        IFLA_HSR_VERSION,               /* HSR version */
+       IFLA_HSR_PROTOCOL,              /* Indicate different protocol than
+                                        * HSR. For example PRP.
+                                        */
        __IFLA_HSR_MAX,
 };
 
@@ -1033,6 +1246,8 @@ enum {
 #define RMNET_FLAGS_INGRESS_MAP_COMMANDS          (1U << 1)
 #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4           (1U << 2)
 #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4            (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5           (1U << 4)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5            (1U << 5)
 
 enum {
        IFLA_RMNET_UNSPEC,
@@ -1048,4 +1263,14 @@ struct ifla_rmnet_flags {
        __u32   mask;
 };
 
+/* MCTP section */
+
+enum {
+       IFLA_MCTP_UNSPEC,
+       IFLA_MCTP_NET,
+       __IFLA_MCTP_MAX,
+};
+
+#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
+
 #endif /* _UAPI_LINUX_IF_LINK_H */
index a067410ebea5e4e26ae1b9fb1789a0ccc2404c4b..1daa45268de266ce9b71fbe6470a362cb4d482f6 100644 (file)
@@ -269,6 +269,7 @@ struct kvm_xen_exit {
 #define KVM_EXIT_AP_RESET_HOLD    32
 #define KVM_EXIT_X86_BUS_LOCK     33
 #define KVM_EXIT_XEN              34
+#define KVM_EXIT_RISCV_SBI        35
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -397,13 +398,23 @@ struct kvm_run {
                 * "ndata" is correct, that new fields are enumerated in "flags",
                 * and that each flag enumerates fields that are 64-bit aligned
                 * and sized (so that ndata+internal.data[] is valid/accurate).
+                *
+                * Space beyond the defined fields may be used to store arbitrary
+                * debug information relating to the emulation failure. It is
+                * accounted for in "ndata" but the format is unspecified and is
+                * not represented in "flags". Any such information is *not* ABI!
                 */
                struct {
                        __u32 suberror;
                        __u32 ndata;
                        __u64 flags;
-                       __u8  insn_size;
-                       __u8  insn_bytes[15];
+                       union {
+                               struct {
+                                       __u8  insn_size;
+                                       __u8  insn_bytes[15];
+                               };
+                       };
+                       /* Arbitrary debug data may follow. */
                } emulation_failure;
                /* KVM_EXIT_OSI */
                struct {
@@ -469,6 +480,13 @@ struct kvm_run {
                } msr;
                /* KVM_EXIT_XEN */
                struct kvm_xen_exit xen;
+               /* KVM_EXIT_RISCV_SBI */
+               struct {
+                       unsigned long extension_id;
+                       unsigned long function_id;
+                       unsigned long args[6];
+                       unsigned long ret[2];
+               } riscv_sbi;
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -1112,6 +1130,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_BINARY_STATS_FD 203
 #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
 #define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1223,11 +1242,16 @@ struct kvm_irqfd {
 
 /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags.  */
 #define KVM_CLOCK_TSC_STABLE           2
+#define KVM_CLOCK_REALTIME             (1 << 2)
+#define KVM_CLOCK_HOST_TSC             (1 << 3)
 
 struct kvm_clock_data {
        __u64 clock;
        __u32 flags;
-       __u32 pad[9];
+       __u32 pad0;
+       __u64 realtime;
+       __u64 host_tsc;
+       __u32 pad[4];
 };
 
 /* For KVM_CAP_SW_TLB */
index d26e5472fe501633b99b805d137ab806b7a06fa6..6f3df004479b37cd48b713838514af988129997f 100644 (file)
@@ -45,8 +45,8 @@ struct bpf_gen {
        int nr_fd_array;
 };
 
-void bpf_gen__init(struct bpf_gen *gen, int log_level);
-int bpf_gen__finish(struct bpf_gen *gen);
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
 void bpf_gen__free(struct bpf_gen *gen);
 void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
 void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
index 502dea53a742d7bae413eaf796daf18a9972503d..9934851ccde760c153006c9ef9a36b0583adebf6 100644 (file)
@@ -18,7 +18,7 @@
 #define MAX_USED_MAPS  64
 #define MAX_USED_PROGS 32
 #define MAX_KFUNC_DESCS 256
-#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)
+#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)
 
 /* The following structure describes the stack layout of the loader program.
  * In addition R6 contains the pointer to context.
@@ -33,8 +33,8 @@
  */
 struct loader_stack {
        __u32 btf_fd;
-       __u32 prog_fd[MAX_USED_PROGS];
        __u32 inner_map_fd;
+       __u32 prog_fd[MAX_USED_PROGS];
 };
 
 #define stack_off(field) \
@@ -42,6 +42,11 @@ struct loader_stack {
 
 #define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
 
+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+       return gen->fd_array + index * sizeof(int);
+}
+
 static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
 {
        size_t off = gen->insn_cur - gen->insn_start;
@@ -102,11 +107,15 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in
        emit(gen, insn2);
 }
 
-void bpf_gen__init(struct bpf_gen *gen, int log_level)
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
 {
-       size_t stack_sz = sizeof(struct loader_stack);
+       size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
        int i;
 
+       gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
        gen->log_level = log_level;
        /* save ctx pointer into R6 */
        emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
@@ -118,19 +127,27 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level)
        emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
        emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
 
+       /* amount of stack actually used, only used to calculate iterations, not stack offset */
+       nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
        /* jump over cleanup code */
        emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
-                             /* size of cleanup code below */
-                             (stack_sz / 4) * 3 + 2));
+                             /* size of cleanup code below (including map fd cleanup) */
+                             (nr_progs_sz / 4) * 3 + 2 +
+                             /* 6 insns for emit_sys_close_blob,
+                              * 6 insns for debug_regs in emit_sys_close_blob
+                              */
+                             nr_maps * (6 + (gen->log_level ? 6 : 0))));
 
        /* remember the label where all error branches will jump to */
        gen->cleanup_label = gen->insn_cur - gen->insn_start;
        /* emit cleanup code: close all temp FDs */
-       for (i = 0; i < stack_sz; i += 4) {
+       for (i = 0; i < nr_progs_sz; i += 4) {
                emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
                emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
                emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
        }
+       for (i = 0; i < nr_maps; i++)
+               emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
        /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
        emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
        emit(gen, BPF_EXIT_INSN());
@@ -160,8 +177,6 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
  */
 static int add_map_fd(struct bpf_gen *gen)
 {
-       if (!gen->fd_array)
-               gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
        if (gen->nr_maps == MAX_USED_MAPS) {
                pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
                gen->error = -E2BIG;
@@ -174,8 +189,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
 {
        int cur;
 
-       if (!gen->fd_array)
-               gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
        if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
                cur = add_data(gen, NULL, sizeof(int));
                return (cur - gen->fd_array) / sizeof(int);
@@ -183,11 +196,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
        return MAX_USED_MAPS + gen->nr_fd_array++;
 }
 
-static int blob_fd_array_off(struct bpf_gen *gen, int index)
-{
-       return gen->fd_array + index * sizeof(int);
-}
-
 static int insn_bytes_to_bpf_size(__u32 sz)
 {
        switch (sz) {
@@ -359,10 +367,15 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
        __emit_sys_close(gen);
 }
 
-int bpf_gen__finish(struct bpf_gen *gen)
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
 {
        int i;
 
+       if (nr_progs != gen->nr_progs || nr_maps != gen->nr_maps) {
+               pr_warn("progs/maps mismatch\n");
+               gen->error = -EFAULT;
+               return gen->error;
+       }
        emit_sys_close_stack(gen, stack_off(btf_fd));
        for (i = 0; i < gen->nr_progs; i++)
                move_stack2ctx(gen,
index a1bea1953df6739a10d49a3824ea66cc768ef31a..7c74342bb668054e8d75258d6a378be6c18d1a3c 100644 (file)
@@ -7258,7 +7258,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
        }
 
        if (obj->gen_loader)
-               bpf_gen__init(obj->gen_loader, attr->log_level);
+               bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps);
 
        err = bpf_object__probe_loading(obj);
        err = err ? : bpf_object__load_vmlinux_btf(obj, false);
@@ -7277,7 +7277,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
                for (i = 0; i < obj->nr_maps; i++)
                        obj->maps[i].fd = -1;
                if (!err)
-                       err = bpf_gen__finish(obj->gen_loader);
+                       err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
        }
 
        /* clean up fd_array */
index 81a4c543ff7ea5b966b4de08195dea3f9068b1c6..4b384c907027eb4e21cf0997c1cf2c0f5710986c 100644 (file)
@@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf)
                        return -1;
                }
                memset(sym, 0, sizeof(*sym));
+               INIT_LIST_HEAD(&sym->pv_target);
                sym->alias = sym;
 
                sym->idx = i;
index c90c7084e45a9c68b022846189c5155600626551..bdf699f6552bed6432765c7ce37abbd1d520395c 100644 (file)
@@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
            !strcmp(func->name, "_paravirt_ident_64"))
                return;
 
+       /* already added this function */
+       if (!list_empty(&func->pv_target))
+               return;
+
        list_add(&func->pv_target, &f->pv_ops[idx].targets);
        f->pv_ops[idx].clean = false;
 }
index 07e65a061fd3a13b3863346dc5d641e0a0480cc4..3df74cf5651af9dda46f214bc860a76c30a0c98d 100644 (file)
@@ -271,8 +271,6 @@ endif
 
 FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
 FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
 
 FEATURE_CHECK_LDFLAGS-libaio = -lrt
 
@@ -1010,6 +1008,9 @@ ifndef NO_AUXTRACE
   ifndef NO_AUXTRACE
     $(call detected,CONFIG_AUXTRACE)
     CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+    ifeq ($(feature-reallocarray), 0)
+      CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+    endif
   endif
 endif
 
index 1ca7bc337932bcae3e58cb8c07694096ecdc5592..e2c481fcede6bd11a5bbd4b43c02b780be0de10b 100644 (file)
 446    n64     landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    n64     process_mrelease                sys_process_mrelease
+449    n64     futex_waitv                     sys_futex_waitv
index 7bef917cc84e6499baf173cc4feef4c8430527f6..15109af9d0754d5fcb6e455532944ef1a0a14e17 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index df5261e5cfe1f28d6afc412ba5475e588f6c1dde..ed9c5c2eafad700ce45ad0178837ed3d1c9204d1 100644 (file)
 446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common    process_mrelease        sys_process_mrelease            sys_process_mrelease
+449  common    futex_waitv             sys_futex_waitv                 sys_futex_waitv
index bc5259db5fd91b51af3b376c5ee5334c137fec4c..b9d6306cc14ea0079e24876596dc8a4420e3230e 100644 (file)
@@ -820,7 +820,7 @@ static int __cmd_inject(struct perf_inject *inject)
                inject->tool.ordered_events = true;
                inject->tool.ordering_requires_timestamps = true;
                /* Allow space in the header for new attributes */
-               output_data_offset = 4096;
+               output_data_offset = roundup(8192 + session->header.data_offset, 4096);
                if (inject->strip)
                        strip_init(inject);
        }
index 8167ebfe776a790aac12ab9cb01167dd62094a2f..8ae400429870a8fc5a191b420d5711c39c86842c 100644 (file)
@@ -619,14 +619,17 @@ static int report__browse_hists(struct report *rep)
        int ret;
        struct perf_session *session = rep->session;
        struct evlist *evlist = session->evlist;
-       const char *help = perf_tip(system_path(TIPDIR));
+       char *help = NULL, *path = NULL;
 
-       if (help == NULL) {
+       path = system_path(TIPDIR);
+       if (perf_tip(&help, path) || help == NULL) {
                /* fallback for people who don't install perf ;-) */
-               help = perf_tip(DOCDIR);
-               if (help == NULL)
-                       help = "Cannot load tips.txt file, please install perf!";
+               free(path);
+               path = system_path(DOCDIR);
+               if (perf_tip(&help, path) || help == NULL)
+                       help = strdup("Cannot load tips.txt file, please install perf!");
        }
+       free(path);
 
        switch (use_browser) {
        case 1:
@@ -651,7 +654,7 @@ static int report__browse_hists(struct report *rep)
                ret = evlist__tty_browse_hists(evlist, rep, help);
                break;
        }
-
+       free(help);
        return ret;
 }
 
index fbb68deba59f2290bf239c24734f0ce35646692d..d01532d40acb70c5c5d806d8f40126692e544d71 100644 (file)
@@ -88,7 +88,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
        struct evsel *evsel;
        struct event_name tmp;
        struct evlist *evlist = evlist__new_default();
-       char *unit = strdup("KRAVA");
 
        TEST_ASSERT_VAL("failed to get evlist", evlist);
 
@@ -99,7 +98,8 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
 
        perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
 
-       evsel->unit = unit;
+       free((char *)evsel->unit);
+       evsel->unit = strdup("KRAVA");
 
        TEST_ASSERT_VAL("failed to synthesize attr update unit",
                        !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit));
@@ -119,7 +119,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
        TEST_ASSERT_VAL("failed to synthesize attr update cpus",
                        !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
 
-       free(unit);
        evlist__delete(evlist);
        return 0;
 }
index c895de481fe10d5c22fa60623604211588bd0cec..d54c5371c6a6e414dbb19ffece465a0f40f7415a 100644 (file)
@@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
        TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
        TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies);
        TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0);
-       TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+
+       if (num_dies) // Some platforms do not have CPU die support, for example s390
+               TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
 
        /*
         * Source count returns the number of events aggregating in a leader
index 574b7e4efd3a5a64b31ebae700909ddebe22eb1e..07b6f4ec024f0a0f423d8484b4b21d6e268ed354 100644 (file)
@@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
        struct evsel *evsel;
        u64 count;
 
+       perf_stat__reset_shadow_stats();
        evlist__for_each_entry(evlist, evsel) {
                count = find_value(evsel->name, vals);
                perf_stat__update_shadow_stats(evsel, count, 0, st);
index b669d22f2b13619a3bcc28013cf17790f8ebf708..07f2411b0ad45553581189682fb53913481fdffa 100644 (file)
@@ -36,7 +36,7 @@
  * These are based on the input value (213) specified
  * in branch_stack variable.
  */
-#define BS_EXPECTED_BE 0xa00d000000000000
+#define BS_EXPECTED_BE 0xa000d00000000000
 #define BS_EXPECTED_LE 0xd5000000
 #define FLAG(s)        s->branch_stack->entries[i].flags
 
index 820d942b30c390e9ef466a3d8e62e3cc6566493a..9d4c45184e715daec8c78ac10189efcc6d456b1e 100644 (file)
@@ -21,6 +21,7 @@ do {                                            \
 volatile u64 data1;
 volatile u8 data2[3];
 
+#ifndef __s390x__
 static int wp_read(int fd, long long *count, int size)
 {
        int ret = read(fd, count, size);
@@ -48,7 +49,6 @@ static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type,
        attr->exclude_hv     = 1;
 }
 
-#ifndef __s390x__
 static int __event(int wp_type, void *wp_addr, unsigned long wp_len)
 {
        int fd;
index c1f24d00485272bbbd0a87bc9762d6b304b1a349..5075ecead5f3d799a495ce89c93e4be707da5dec 100644 (file)
@@ -535,6 +535,18 @@ struct perf_hpp_list perf_hpp_list = {
 #undef __HPP_SORT_ACC_FN
 #undef __HPP_SORT_RAW_FN
 
+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+       /*
+        * At this point fmt should be completely
+        * unhooked, if not it's a bug.
+        */
+       BUG_ON(!list_empty(&fmt->list));
+       BUG_ON(!list_empty(&fmt->sort_list));
+
+       if (fmt->free)
+               fmt->free(fmt);
+}
 
 void perf_hpp__init(void)
 {
@@ -598,9 +610,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
        list_add(&format->sort_list, &list->sorts);
 }
 
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
+static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
        list_del_init(&format->list);
+       fmt_free(format);
 }
 
 void perf_hpp__cancel_cumulate(void)
@@ -672,19 +685,6 @@ next:
 }
 
 
-static void fmt_free(struct perf_hpp_fmt *fmt)
-{
-       /*
-        * At this point fmt should be completely
-        * unhooked, if not it's a bug.
-        */
-       BUG_ON(!list_empty(&fmt->list));
-       BUG_ON(!list_empty(&fmt->sort_list));
-
-       if (fmt->free)
-               fmt->free(fmt);
-}
-
 void perf_hpp__reset_output_field(struct perf_hpp_list *list)
 {
        struct perf_hpp_fmt *fmt, *tmp;
index 4748bcfe61de4e6c61cbc5e73147247d877bb805..fccac06b573a8f221f81c8b254fbf24877594c73 100644 (file)
@@ -51,6 +51,7 @@ struct arm_spe {
        u8                              timeless_decoding;
        u8                              data_queued;
 
+       u64                             sample_type;
        u8                              sample_flc;
        u8                              sample_llc;
        u8                              sample_tlb;
@@ -287,6 +288,12 @@ static void arm_spe_prep_sample(struct arm_spe *spe,
        event->sample.header.size = sizeof(struct perf_event_header);
 }
 
+static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
+{
+       event->header.size = perf_event__sample_event_size(sample, type, 0);
+       return perf_event__synthesize_sample(event, type, 0, sample);
+}
+
 static inline int
 arm_spe_deliver_synth_event(struct arm_spe *spe,
                            struct arm_spe_queue *speq __maybe_unused,
@@ -295,6 +302,12 @@ arm_spe_deliver_synth_event(struct arm_spe *spe,
 {
        int ret;
 
+       if (spe->synth_opts.inject) {
+               ret = arm_spe__inject_event(event, sample, spe->sample_type);
+               if (ret)
+                       return ret;
+       }
+
        ret = perf_session__deliver_synth_event(spe->session, event, sample);
        if (ret)
                pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
@@ -986,6 +999,8 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
        else
                attr.sample_type |= PERF_SAMPLE_TIME;
 
+       spe->sample_type = attr.sample_type;
+
        attr.exclude_user = evsel->core.attr.exclude_user;
        attr.exclude_kernel = evsel->core.attr.exclude_kernel;
        attr.exclude_hv = evsel->core.attr.exclude_hv;
diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
deleted file mode 100644 (file)
index 186a555..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-// Copyright (c) 2021 Facebook
-
-#ifndef __BPERF_STAT_H
-#define __BPERF_STAT_H
-
-typedef struct {
-       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-       __uint(key_size, sizeof(__u32));
-       __uint(value_size, sizeof(struct bpf_perf_event_value));
-       __uint(max_entries, 1);
-} reading_map;
-
-#endif /* __BPERF_STAT_H */
index b8fa3cb2da2308034f8fe651f4a0cdb70d9cb6e7..f193998530d431d828eb0ebee6840e166eb6aae7 100644 (file)
@@ -1,14 +1,23 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include "bperf.h"
 #include "bperf_u.h"
 
-reading_map diff_readings SEC(".maps");
-reading_map accum_readings SEC(".maps");
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} diff_readings SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} accum_readings SEC(".maps");
 
 struct {
        __uint(type, BPF_MAP_TYPE_HASH);
index 4f70d1459e86cb99e72a38d0889a0f3c79a84f06..e2a2d4cd7779ce703618fd366fce63bddf871bbe 100644 (file)
@@ -1,10 +1,8 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include "bperf.h"
 
 struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -13,8 +11,19 @@ struct {
        __uint(map_flags, BPF_F_PRESERVE_ELEMS);
 } events SEC(".maps");
 
-reading_map prev_readings SEC(".maps");
-reading_map diff_readings SEC(".maps");
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} prev_readings SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} diff_readings SEC(".maps");
 
 SEC("raw_tp/sched_switch")
 int BPF_PROG(on_switch)
index ab12b4c4ece21a9a3fd0f1f540d67241b34e5f03..97037d3b3d9fa4cd70838b32397c4f0f488c115e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2020 Facebook
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
index 95ffed66369c3287f9696ba1f1ce44eba7a5bf02..c59331eea1d9102c910c4290a9c88126e20beedf 100644 (file)
@@ -44,13 +44,16 @@ struct perf_event_attr;
 /* perf sample has 16 bits size limit */
 #define PERF_SAMPLE_MAX_SIZE (1 << 16)
 
+/* number of register is bound by the number of bits in regs_dump::mask (64) */
+#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64))
+
 struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;
 
        /* Cached values/mask filled by first register access. */
-       u64 cache_regs[PERF_REGS_MAX];
+       u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE];
        u64 cache_mask;
 };
 
index a59fb2ecb84ee47bb80c6e821254ebeda68b4dd9..ac0127be0459352acf8364a3bcd520d616ca7726 100644 (file)
@@ -241,7 +241,7 @@ void evsel__init(struct evsel *evsel,
 {
        perf_evsel__init(&evsel->core, attr, idx);
        evsel->tracking    = !idx;
-       evsel->unit        = "";
+       evsel->unit        = strdup("");
        evsel->scale       = 1.0;
        evsel->max_events  = ULONG_MAX;
        evsel->evlist      = NULL;
@@ -276,13 +276,8 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
        }
 
        if (evsel__is_clock(evsel)) {
-               /*
-                * The evsel->unit points to static alias->unit
-                * so it's ok to use static string in here.
-                */
-               static const char *unit = "msec";
-
-               evsel->unit = unit;
+               free((char *)evsel->unit);
+               evsel->unit = strdup("msec");
                evsel->scale = 1e-6;
        }
 
@@ -420,7 +415,11 @@ struct evsel *evsel__clone(struct evsel *orig)
 
        evsel->max_events = orig->max_events;
        evsel->tool_event = orig->tool_event;
-       evsel->unit = orig->unit;
+       free((char *)evsel->unit);
+       evsel->unit = strdup(orig->unit);
+       if (evsel->unit == NULL)
+               goto out_err;
+
        evsel->scale = orig->scale;
        evsel->snapshot = orig->snapshot;
        evsel->per_pkg = orig->per_pkg;
@@ -1441,6 +1440,7 @@ void evsel__exit(struct evsel *evsel)
        zfree(&evsel->group_name);
        zfree(&evsel->name);
        zfree(&evsel->pmu_name);
+       zfree(&evsel->unit);
        zfree(&evsel->metric_id);
        evsel__zero_per_pkg(evsel);
        hashmap__free(evsel->per_pkg_mask);
index fda8d14c891f8903c1ff0eaa0f7f20daa6083758..e3c1a532d05910bf634a1d22cc452eaba7dc057f 100644 (file)
@@ -2321,6 +2321,7 @@ out:
 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
 {\
+       free(ff->ph->env.__feat_env);                \
        ff->ph->env.__feat_env = do_read_string(ff); \
        return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
 }
@@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session,
        struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
        int type = fe->header.type;
        u64 feat = fe->feat_id;
+       int ret = 0;
 
        if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
@@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session,
        ff.size = event->header.size - sizeof(*fe);
        ff.ph = &session->header;
 
-       if (feat_ops[feat].process(&ff, NULL))
-               return -1;
+       if (feat_ops[feat].process(&ff, NULL)) {
+               ret = -1;
+               goto out;
+       }
 
        if (!feat_ops[feat].print || !tool->show_feat_hdr)
-               return 0;
+               goto out;
 
        if (!feat_ops[feat].full_only ||
            tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
@@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session,
                fprintf(stdout, "# %s info available, use -I to display\n",
                        feat_ops[feat].name);
        }
-
-       return 0;
+out:
+       free_event_desc(ff.events);
+       return ret;
 }
 
 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
@@ -4257,9 +4262,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
 
        switch (ev->type) {
        case PERF_EVENT_UPDATE__UNIT:
+               free((char *)evsel->unit);
                evsel->unit = strdup(ev->data);
                break;
        case PERF_EVENT_UPDATE__NAME:
+               free(evsel->name);
                evsel->name = strdup(ev->data);
                break;
        case PERF_EVENT_UPDATE__SCALE:
@@ -4268,11 +4275,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
                break;
        case PERF_EVENT_UPDATE__CPUS:
                ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
-
                map = cpu_map__new_data(&ev_cpus->cpus);
-               if (map)
+               if (map) {
+                       perf_cpu_map__put(evsel->core.own_cpus);
                        evsel->core.own_cpus = map;
-               else
+               } else
                        pr_err("failed to get event_update cpus\n");
        default:
                break;
index 65fe65ba03c257bf85049b1eadc0bb6cba3d080d..b776465e04ef33256316d5283db7ee4e7d4e4ad0 100644 (file)
@@ -289,15 +289,10 @@ static long hist_time(unsigned long htime)
        return htime;
 }
 
-static void he_stat__add_period(struct he_stat *he_stat, u64 period,
-                               u64 weight, u64 ins_lat, u64 p_stage_cyc)
+static void he_stat__add_period(struct he_stat *he_stat, u64 period)
 {
-
        he_stat->period         += period;
-       he_stat->weight         += weight;
        he_stat->nr_events      += 1;
-       he_stat->ins_lat        += ins_lat;
-       he_stat->p_stage_cyc    += p_stage_cyc;
 }
 
 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
@@ -308,9 +303,6 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
-       dest->weight            += src->weight;
-       dest->ins_lat           += src->ins_lat;
-       dest->p_stage_cyc               += src->p_stage_cyc;
 }
 
 static void he_stat__decay(struct he_stat *he_stat)
@@ -598,9 +590,6 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
-       u64 weight = entry->stat.weight;
-       u64 ins_lat = entry->stat.ins_lat;
-       u64 p_stage_cyc = entry->stat.p_stage_cyc;
        bool leftmost = true;
 
        p = &hists->entries_in->rb_root.rb_node;
@@ -619,11 +608,11 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
 
                if (!cmp) {
                        if (sample_self) {
-                               he_stat__add_period(&he->stat, period, weight, ins_lat, p_stage_cyc);
+                               he_stat__add_period(&he->stat, period);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
-                               he_stat__add_period(he->stat_acc, period, weight, ins_lat, p_stage_cyc);
+                               he_stat__add_period(he->stat_acc, period);
 
                        /*
                         * This mem info was allocated from sample__resolve_mem
@@ -733,9 +722,6 @@ __hists__add_entry(struct hists *hists,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
-                       .weight = sample->weight,
-                       .ins_lat = sample->ins_lat,
-                       .p_stage_cyc = sample->p_stage_cyc,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
@@ -748,6 +734,9 @@ __hists__add_entry(struct hists *hists,
                .raw_size = sample->raw_size,
                .ops = ops,
                .time = hist_time(sample->time),
+               .weight = sample->weight,
+               .ins_lat = sample->ins_lat,
+               .p_stage_cyc = sample->p_stage_cyc,
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
 
        if (!hists->has_callchains && he && he->callchain_size != 0)
index 5343b62476e604d1af6ada43d8381e108c2a5f30..621f35ae1efa535bc77dbc022654886fc86fb398 100644 (file)
@@ -369,7 +369,6 @@ enum {
 };
 
 void perf_hpp__init(void);
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
 void perf_hpp__cancel_cumulate(void);
 void perf_hpp__setup_output_field(struct perf_hpp_list *list);
 void perf_hpp__reset_output_field(struct perf_hpp_list *list);
index 5f83937bf8f3cbf653b2bb9f4a9356641a75f601..0e013c2d9eb43537ad43d850aba29cc8c4b11c21 100644 (file)
@@ -1205,61 +1205,69 @@ out_no_progress:
 
 static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
 {
+       enum intel_pt_sample_type type = decoder->state.type;
        bool ret = false;
 
+       decoder->state.type &= ~INTEL_PT_BRANCH;
+
        if (decoder->set_fup_tx_flags) {
                decoder->set_fup_tx_flags = false;
                decoder->tx_flags = decoder->fup_tx_flags;
-               decoder->state.type = INTEL_PT_TRANSACTION;
+               decoder->state.type |= INTEL_PT_TRANSACTION;
                if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
                        decoder->state.type |= INTEL_PT_BRANCH;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.flags = decoder->fup_tx_flags;
-               return true;
+               ret = true;
        }
        if (decoder->set_fup_ptw) {
                decoder->set_fup_ptw = false;
-               decoder->state.type = INTEL_PT_PTW;
+               decoder->state.type |= INTEL_PT_PTW;
                decoder->state.flags |= INTEL_PT_FUP_IP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.ptw_payload = decoder->fup_ptw_payload;
-               return true;
+               ret = true;
        }
        if (decoder->set_fup_mwait) {
                decoder->set_fup_mwait = false;
-               decoder->state.type = INTEL_PT_MWAIT_OP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
+               decoder->state.type |= INTEL_PT_MWAIT_OP;
                decoder->state.mwait_payload = decoder->fup_mwait_payload;
                ret = true;
        }
        if (decoder->set_fup_pwre) {
                decoder->set_fup_pwre = false;
                decoder->state.type |= INTEL_PT_PWR_ENTRY;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.pwre_payload = decoder->fup_pwre_payload;
                ret = true;
        }
        if (decoder->set_fup_exstop) {
                decoder->set_fup_exstop = false;
                decoder->state.type |= INTEL_PT_EX_STOP;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
                decoder->state.flags |= INTEL_PT_FUP_IP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                ret = true;
        }
        if (decoder->set_fup_bep) {
                decoder->set_fup_bep = false;
                decoder->state.type |= INTEL_PT_BLK_ITEMS;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
+               ret = true;
+       }
+       if (decoder->overflow) {
+               decoder->overflow = false;
+               if (!ret && !decoder->pge) {
+                       if (decoder->hop) {
+                               decoder->state.type = 0;
+                               decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+                       }
+                       decoder->pge = true;
+                       decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
+                       decoder->state.from_ip = 0;
+                       decoder->state.to_ip = decoder->ip;
+                       return true;
+               }
+       }
+       if (ret) {
                decoder->state.from_ip = decoder->ip;
                decoder->state.to_ip = 0;
-               ret = true;
+       } else {
+               decoder->state.type = type;
        }
        return ret;
 }
@@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
        intel_pt_clear_tx_flags(decoder);
        intel_pt_set_nr(decoder);
        decoder->timestamp_insn_cnt = 0;
-       decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+       decoder->state.from_ip = decoder->ip;
+       decoder->ip = 0;
+       decoder->pge = false;
+       decoder->set_fup_tx_flags = false;
+       decoder->set_fup_ptw = false;
+       decoder->set_fup_mwait = false;
+       decoder->set_fup_pwre = false;
+       decoder->set_fup_exstop = false;
+       decoder->set_fup_bep = false;
        decoder->overflow = true;
        return -EOVERFLOW;
 }
@@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
 /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
 static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
 {
+       *err = 0;
+
        /* Leap from PSB to PSB, getting ip from FUP within PSB+ */
        if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
                *err = intel_pt_scan_for_psb(decoder);
@@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
                return HOP_IGNORE;
 
        case INTEL_PT_TIP_PGD:
+               decoder->pge = false;
                if (!decoder->packet.count) {
                        intel_pt_set_nr(decoder);
                        return HOP_IGNORE;
@@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
                if (!decoder->packet.count)
                        return HOP_IGNORE;
                intel_pt_set_ip(decoder);
-               if (intel_pt_fup_event(decoder))
-                       return HOP_RETURN;
-               if (!decoder->branch_enable)
+               if (decoder->set_fup_mwait || decoder->set_fup_pwre)
+                       *no_tip = true;
+               if (!decoder->branch_enable || !decoder->pge)
                        *no_tip = true;
                if (*no_tip) {
                        decoder->state.type = INTEL_PT_INSTRUCTION;
                        decoder->state.from_ip = decoder->ip;
                        decoder->state.to_ip = 0;
+                       intel_pt_fup_event(decoder);
                        return HOP_RETURN;
                }
+               intel_pt_fup_event(decoder);
+               decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
                *err = intel_pt_walk_fup_tip(decoder);
-               if (!*err)
+               if (!*err && decoder->state.to_ip)
                        decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
                return HOP_RETURN;
 
@@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
 {
        struct intel_pt_psb_info data = { .fup = false };
 
-       if (!decoder->branch_enable || !decoder->pge)
+       if (!decoder->branch_enable)
                return false;
 
        intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
@@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
                if (err)
                        return err;
 next:
+               err = 0;
                if (decoder->cyc_threshold) {
                        if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
                                decoder->sample_cyc = false;
@@ -2962,6 +2986,7 @@ next:
 
                case INTEL_PT_TIP_PGE: {
                        decoder->pge = true;
+                       decoder->overflow = false;
                        intel_pt_mtc_cyc_cnt_pge(decoder);
                        intel_pt_set_nr(decoder);
                        if (decoder->packet.count == 0) {
@@ -2999,7 +3024,7 @@ next:
                                break;
                        }
                        intel_pt_set_last_ip(decoder);
-                       if (!decoder->branch_enable) {
+                       if (!decoder->branch_enable || !decoder->pge) {
                                decoder->ip = decoder->last_ip;
                                if (intel_pt_fup_event(decoder))
                                        return 0;
@@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
        decoder->set_fup_pwre = false;
        decoder->set_fup_exstop = false;
        decoder->set_fup_bep = false;
+       decoder->overflow = false;
 
        if (!decoder->branch_enable) {
                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
-               decoder->overflow = false;
                decoder->state.type = 0; /* Do not have a sample */
                return 0;
        }
@@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
                decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
        else
                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
-       decoder->overflow = false;
 
        decoder->state.from_ip = 0;
        decoder->state.to_ip = decoder->ip;
@@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
        }
 
        decoder->have_last_ip = true;
-       decoder->pkt_state = INTEL_PT_STATE_NO_IP;
+       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
 
        err = intel_pt_walk_psb(decoder);
        if (err)
@@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 
        if (err) {
                decoder->state.err = intel_pt_ext_err(err);
-               decoder->state.from_ip = decoder->ip;
+               if (err != -EOVERFLOW)
+                       decoder->state.from_ip = decoder->ip;
                intel_pt_update_sample_time(decoder);
                decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
                intel_pt_set_nr(decoder);
index 556a893508daeb9aca14806ba9779de982208771..10c3187e4c5aae1886a5cdcce7af4d64efcae62a 100644 (file)
@@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
                                ptq->sync_switch = false;
                                intel_pt_next_tid(pt, ptq);
                        }
+                       ptq->timestamp = state->est_timestamp;
                        if (pt->synth_opts.errors) {
                                err = intel_ptq_synth_error(ptq, state);
                                if (err)
index 5bfb6f892489afa515185a4cceda4a6912dd0fea..ba74fdf74af91f983d9fcb09c650c1c1d281c8e4 100644 (file)
@@ -402,8 +402,10 @@ static int add_event_tool(struct list_head *list, int *idx,
        if (!evsel)
                return -ENOMEM;
        evsel->tool_event = tool_event;
-       if (tool_event == PERF_TOOL_DURATION_TIME)
-               evsel->unit = "ns";
+       if (tool_event == PERF_TOOL_DURATION_TIME) {
+               free((char *)evsel->unit);
+               evsel->unit = strdup("ns");
+       }
        return 0;
 }
 
@@ -1630,7 +1632,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
        if (parse_state->fake_pmu)
                return 0;
 
-       evsel->unit = info.unit;
+       free((char *)evsel->unit);
+       evsel->unit = strdup(info.unit);
        evsel->scale = info.scale;
        evsel->per_pkg = info.per_pkg;
        evsel->snapshot = info.snapshot;
index 5ee47ae1509c67fcf015ae5e52637d27272af423..06a7461ba864c7597492b011a5e1d27586a10065 100644 (file)
@@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
        int i, idx = 0;
        u64 mask = regs->mask;
 
+       if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE)
+               return -EINVAL;
+
        if (regs->cache_mask & (1ULL << id))
                goto out;
 
index 563a9ba8954f31b3cbf561268ad9a2da3b84cca2..7f782a31bda3b67876bc8aaee5896ebaff051e9c 100644 (file)
@@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
                struct tep_event *tp_format;
 
                tp_format = trace_event__tp_format_id(evsel->core.attr.config);
-               if (!tp_format)
+               if (IS_ERR_OR_NULL(tp_format))
                        return NULL;
 
                evsel->tp_format = tp_format;
index 20bacd5972adec4425ad11bef211a9dee5b6d45d..34f1b1b1176c7808f7c08f0f945b59cbb43de27b 100644 (file)
@@ -15,7 +15,7 @@ int smt_on(void)
        if (cached)
                return cached_result;
 
-       if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0)
+       if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0)
                goto done;
 
        ncpu = sysconf(_SC_NPROCESSORS_CONF);
index 568a88c001c6cb5afe907b3cbe7c6cde632bf06c..a111065b484ef76f72bb9721dc2c8ff27f658f8e 100644 (file)
@@ -1325,88 +1325,68 @@ struct sort_entry sort_mispredict = {
        .se_width_idx   = HISTC_MISPREDICT,
 };
 
-static u64 he_weight(struct hist_entry *he)
-{
-       return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
-}
-
 static int64_t
-sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
+sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-       return he_weight(left) - he_weight(right);
+       return left->weight - right->weight;
 }
 
 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
 {
-       return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
+       return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
 }
 
 struct sort_entry sort_local_weight = {
        .se_header      = "Local Weight",
-       .se_cmp         = sort__local_weight_cmp,
+       .se_cmp         = sort__weight_cmp,
        .se_snprintf    = hist_entry__local_weight_snprintf,
        .se_width_idx   = HISTC_LOCAL_WEIGHT,
 };
 
-static int64_t
-sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-       return left->stat.weight - right->stat.weight;
-}
-
 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
                                              size_t size, unsigned int width)
 {
-       return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
+       return repsep_snprintf(bf, size, "%-*llu", width,
+                              he->weight * he->stat.nr_events);
 }
 
 struct sort_entry sort_global_weight = {
        .se_header      = "Weight",
-       .se_cmp         = sort__global_weight_cmp,
+       .se_cmp         = sort__weight_cmp,
        .se_snprintf    = hist_entry__global_weight_snprintf,
        .se_width_idx   = HISTC_GLOBAL_WEIGHT,
 };
 
-static u64 he_ins_lat(struct hist_entry *he)
-{
-               return he->stat.nr_events ? he->stat.ins_lat / he->stat.nr_events : 0;
-}
-
 static int64_t
-sort__local_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
+sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-               return he_ins_lat(left) - he_ins_lat(right);
+       return left->ins_lat - right->ins_lat;
 }
 
 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
                                              size_t size, unsigned int width)
 {
-               return repsep_snprintf(bf, size, "%-*u", width, he_ins_lat(he));
+       return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
 }
 
 struct sort_entry sort_local_ins_lat = {
        .se_header      = "Local INSTR Latency",
-       .se_cmp         = sort__local_ins_lat_cmp,
+       .se_cmp         = sort__ins_lat_cmp,
        .se_snprintf    = hist_entry__local_ins_lat_snprintf,
        .se_width_idx   = HISTC_LOCAL_INS_LAT,
 };
 
-static int64_t
-sort__global_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-               return left->stat.ins_lat - right->stat.ins_lat;
-}
-
 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
                                               size_t size, unsigned int width)
 {
-               return repsep_snprintf(bf, size, "%-*u", width, he->stat.ins_lat);
+       return repsep_snprintf(bf, size, "%-*u", width,
+                              he->ins_lat * he->stat.nr_events);
 }
 
 struct sort_entry sort_global_ins_lat = {
        .se_header      = "INSTR Latency",
-       .se_cmp         = sort__global_ins_lat_cmp,
+       .se_cmp         = sort__ins_lat_cmp,
        .se_snprintf    = hist_entry__global_ins_lat_snprintf,
        .se_width_idx   = HISTC_GLOBAL_INS_LAT,
 };
@@ -1414,13 +1394,13 @@ struct sort_entry sort_global_ins_lat = {
 static int64_t
 sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-       return left->stat.p_stage_cyc - right->stat.p_stage_cyc;
+       return left->p_stage_cyc - right->p_stage_cyc;
 }
 
 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
                                        size_t size, unsigned int width)
 {
-       return repsep_snprintf(bf, size, "%-*u", width, he->stat.p_stage_cyc);
+       return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
 }
 
 struct sort_entry sort_p_stage_cyc = {
index b67c469aba79587ff3533ac1f0b0efa2aca0409f..7b7145501933fa88093d2eee481005e21f45cf02 100644 (file)
@@ -49,9 +49,6 @@ struct he_stat {
        u64                     period_us;
        u64                     period_guest_sys;
        u64                     period_guest_us;
-       u64                     weight;
-       u64                     ins_lat;
-       u64                     p_stage_cyc;
        u32                     nr_events;
 };
 
@@ -109,6 +106,9 @@ struct hist_entry {
        s32                     socket;
        s32                     cpu;
        u64                     code_page_size;
+       u64                     weight;
+       u64                     ins_lat;
+       u64                     p_stage_cyc;
        u8                      cpumode;
        u8                      depth;
 
index 37a9492edb3ebf094e348c1ab4e72e9dfe9d2378..df3c4671be72afd5ef426ea4f3c79a5f50dc0874 100644 (file)
@@ -379,32 +379,32 @@ fetch_kernel_version(unsigned int *puint, char *str,
        return 0;
 }
 
-const char *perf_tip(const char *dirpath)
+int perf_tip(char **strp, const char *dirpath)
 {
        struct strlist *tips;
        struct str_node *node;
-       char *tip = NULL;
        struct strlist_config conf = {
                .dirname = dirpath,
                .file_only = true,
        };
+       int ret = 0;
 
+       *strp = NULL;
        tips = strlist__new("tips.txt", &conf);
        if (tips == NULL)
-               return errno == ENOENT ? NULL :
-                       "Tip: check path of tips.txt or get more memory! ;-p";
+               return -errno;
 
        if (strlist__nr_entries(tips) == 0)
                goto out;
 
        node = strlist__entry(tips, random() % strlist__nr_entries(tips));
-       if (asprintf(&tip, "Tip: %s", node->s) < 0)
-               tip = (char *)"Tip: get more memory! ;-)";
+       if (asprintf(strp, "Tip: %s", node->s) < 0)
+               ret = -ENOMEM;
 
 out:
        strlist__delete(tips);
 
-       return tip;
+       return ret;
 }
 
 char *perf_exe(char *buf, int len)
index ad737052e59776dfe122ac633e737e972c5de217..9f0d36ba77f2d1734daced16f71dd3c2c053892c 100644 (file)
@@ -39,7 +39,7 @@ int fetch_kernel_version(unsigned int *puint,
 #define KVER_FMT       "%d.%d.%d"
 #define KVER_PARAM(x)  KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
 
-const char *perf_tip(const char *dirpath);
+int perf_tip(char **strp, const char *dirpath);
 
 #ifndef HAVE_SCHED_GETCPU_SUPPORT
 int sched_getcpu(void);
index 331f6d30f47261864a7c5654a1efcc2df7db5490..cd7106876a5f39dfda38e286c54c3a7c268b34a2 100644 (file)
@@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include
 ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
 CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
 CFLAGS += $(WARNINGS)
+MKDIR = mkdir
 
 ifeq ($(strip $(V)),false)
        QUIET=@
index 2a6c170b57cd4aabc81a08a11ffa079b61daefb7..1d7616f5d0aec848204ede85d79ae6a9516cfc0a 100644 (file)
@@ -21,6 +21,7 @@ $(KERNEL_INCLUDE):
 
 $(objdir)%.o: %.c $(KERNEL_INCLUDE)
        $(ECHO) "  CC      " $(subst $(OUTPUT),,$@)
+       $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null
        $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
 
 all: $(OUTPUT)$(TOOL)
index 565fccdfe6e954a4bec7d11bebb53a108623be56..016cff473cfc483963cd1b4586243fa3e73378e7 100644 (file)
@@ -1,5 +1,8 @@
 #ifndef _LINUX_LOCKDEP_H
 #define _LINUX_LOCKDEP_H
+
+#include <linux/spinlock.h>
+
 struct lock_class_key {
        unsigned int a;
 };
index 54b0a41a37750832a26317582b0b25b6e2319d21..62fafbeb46723be2e4ef162dada68d2d674ca90e 100644 (file)
@@ -187,7 +187,7 @@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
 $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
        $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower            \
                    OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF)     \
-                   BPFTOOL_OUTPUT=$(BUILD_DIR)/bpftool/                       \
+                   BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/                  \
                    BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf                          \
                    BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) &&             \
                    cp $(RUNQSLOWER_OUTPUT)runqslower $@
diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
new file mode 100644 (file)
index 0000000..e1de5f8
--- /dev/null
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_helper_restricted.skel.h"
+
+void test_helper_restricted(void)
+{
+       int prog_i = 0, prog_cnt;
+       int duration = 0;
+
+       do {
+               struct test_helper_restricted *test;
+               int maybeOK;
+
+               test = test_helper_restricted__open();
+               if (!ASSERT_OK_PTR(test, "open"))
+                       return;
+
+               prog_cnt = test->skeleton->prog_cnt;
+
+               for (int j = 0; j < prog_cnt; ++j) {
+                       struct bpf_program *prog = *test->skeleton->progs[j].prog;
+
+                       maybeOK = bpf_program__set_autoload(prog, prog_i == j);
+                       ASSERT_OK(maybeOK, "set autoload");
+               }
+
+               maybeOK = test_helper_restricted__load(test);
+               CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted");
+
+               test_helper_restricted__destroy(test);
+       } while (++prog_i < prog_cnt);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
new file mode 100644 (file)
index 0000000..68d64c3
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <time.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct timer {
+       struct bpf_timer t;
+};
+
+struct lock {
+       struct bpf_spin_lock l;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, __u32);
+       __type(value, struct timer);
+} timers SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, __u32);
+       __type(value, struct lock);
+} locks SEC(".maps");
+
+static int timer_cb(void *map, int *key, struct timer *timer)
+{
+       return 0;
+}
+
+static void timer_work(void)
+{
+       struct timer *timer;
+       const int key = 0;
+
+       timer  = bpf_map_lookup_elem(&timers, &key);
+       if (timer) {
+               bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
+               bpf_timer_set_callback(&timer->t, timer_cb);
+               bpf_timer_start(&timer->t, 10E9, 0);
+               bpf_timer_cancel(&timer->t);
+       }
+}
+
+static void spin_lock_work(void)
+{
+       const int key = 0;
+       struct lock *lock;
+
+       lock = bpf_map_lookup_elem(&locks, &key);
+       if (lock) {
+               bpf_spin_lock(&lock->l);
+               bpf_spin_unlock(&lock->l);
+       }
+}
+
+SEC("raw_tp/sys_enter")
+int raw_tp_timer(void *ctx)
+{
+       timer_work();
+
+       return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int tp_timer(void *ctx)
+{
+       timer_work();
+
+       return 0;
+}
+
+SEC("kprobe/sys_nanosleep")
+int kprobe_timer(void *ctx)
+{
+       timer_work();
+
+       return 0;
+}
+
+SEC("perf_event")
+int perf_event_timer(void *ctx)
+{
+       timer_work();
+
+       return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int raw_tp_spin_lock(void *ctx)
+{
+       spin_lock_work();
+
+       return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int tp_spin_lock(void *ctx)
+{
+       spin_lock_work();
+
+       return 0;
+}
+
+SEC("kprobe/sys_nanosleep")
+int kprobe_spin_lock(void *ctx)
+{
+       spin_lock_work();
+
+       return 0;
+}
+
+SEC("perf_event")
+int perf_event_spin_lock(void *ctx)
+{
+       spin_lock_work();
+
+       return 0;
+}
+
+const char LICENSE[] SEC("license") = "GPL";
index 25afe423b3f06dbb403ced2873c75e3b87e98640..465ef3f112c0c96446e48dbbfc3f73eac170bbeb 100644 (file)
@@ -92,6 +92,7 @@ struct bpf_test {
        int fixup_map_event_output[MAX_FIXUPS];
        int fixup_map_reuseport_array[MAX_FIXUPS];
        int fixup_map_ringbuf[MAX_FIXUPS];
+       int fixup_map_timer[MAX_FIXUPS];
        /* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
         * Can be a tab-separated sequence of expected strings. An empty string
         * means no log verification.
@@ -604,8 +605,15 @@ static int create_cgroup_storage(bool percpu)
  *   int cnt;
  *   struct bpf_spin_lock l;
  * };
+ * struct bpf_timer {
+ *   __u64 :64;
+ *   __u64 :64;
+ * } __attribute__((aligned(8)));
+ * struct timer {
+ *   struct bpf_timer t;
+ * };
  */
-static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
 static __u32 btf_raw_types[] = {
        /* int */
        BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
@@ -616,6 +624,11 @@ static __u32 btf_raw_types[] = {
        BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
        BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
        BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+       /* struct bpf_timer */                          /* [4] */
+       BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
+       /* struct timer */                              /* [5] */
+       BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
+       BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
 };
 
 static int load_btf(void)
@@ -696,6 +709,29 @@ static int create_sk_storage_map(void)
        return fd;
 }
 
+static int create_map_timer(void)
+{
+       struct bpf_create_map_attr attr = {
+               .name = "test_map",
+               .map_type = BPF_MAP_TYPE_ARRAY,
+               .key_size = 4,
+               .value_size = 16,
+               .max_entries = 1,
+               .btf_key_type_id = 1,
+               .btf_value_type_id = 5,
+       };
+       int fd, btf_fd;
+
+       btf_fd = load_btf();
+       if (btf_fd < 0)
+               return -1;
+       attr.btf_fd = btf_fd;
+       fd = bpf_create_map_xattr(&attr);
+       if (fd < 0)
+               printf("Failed to create map with timer\n");
+       return fd;
+}
+
 static char bpf_vlog[UINT_MAX >> 8];
 
 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -722,6 +758,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        int *fixup_map_event_output = test->fixup_map_event_output;
        int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
        int *fixup_map_ringbuf = test->fixup_map_ringbuf;
+       int *fixup_map_timer = test->fixup_map_timer;
 
        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -907,6 +944,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                        fixup_map_ringbuf++;
                } while (*fixup_map_ringbuf);
        }
+       if (*fixup_map_timer) {
+               map_fds[21] = create_map_timer();
+               do {
+                       prog[*fixup_map_timer].imm = map_fds[21];
+                       fixup_map_timer++;
+               } while (*fixup_map_timer);
+       }
 }
 
 struct libcap {
diff --git a/tools/testing/selftests/bpf/verifier/helper_restricted.c b/tools/testing/selftests/bpf/verifier/helper_restricted.c
new file mode 100644 (file)
index 0000000..a067b70
--- /dev/null
@@ -0,0 +1,196 @@
+{
+       "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE",
+       .insns = {
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "unknown func bpf_ktime_get_coarse_ns",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_KPROBE,
+},
+{
+       "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT",
+       .insns = {
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "unknown func bpf_ktime_get_coarse_ns",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+       "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "unknown func bpf_ktime_get_coarse_ns",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+       "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "unknown func bpf_ktime_get_coarse_ns",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
+},
+{
+       "bpf_timer_init is forbidden in BPF_PROG_TYPE_KPROBE",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_3, 1),
+       BPF_EMIT_CALL(BPF_FUNC_timer_init),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_timer = { 3, 8 },
+       .errstr = "tracing progs cannot use bpf_timer yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_KPROBE,
+},
+{
+       "bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_3, 1),
+       BPF_EMIT_CALL(BPF_FUNC_timer_init),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_timer = { 3, 8 },
+       .errstr = "tracing progs cannot use bpf_timer yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+       "bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_3, 1),
+       BPF_EMIT_CALL(BPF_FUNC_timer_init),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_timer = { 3, 8 },
+       .errstr = "tracing progs cannot use bpf_timer yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+       "bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_LD_MAP_FD(BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_3, 1),
+       BPF_EMIT_CALL(BPF_FUNC_timer_init),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_timer = { 3, 8 },
+       .errstr = "tracing progs cannot use bpf_timer yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
+},
+{
+       "bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_spin_lock = { 3 },
+       .errstr = "tracing progs cannot use bpf_spin_lock yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_KPROBE,
+},
+{
+       "bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_spin_lock = { 3 },
+       .errstr = "tracing progs cannot use bpf_spin_lock yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+       "bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_spin_lock = { 3 },
+       .errstr = "tracing progs cannot use bpf_spin_lock yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+       "bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_spin_lock = { 3 },
+       .errstr = "tracing progs cannot use bpf_spin_lock yet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
+},
index 2798927ee9ff42d0efe71d7d6f29ffa682f19d24..128a348b762dc1f67721592759ad1d4bbd64fc6b 100644 (file)
        .fixup_map_in_map = { 3 },
        .result = ACCEPT,
 },
+{
+       "map in map state pruning",
+       .insns = {
+       BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 11),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_in_map = { 4, 14 },
+       .flags = BPF_F_TEST_STATE_FREQ,
+       .result = VERBOSE_ACCEPT,
+       .errstr = "processed 25 insns",
+       .prog_type = BPF_PROG_TYPE_XDP,
+},
 {
        "invalid inner map pointer",
        .insns = {
index bfb97383e6b5ae2c7e10c0e8294d4dd115b61954..b4ec228eb95d05e02ef416351c4c9578b38b983b 100644 (file)
@@ -35,7 +35,7 @@
        .prog_type = BPF_PROG_TYPE_XDP,
 },
 {
-       "XDP pkt read, pkt_data' > pkt_end, good access",
+       "XDP pkt read, pkt_data' > pkt_end, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_end > pkt_data', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end > pkt_data', bad access 1",
+       "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_end > pkt_data', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end > pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data' < pkt_end, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+       "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end < pkt_data', good access",
+       "XDP pkt read, pkt_data' < pkt_end, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data' >= pkt_end, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+       "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end >= pkt_data', good access",
+       "XDP pkt read, pkt_data' >= pkt_end, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end >= pkt_data', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' <= pkt_end, good access",
+       "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_end <= pkt_data', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+       "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' > pkt_data, good access",
+       "XDP pkt read, pkt_end <= pkt_data', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data > pkt_meta', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+       "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data > pkt_meta', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_meta' < pkt_data, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+       "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data < pkt_meta', good access",
+       "XDP pkt read, pkt_meta' < pkt_data, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_meta' >= pkt_data, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data >= pkt_meta', good access",
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data >= pkt_meta', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' <= pkt_data, good access",
+       "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data <= pkt_meta', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+       "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data <= pkt_meta', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore
new file mode 100644 (file)
index 0000000..c6c2965
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+huge_count_read_write
index 8a3f2cd9fec0c65b8d88f7ff1b891c7ef6746c0b..937d36ae9a69c2d8955791437e75049b9f0bc0f0 100644 (file)
@@ -1,7 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for damon selftests
 
-TEST_FILES = _chk_dependency.sh
-TEST_PROGS = debugfs_attrs.sh
+TEST_GEN_FILES += huge_count_read_write
+
+TEST_FILES = _chk_dependency.sh _debugfs_common.sh
+TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
+TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh
new file mode 100644 (file)
index 0000000..48989d4
--- /dev/null
@@ -0,0 +1,52 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+test_write_result() {
+       file=$1
+       content=$2
+       orig_content=$3
+       expect_reason=$4
+       expected=$5
+
+       echo "$content" > "$file"
+       if [ $? -ne "$expected" ]
+       then
+               echo "writing $content to $file doesn't return $expected"
+               echo "expected because: $expect_reason"
+               echo "$orig_content" > "$file"
+               exit 1
+       fi
+}
+
+test_write_succ() {
+       test_write_result "$1" "$2" "$3" "$4" 0
+}
+
+test_write_fail() {
+       test_write_result "$1" "$2" "$3" "$4" 1
+}
+
+test_content() {
+       file=$1
+       orig_content=$2
+       expected=$3
+       expect_reason=$4
+
+       content=$(cat "$file")
+       if [ "$content" != "$expected" ]
+       then
+               echo "reading $file expected $expected but $content"
+               echo "expected because: $expect_reason"
+               echo "$orig_content" > "$file"
+               exit 1
+       fi
+}
+
+source ./_chk_dependency.sh
+
+damon_onoff="$DBGFS/monitor_on"
+if [ $(cat "$damon_onoff") = "on" ]
+then
+       echo "monitoring is on"
+       exit $ksft_skip
+fi
index 196b6640bf3783601833f348447d720f71f9f7ec..902e312bca898b5b88a88e23ec7c26045208f940 100644 (file)
@@ -1,48 +1,7 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
-test_write_result() {
-       file=$1
-       content=$2
-       orig_content=$3
-       expect_reason=$4
-       expected=$5
-
-       echo "$content" > "$file"
-       if [ $? -ne "$expected" ]
-       then
-               echo "writing $content to $file doesn't return $expected"
-               echo "expected because: $expect_reason"
-               echo "$orig_content" > "$file"
-               exit 1
-       fi
-}
-
-test_write_succ() {
-       test_write_result "$1" "$2" "$3" "$4" 0
-}
-
-test_write_fail() {
-       test_write_result "$1" "$2" "$3" "$4" 1
-}
-
-test_content() {
-       file=$1
-       orig_content=$2
-       expected=$3
-       expect_reason=$4
-
-       content=$(cat "$file")
-       if [ "$content" != "$expected" ]
-       then
-               echo "reading $file expected $expected but $content"
-               echo "expected because: $expect_reason"
-               echo "$orig_content" > "$file"
-               exit 1
-       fi
-}
-
-source ./_chk_dependency.sh
+source _debugfs_common.sh
 
 # Test attrs file
 # ===============
@@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
        "min_nr_regions > max_nr_regions"
 test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
 echo "$orig_content" > "$file"
-
-# Test schemes file
-# =================
-
-file="$DBGFS/schemes"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
-       "$orig_content" "valid input"
-test_write_fail "$file" "1 2
-3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
-test_write_succ "$file" "" "$orig_content" "disabling"
-echo "$orig_content" > "$file"
-
-# Test target_ids file
-# ====================
-
-file="$DBGFS/target_ids"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
-test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
-test_content "$file" "$orig_content" "1 2" "non-integer was there"
-test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
-test_content "$file" "$orig_content" "" "wrong input written"
-test_write_succ "$file" "" "$orig_content" "empty input"
-test_content "$file" "$orig_content" "" "empty input written"
-echo "$orig_content" > "$file"
-
-echo "PASS"
diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh
new file mode 100644 (file)
index 0000000..87aff80
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test empty targets case
+# =======================
+
+orig_target_ids=$(cat "$DBGFS/target_ids")
+echo "" > "$DBGFS/target_ids"
+orig_monitor_on=$(cat "$DBGFS/monitor_on")
+test_write_fail "$DBGFS/monitor_on" "on" "$orig_monitor_on" "empty target ids"
+echo "$orig_target_ids" > "$DBGFS/target_ids"
diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
new file mode 100644 (file)
index 0000000..922cada
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test huge count read write
+# ==========================
+
+dmesg -C
+
+for file in "$DBGFS/"*
+do
+       ./huge_count_read_write "$file"
+done
+
+if dmesg | grep -q WARNING
+then
+       dmesg
+       exit 1
+else
+       exit 0
+fi
diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh
new file mode 100644 (file)
index 0000000..5b39ab4
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test schemes file
+# =================
+
+file="$DBGFS/schemes"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
+       "$orig_content" "valid input"
+test_write_fail "$file" "1 2
+3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
+test_write_succ "$file" "" "$orig_content" "disabling"
+test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \
+       "$orig_content" "wrong condition ranges"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh
new file mode 100644 (file)
index 0000000..49aeabd
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test target_ids file
+# ====================
+
+file="$DBGFS/target_ids"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
+test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
+test_content "$file" "$orig_content" "1 2" "non-integer was there"
+test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
+test_content "$file" "$orig_content" "" "wrong input written"
+test_write_succ "$file" "" "$orig_content" "empty input"
+test_content "$file" "$orig_content" "" "empty input written"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c
new file mode 100644 (file)
index 0000000..ad7a6b4
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+
+void write_read_with_huge_count(char *file)
+{
+       int filedesc = open(file, O_RDWR);
+       char buf[25];
+       int ret;
+
+       printf("%s %s\n", __func__, file);
+       if (filedesc < 0) {
+               fprintf(stderr, "failed opening %s\n", file);
+               exit(1);
+       }
+
+       write(filedesc, "", 0xfffffffful);
+       perror("after write: ");
+       ret = read(filedesc, buf, 0xfffffffful);
+       perror("after read: ");
+       close(filedesc);
+}
+
+int main(int argc, char *argv[])
+{
+       if (argc != 2) {
+               fprintf(stderr, "Usage: %s <file>\n", argv[0]);
+               exit(1);
+       }
+       write_read_with_huge_count(argv[1]);
+
+       return 0;
+}
index 39f2bbe8dd3df67d01d9fcfeadbadfda02f8c3ea..d7b312b44a62a8540bdb5f6c0e105010315de2f9 100644 (file)
@@ -3,5 +3,6 @@
 TEST_PROGS := gpio-mockup.sh
 TEST_FILES := gpio-mockup-sysfs.sh
 TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev
+CFLAGS += -O2 -g -Wall -I../../../../usr/include/
 
 include ../lib.mk
index e83eac71621a0e2d7913f056b7a8d35c41912e8e..d1640f44f8ac2a6fda7a5f75605f83fcaa165dc0 100644 (file)
@@ -117,7 +117,7 @@ int main(int argc, char *argv[])
 {
        char *chip;
        int opt, ret, cfd, lfd;
-       unsigned int offset, val, abiv;
+       unsigned int offset, val = 0, abiv;
        uint32_t flags_v1;
        uint64_t flags_v2;
 
index d4a8301396833fc8e028fc02366f30de0004a684..00814c0f87a678e9f5d262d8e2663f5ca61b4e5a 100644 (file)
 /x86_64/platform_info_test
 /x86_64/set_boot_cpu_id
 /x86_64/set_sregs_test
+/x86_64/sev_migrate_tests
 /x86_64/smm_test
 /x86_64/state_test
 /x86_64/svm_vmcall_test
 /x86_64/svm_int_ctl_test
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
+/x86_64/userspace_io_test
 /x86_64/userspace_msr_exit_test
 /x86_64/vmx_apic_access_test
 /x86_64/vmx_close_while_nested_test
index c4e34717826aaf19dc4ad9e6f602d6a8eaa708c9..f307c9f619815de6bff2ca1c73cb1889aae71543 100644 (file)
@@ -59,6 +59,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
 TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
index 5d95113c7b7c540842d5df93cfcef16658765e82..d8909032317a8fa1f25e4876d6571d328d4f4ea6 100644 (file)
@@ -47,7 +47,7 @@
 #include "guest_modes.h"
 
 /* Global variable used to synchronize all of the vCPU threads. */
-static int iteration = -1;
+static int iteration;
 
 /* Defines what vCPU threads should do during a given iteration. */
 static enum {
@@ -215,12 +215,11 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
        return true;
 }
 
-static void *vcpu_thread_main(void *arg)
+static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
 {
-       struct perf_test_vcpu_args *vcpu_args = arg;
        struct kvm_vm *vm = perf_test_args.vm;
        int vcpu_id = vcpu_args->vcpu_id;
-       int current_iteration = -1;
+       int current_iteration = 0;
 
        while (spin_wait_for_next_iteration(&current_iteration)) {
                switch (READ_ONCE(iteration_work)) {
@@ -235,8 +234,6 @@ static void *vcpu_thread_main(void *arg)
 
                vcpu_last_completed_iteration[vcpu_id] = current_iteration;
        }
-
-       return NULL;
 }
 
 static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
@@ -277,8 +274,7 @@ static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
 static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
                          const char *description)
 {
-       perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
-       sync_global_to_guest(vm, perf_test_args);
+       perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
        iteration_work = ITERATION_ACCESS_MEMORY;
        run_iteration(vm, vcpus, description);
 }
@@ -296,48 +292,16 @@ static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
        run_iteration(vm, vcpus, "Mark memory idle");
 }
 
-static pthread_t *create_vcpu_threads(int vcpus)
-{
-       pthread_t *vcpu_threads;
-       int i;
-
-       vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
-       TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
-
-       for (i = 0; i < vcpus; i++) {
-               vcpu_last_completed_iteration[i] = iteration;
-               pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
-                              &perf_test_args.vcpu_args[i]);
-       }
-
-       return vcpu_threads;
-}
-
-static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
-{
-       int i;
-
-       /* Set done to signal the vCPU threads to exit */
-       done = true;
-
-       for (i = 0; i < vcpus; i++)
-               pthread_join(vcpu_threads[i], NULL);
-}
-
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
        struct test_params *params = arg;
        struct kvm_vm *vm;
-       pthread_t *vcpu_threads;
        int vcpus = params->vcpus;
 
        vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1,
-                                params->backing_src);
+                                params->backing_src, !overlap_memory_access);
 
-       perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
-                             !overlap_memory_access);
-
-       vcpu_threads = create_vcpu_threads(vcpus);
+       perf_test_start_vcpu_threads(vcpus, vcpu_thread_main);
 
        pr_info("\n");
        access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
@@ -352,8 +316,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        mark_memory_idle(vm, vcpus);
        access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
 
-       terminate_vcpu_threads(vcpu_threads, vcpus);
-       free(vcpu_threads);
+       /* Set done to signal the vCPU threads to exit */
+       done = true;
+
+       perf_test_join_vcpu_threads(vcpus);
        perf_test_destroy_vm(vm);
 }
 
index 1510b21e6306143b202581d76415451545332797..6a719d0655991c66fdb8ea0889a70cf2828cd75e 100644 (file)
@@ -42,10 +42,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 static size_t demand_paging_size;
 static char *guest_data_prototype;
 
-static void *vcpu_worker(void *data)
+static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
        int ret;
-       struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
        int vcpu_id = vcpu_args->vcpu_id;
        struct kvm_vm *vm = perf_test_args.vm;
        struct kvm_run *run;
@@ -68,8 +67,6 @@ static void *vcpu_worker(void *data)
        ts_diff = timespec_elapsed(start);
        PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
                       ts_diff.tv_sec, ts_diff.tv_nsec);
-
-       return NULL;
 }
 
 static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
@@ -282,7 +279,6 @@ struct test_params {
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
        struct test_params *p = arg;
-       pthread_t *vcpu_threads;
        pthread_t *uffd_handler_threads = NULL;
        struct uffd_handler_args *uffd_args = NULL;
        struct timespec start;
@@ -293,9 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        int r;
 
        vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
-                                p->src_type);
-
-       perf_test_args.wr_fract = 1;
+                                p->src_type, p->partition_vcpu_memory_access);
 
        demand_paging_size = get_backing_src_pagesz(p->src_type);
 
@@ -304,12 +298,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                    "Failed to allocate buffer for guest data pattern");
        memset(guest_data_prototype, 0xAB, demand_paging_size);
 
-       vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
-       TEST_ASSERT(vcpu_threads, "Memory allocation failed");
-
-       perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
-                             p->partition_vcpu_memory_access);
-
        if (p->uffd_mode) {
                uffd_handler_threads =
                        malloc(nr_vcpus * sizeof(*uffd_handler_threads));
@@ -322,26 +310,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
 
                for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-                       vm_paddr_t vcpu_gpa;
+                       struct perf_test_vcpu_args *vcpu_args;
                        void *vcpu_hva;
                        void *vcpu_alias;
-                       uint64_t vcpu_mem_size;
-
 
-                       if (p->partition_vcpu_memory_access) {
-                               vcpu_gpa = guest_test_phys_mem +
-                                          (vcpu_id * guest_percpu_mem_size);
-                               vcpu_mem_size = guest_percpu_mem_size;
-                       } else {
-                               vcpu_gpa = guest_test_phys_mem;
-                               vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
-                       }
-                       PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-                                      vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
+                       vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
 
                        /* Cache the host addresses of the region */
-                       vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
-                       vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
+                       vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
+                       vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
 
                        /*
                         * Set up user fault fd to handle demand paging
@@ -355,32 +332,18 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                                            pipefds[vcpu_id * 2], p->uffd_mode,
                                            p->uffd_delay, &uffd_args[vcpu_id],
                                            vcpu_hva, vcpu_alias,
-                                           vcpu_mem_size);
+                                           vcpu_args->pages * perf_test_args.guest_page_size);
                }
        }
 
-       /* Export the shared variables to the guest */
-       sync_global_to_guest(vm, perf_test_args);
-
        pr_info("Finished creating vCPUs and starting uffd threads\n");
 
        clock_gettime(CLOCK_MONOTONIC, &start);
-
-       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-               pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
-                              &perf_test_args.vcpu_args[vcpu_id]);
-       }
-
+       perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
        pr_info("Started all vCPUs\n");
 
-       /* Wait for the vcpu threads to quit */
-       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-               pthread_join(vcpu_threads[vcpu_id], NULL);
-               PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id);
-       }
-
+       perf_test_join_vcpu_threads(nr_vcpus);
        ts_diff = timespec_elapsed(start);
-
        pr_info("All vCPU threads joined\n");
 
        if (p->uffd_mode) {
@@ -404,7 +367,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        perf_test_destroy_vm(vm);
 
        free(guest_data_prototype);
-       free(vcpu_threads);
        if (p->uffd_mode) {
                free(uffd_handler_threads);
                free(uffd_args);
index 7ffab5bd5ce55a59da6e128e226847cf0c765353..1954b964d1cf1d95efd067c9f8adc11c445f2d6c 100644 (file)
@@ -31,7 +31,7 @@ static bool host_quit;
 static int iteration;
 static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
-static void *vcpu_worker(void *data)
+static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
        int ret;
        struct kvm_vm *vm = perf_test_args.vm;
@@ -41,7 +41,6 @@ static void *vcpu_worker(void *data)
        struct timespec ts_diff;
        struct timespec total = (struct timespec){0};
        struct timespec avg;
-       struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
        int vcpu_id = vcpu_args->vcpu_id;
 
        run = vcpu_state(vm, vcpu_id);
@@ -83,8 +82,6 @@ static void *vcpu_worker(void *data)
        pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
                vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
                total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
-
-       return NULL;
 }
 
 struct test_params {
@@ -170,7 +167,6 @@ static void free_bitmaps(unsigned long *bitmaps[], int slots)
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
        struct test_params *p = arg;
-       pthread_t *vcpu_threads;
        struct kvm_vm *vm;
        unsigned long **bitmaps;
        uint64_t guest_num_pages;
@@ -186,9 +182,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        struct timespec clear_dirty_log_total = (struct timespec){0};
 
        vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
-                                p->slots, p->backing_src);
+                                p->slots, p->backing_src,
+                                p->partition_vcpu_memory_access);
 
-       perf_test_args.wr_fract = p->wr_fract;
+       perf_test_set_wr_fract(vm, p->wr_fract);
 
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -203,25 +200,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                vm_enable_cap(vm, &cap);
        }
 
-       vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
-       TEST_ASSERT(vcpu_threads, "Memory allocation failed");
-
-       perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
-                             p->partition_vcpu_memory_access);
-
-       sync_global_to_guest(vm, perf_test_args);
-
        /* Start the iterations */
        iteration = 0;
        host_quit = false;
 
        clock_gettime(CLOCK_MONOTONIC, &start);
-       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
                vcpu_last_completed_iteration[vcpu_id] = -1;
 
-               pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
-                              &perf_test_args.vcpu_args[vcpu_id]);
-       }
+       perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
 
        /* Allow the vCPUs to populate memory */
        pr_debug("Starting iteration %d - Populating\n", iteration);
@@ -290,8 +277,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        /* Tell the vcpu thread to quit */
        host_quit = true;
-       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
-               pthread_join(vcpu_threads[vcpu_id], NULL);
+       perf_test_join_vcpu_threads(nr_vcpus);
 
        avg = timespec_div(get_dirty_log_total, p->iterations);
        pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
@@ -306,7 +292,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        }
 
        free_bitmaps(bitmaps, p->slots);
-       free(vcpu_threads);
        perf_test_destroy_vm(vm);
 }
 
index 792c60e1b17ddd00263ebebb3b44131374c71024..3fcd89e195c7660fc5201d0f2cea802a46f558ef 100644 (file)
@@ -115,7 +115,7 @@ static void guest_code(void)
                        addr = guest_test_virt_mem;
                        addr += (READ_ONCE(random_array[i]) % guest_num_pages)
                                * guest_page_size;
-                       addr &= ~(host_page_size - 1);
+                       addr = align_down(addr, host_page_size);
                        *(uint64_t *)addr = READ_ONCE(iteration);
                }
 
@@ -737,14 +737,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        if (!p->phys_offset) {
                guest_test_phys_mem = (vm_get_max_gfn(vm) -
                                       guest_num_pages) * guest_page_size;
-               guest_test_phys_mem &= ~(host_page_size - 1);
+               guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
        } else {
                guest_test_phys_mem = p->phys_offset;
        }
 
 #ifdef __s390x__
        /* Align to 1M (segment size) */
-       guest_test_phys_mem &= ~((1 << 20) - 1);
+       guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
 #endif
 
        pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
index 6a1a37f30494b327e36c8b54ec9ddd8a6d0996bc..da2b702da71a4320d59f87f1df4ba34147123e0e 100644 (file)
@@ -71,6 +71,15 @@ enum vm_guest_mode {
 
 #endif
 
+#if defined(__x86_64__)
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
+#else
+static inline unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+       return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+}
+#endif
+
 #define MIN_PAGE_SIZE          (1U << MIN_PAGE_SHIFT)
 #define PTES_PER_MIN_PAGE      ptes_per_page(MIN_PAGE_SIZE)
 
index df9f1a3a3ffb9a5922ddff89e8557d6e039b3fcb..a86f953d8d36569389f786797f54b76dcb153114 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef SELFTEST_KVM_PERF_TEST_UTIL_H
 #define SELFTEST_KVM_PERF_TEST_UTIL_H
 
+#include <pthread.h>
+
 #include "kvm_util.h"
 
 /* Default guest test virtual memory offset */
@@ -18,6 +20,7 @@
 #define PERF_TEST_MEM_SLOT_INDEX       1
 
 struct perf_test_vcpu_args {
+       uint64_t gpa;
        uint64_t gva;
        uint64_t pages;
 
@@ -27,7 +30,7 @@ struct perf_test_vcpu_args {
 
 struct perf_test_args {
        struct kvm_vm *vm;
-       uint64_t host_page_size;
+       uint64_t gpa;
        uint64_t guest_page_size;
        int wr_fract;
 
@@ -36,19 +39,15 @@ struct perf_test_args {
 
 extern struct perf_test_args perf_test_args;
 
-/*
- * Guest physical memory offset of the testing memory slot.
- * This will be set to the topmost valid physical address minus
- * the test memory size.
- */
-extern uint64_t guest_test_phys_mem;
-
 struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
                                   uint64_t vcpu_memory_bytes, int slots,
-                                  enum vm_mem_backing_src_type backing_src);
+                                  enum vm_mem_backing_src_type backing_src,
+                                  bool partition_vcpu_memory_access);
 void perf_test_destroy_vm(struct kvm_vm *vm);
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
-                          uint64_t vcpu_memory_bytes,
-                          bool partition_vcpu_memory_access);
+
+void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
+
+void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
+void perf_test_join_vcpu_threads(int vcpus);
 
 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
index f8fddc84c0d3b3b18e5a5d791ae8f5da5ed6f759..99e0dcdc923fa2330ab13cd77171d7da7149a0b7 100644 (file)
@@ -104,6 +104,7 @@ size_t get_trans_hugepagesz(void);
 size_t get_def_hugetlb_pagesz(void);
 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
 size_t get_backing_src_pagesz(uint32_t i);
+bool is_backing_src_hugetlb(uint32_t i);
 void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 long get_run_delay(void);
@@ -117,4 +118,29 @@ static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
        return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
 }
 
+/* Aligns x up to the next multiple of size. Size must be a power of 2. */
+static inline uint64_t align_up(uint64_t x, uint64_t size)
+{
+       uint64_t mask = size - 1;
+
+       TEST_ASSERT(size != 0 && !(size & (size - 1)),
+                   "size not a power of 2: %lu", size);
+       return ((x + mask) & ~mask);
+}
+
+static inline uint64_t align_down(uint64_t x, uint64_t size)
+{
+       uint64_t x_aligned_up = align_up(x, size);
+
+       if (x == x_aligned_up)
+               return x;
+       else
+               return x_aligned_up - size;
+}
+
+static inline void *align_ptr_up(void *x, size_t size)
+{
+       return (void *)align_up((unsigned long)x, size);
+}
+
 #endif /* SELFTEST_KVM_TEST_UTIL_H */
index f968dfd4ee88929d523824c948ff8a1447120ed9..aed9dc3ca1e9eeb45246d643e48da4817f4c542c 100644 (file)
@@ -12,6 +12,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/resource.h>
 
 #include "test_util.h"
 
@@ -40,10 +41,39 @@ int main(int argc, char *argv[])
 {
        int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
        int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+       /*
+        * Number of file descriptors required, KVM_CAP_MAX_VCPUS for vCPU fds +
+        * an arbitrary number for everything else.
+        */
+       int nr_fds_wanted = kvm_max_vcpus + 100;
+       struct rlimit rl;
 
        pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
        pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
 
+       /*
+        * Check that we're allowed to open nr_fds_wanted file descriptors and
+        * try raising the limits if needed.
+        */
+       TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
+
+       if (rl.rlim_cur < nr_fds_wanted) {
+               rl.rlim_cur = nr_fds_wanted;
+               if (rl.rlim_max < nr_fds_wanted) {
+                       int old_rlim_max = rl.rlim_max;
+                       rl.rlim_max = nr_fds_wanted;
+
+                       int r = setrlimit(RLIMIT_NOFILE, &rl);
+                       if (r < 0) {
+                               printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+                                      old_rlim_max, nr_fds_wanted);
+                               exit(KSFT_SKIP);
+                       }
+               } else {
+                       TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
+               }
+       }
+
        /*
         * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
         * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
index 36407cb0ec85dcb62277f27215a19e2a285bb0f2..ba1fdc3dcf4a90319f1a9d7cd8dd9bbeaaa5f5b4 100644 (file)
@@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 #ifdef __s390x__
        alignment = max(0x100000, alignment);
 #endif
-       guest_test_phys_mem &= ~(alignment - 1);
+       guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
 
        /* Set up the shared data structure test_args */
        test_args.vm = vm;
index eac44f5d0db0347ea53c3f4ceb27104ec05fca58..13e8e3dcf984ec663c351d2f02f1158cc39ff54a 100644 (file)
@@ -157,8 +157,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
                        "memsize of 0,\n"
                        "  phdr index: %u p_memsz: 0x%" PRIx64,
                        n1, (uint64_t) phdr.p_memsz);
-               vm_vaddr_t seg_vstart = phdr.p_vaddr;
-               seg_vstart &= ~(vm_vaddr_t)(vm->page_size - 1);
+               vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
                vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
                seg_vend |= vm->page_size - 1;
                size_t seg_size = seg_vend - seg_vstart + 1;
index 14bb4d5b6bb7df6d1dbd0f3f0d92242dc59d602d..daf6fdb217a7649a80949b46ae31eb6eb93e6160 100644 (file)
 
 static int vcpu_mmap_sz(void);
 
-/* Aligns x up to the next multiple of size. Size must be a power of 2. */
-static void *align(void *x, size_t size)
-{
-       size_t mask = size - 1;
-       TEST_ASSERT(size != 0 && !(size & (size - 1)),
-                   "size not a power of 2: %lu", size);
-       return (void *) (((size_t) x + mask) & ~mask);
-}
-
 int open_path_or_exit(const char *path, int flags)
 {
        int fd;
@@ -191,15 +182,15 @@ const char *vm_guest_mode_string(uint32_t i)
 }
 
 const struct vm_guest_mode_params vm_guest_mode_params[] = {
-       { 52, 48,  0x1000, 12 },
-       { 52, 48, 0x10000, 16 },
-       { 48, 48,  0x1000, 12 },
-       { 48, 48, 0x10000, 16 },
-       { 40, 48,  0x1000, 12 },
-       { 40, 48, 0x10000, 16 },
-       {  0,  0,  0x1000, 12 },
-       { 47, 64,  0x1000, 12 },
-       { 44, 64,  0x1000, 12 },
+       [VM_MODE_P52V48_4K]     = { 52, 48,  0x1000, 12 },
+       [VM_MODE_P52V48_64K]    = { 52, 48, 0x10000, 16 },
+       [VM_MODE_P48V48_4K]     = { 48, 48,  0x1000, 12 },
+       [VM_MODE_P48V48_64K]    = { 48, 48, 0x10000, 16 },
+       [VM_MODE_P40V48_4K]     = { 40, 48,  0x1000, 12 },
+       [VM_MODE_P40V48_64K]    = { 40, 48, 0x10000, 16 },
+       [VM_MODE_PXXV48_4K]     = {  0,  0,  0x1000, 12 },
+       [VM_MODE_P47V64_4K]     = { 47, 64,  0x1000, 12 },
+       [VM_MODE_P44V64_4K]     = { 44, 64,  0x1000, 12 },
 };
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
               "Missing new mode params?");
@@ -311,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
                (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
 
        /* Limit physical addresses to PA-bits. */
-       vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+       vm->max_gfn = vm_compute_max_gfn(vm);
 
        /* Allocate and setup memory for guest. */
        vm->vpages_mapped = sparsebit_alloc();
@@ -879,9 +870,17 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        alignment = 1;
 #endif
 
+       /*
+        * When using THP, mmap is not guaranteed to return a hugepage-aligned
+        * address so we have to pad the mmap. Padding is not needed for HugeTLB
+        * because mmap will always return an address aligned to the HugeTLB
+        * page size.
+        */
        if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
                alignment = max(backing_src_pagesz, alignment);
 
+       ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+
        /* Add enough memory to align up if necessary */
        if (alignment > 1)
                region->mmap_size += alignment;
@@ -914,8 +913,13 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                    "test_malloc failed, mmap_start: %p errno: %i",
                    region->mmap_start, errno);
 
+       TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
+                   region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
+                   "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
+                   region->mmap_start, backing_src_pagesz);
+
        /* Align host address */
-       region->host_mem = align(region->mmap_start, alignment);
+       region->host_mem = align_ptr_up(region->mmap_start, alignment);
 
        /* As needed perform madvise */
        if ((src_type == VM_MEM_SRC_ANONYMOUS ||
@@ -958,7 +962,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                            "mmap of alias failed, errno: %i", errno);
 
                /* Align host alias address */
-               region->host_alias = align(region->mmap_alias, alignment);
+               region->host_alias = align_ptr_up(region->mmap_alias, alignment);
        }
 }
 
index 0ef80dbdc116c11868e43ded6da0b5bf20bad897..722df3a28791c3b7eb3fd967c9f1ea195f815796 100644 (file)
 
 struct perf_test_args perf_test_args;
 
-uint64_t guest_test_phys_mem;
-
 /*
  * Guest virtual memory offset of the testing memory slot.
  * Must not conflict with identity mapped test code.
  */
 static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
 
+struct vcpu_thread {
+       /* The id of the vCPU. */
+       int vcpu_id;
+
+       /* The pthread backing the vCPU. */
+       pthread_t thread;
+
+       /* Set to true once the vCPU thread is up and running. */
+       bool running;
+};
+
+/* The vCPU threads involved in this test. */
+static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
+
+/* The function run by each vCPU thread, as provided by the test. */
+static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
+
+/* Set to true once all vCPU threads are up and running. */
+static bool all_vcpu_threads_running;
+
 /*
  * Continuously write to the first 8 bytes of each page in the
  * specified region.
  */
 static void guest_code(uint32_t vcpu_id)
 {
-       struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+       struct perf_test_args *pta = &perf_test_args;
+       struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
        uint64_t gva;
        uint64_t pages;
        int i;
@@ -37,9 +56,9 @@ static void guest_code(uint32_t vcpu_id)
 
        while (true) {
                for (i = 0; i < pages; i++) {
-                       uint64_t addr = gva + (i * perf_test_args.guest_page_size);
+                       uint64_t addr = gva + (i * pta->guest_page_size);
 
-                       if (i % perf_test_args.wr_fract == 0)
+                       if (i % pta->wr_fract == 0)
                                *(uint64_t *)addr = 0x0123456789ABCDEF;
                        else
                                READ_ONCE(*(uint64_t *)addr);
@@ -49,35 +68,81 @@ static void guest_code(uint32_t vcpu_id)
        }
 }
 
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+                          uint64_t vcpu_memory_bytes,
+                          bool partition_vcpu_memory_access)
+{
+       struct perf_test_args *pta = &perf_test_args;
+       struct perf_test_vcpu_args *vcpu_args;
+       int vcpu_id;
+
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+               vcpu_args = &pta->vcpu_args[vcpu_id];
+
+               vcpu_args->vcpu_id = vcpu_id;
+               if (partition_vcpu_memory_access) {
+                       vcpu_args->gva = guest_test_virt_mem +
+                                        (vcpu_id * vcpu_memory_bytes);
+                       vcpu_args->pages = vcpu_memory_bytes /
+                                          pta->guest_page_size;
+                       vcpu_args->gpa = pta->gpa + (vcpu_id * vcpu_memory_bytes);
+               } else {
+                       vcpu_args->gva = guest_test_virt_mem;
+                       vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
+                                          pta->guest_page_size;
+                       vcpu_args->gpa = pta->gpa;
+               }
+
+               vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+
+               pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+                        vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
+                        (vcpu_args->pages * pta->guest_page_size));
+       }
+}
+
 struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
                                   uint64_t vcpu_memory_bytes, int slots,
-                                  enum vm_mem_backing_src_type backing_src)
+                                  enum vm_mem_backing_src_type backing_src,
+                                  bool partition_vcpu_memory_access)
 {
+       struct perf_test_args *pta = &perf_test_args;
        struct kvm_vm *vm;
        uint64_t guest_num_pages;
+       uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
        int i;
 
        pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-       perf_test_args.host_page_size = getpagesize();
-       perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
+       /* By default vCPUs will write to memory. */
+       pta->wr_fract = 1;
+
+       /*
+        * Snapshot the non-huge page size.  This is used by the guest code to
+        * access/dirty pages at the logging granularity.
+        */
+       pta->guest_page_size = vm_guest_mode_params[mode].page_size;
 
        guest_num_pages = vm_adjust_num_guest_pages(mode,
-                               (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
+                               (vcpus * vcpu_memory_bytes) / pta->guest_page_size);
 
-       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
+       TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
                    "Guest memory size is not host page size aligned.");
-       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
+       TEST_ASSERT(vcpu_memory_bytes % pta->guest_page_size == 0,
                    "Guest memory size is not guest page size aligned.");
        TEST_ASSERT(guest_num_pages % slots == 0,
                    "Guest memory cannot be evenly divided into %d slots.",
                    slots);
 
+       /*
+        * Pass guest_num_pages to populate the page tables for test memory.
+        * The memory is also added to memslot 0, but that's a benign side
+        * effect as KVM allows aliasing HVAs in memslots.
+        */
        vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
-                                 (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
-                                 0, guest_code, NULL);
+                                 guest_num_pages, 0, guest_code, NULL);
 
-       perf_test_args.vm = vm;
+       pta->vm = vm;
 
        /*
         * If there should be more memory in the guest test region than there
@@ -90,20 +155,18 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
                    guest_num_pages, vm_get_max_gfn(vm), vcpus,
                    vcpu_memory_bytes);
 
-       guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
-                             perf_test_args.guest_page_size;
-       guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
+       pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
+       pta->gpa = align_down(pta->gpa, backing_src_pagesz);
 #ifdef __s390x__
        /* Align to 1M (segment size) */
-       guest_test_phys_mem &= ~((1 << 20) - 1);
+       pta->gpa = align_down(pta->gpa, 1 << 20);
 #endif
-       pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+       pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa);
 
        /* Add extra memory slots for testing */
        for (i = 0; i < slots; i++) {
                uint64_t region_pages = guest_num_pages / slots;
-               vm_paddr_t region_start = guest_test_phys_mem +
-                       region_pages * perf_test_args.guest_page_size * i;
+               vm_paddr_t region_start = pta->gpa + region_pages * pta->guest_page_size * i;
 
                vm_userspace_mem_region_add(vm, backing_src, region_start,
                                            PERF_TEST_MEM_SLOT_INDEX + i,
@@ -111,10 +174,15 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
        }
 
        /* Do mapping for the demand paging memory slot */
-       virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
+       virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);
+
+       perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);
 
        ucall_init(vm, NULL);
 
+       /* Export the shared variables to the guest. */
+       sync_global_to_guest(vm, perf_test_args);
+
        return vm;
 }
 
@@ -124,36 +192,60 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
        kvm_vm_free(vm);
 }
 
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
-                          uint64_t vcpu_memory_bytes,
-                          bool partition_vcpu_memory_access)
+void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
+{
+       perf_test_args.wr_fract = wr_fract;
+       sync_global_to_guest(vm, perf_test_args);
+}
+
+static void *vcpu_thread_main(void *data)
+{
+       struct vcpu_thread *vcpu = data;
+
+       WRITE_ONCE(vcpu->running, true);
+
+       /*
+        * Wait for all vCPU threads to be up and running before calling the test-
+        * provided vCPU thread function. This prevents thread creation (which
+        * requires taking the mmap_sem in write mode) from interfering with the
+        * guest faulting in its memory.
+        */
+       while (!READ_ONCE(all_vcpu_threads_running))
+               ;
+
+       vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]);
+
+       return NULL;
+}
+
+void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *))
 {
-       vm_paddr_t vcpu_gpa;
-       struct perf_test_vcpu_args *vcpu_args;
        int vcpu_id;
 
+       vcpu_thread_fn = vcpu_fn;
+       WRITE_ONCE(all_vcpu_threads_running, false);
+
        for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-               vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+               struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id];
 
-               vcpu_args->vcpu_id = vcpu_id;
-               if (partition_vcpu_memory_access) {
-                       vcpu_args->gva = guest_test_virt_mem +
-                                        (vcpu_id * vcpu_memory_bytes);
-                       vcpu_args->pages = vcpu_memory_bytes /
-                                          perf_test_args.guest_page_size;
-                       vcpu_gpa = guest_test_phys_mem +
-                                  (vcpu_id * vcpu_memory_bytes);
-               } else {
-                       vcpu_args->gva = guest_test_virt_mem;
-                       vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
-                                          perf_test_args.guest_page_size;
-                       vcpu_gpa = guest_test_phys_mem;
-               }
+               vcpu->vcpu_id = vcpu_id;
+               WRITE_ONCE(vcpu->running, false);
 
-               vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+               pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
+       }
 
-               pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-                        vcpu_id, vcpu_gpa, vcpu_gpa +
-                        (vcpu_args->pages * perf_test_args.guest_page_size));
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+               while (!READ_ONCE(vcpu_threads[vcpu_id].running))
+                       ;
        }
+
+       WRITE_ONCE(all_vcpu_threads_running, true);
+}
+
+void perf_test_join_vcpu_threads(int vcpus)
+{
+       int vcpu_id;
+
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
+               pthread_join(vcpu_threads[vcpu_id].thread, NULL);
 }
index b724291089939380bd0e8fa6093d8ef5ad339092..6d23878bbfe1a644e8a1c7a2f30d05190edd5f05 100644 (file)
@@ -283,6 +283,11 @@ size_t get_backing_src_pagesz(uint32_t i)
        }
 }
 
+bool is_backing_src_hugetlb(uint32_t i)
+{
+       return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB);
+}
+
 static void print_available_backing_src_types(const char *prefix)
 {
        int i;
index 82c39db913699949648974ecb3530bad60431bea..eef7b34756d5ce40dbc2d6ea8751971245b2cfec 100644 (file)
@@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
 
        return cpuid;
 }
+
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+static inline unsigned x86_family(unsigned int eax)
+{
+        unsigned int x86;
+
+        x86 = (eax >> 8) & 0xf;
+
+        if (x86 == 0xf)
+                x86 += (eax >> 20) & 0xff;
+
+        return x86;
+}
+
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+       const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
+       unsigned long ht_gfn, max_gfn, max_pfn;
+       uint32_t eax, ebx, ecx, edx, max_ext_leaf;
+
+       max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+
+       /* Avoid reserved HyperTransport region on AMD processors.  */
+       eax = ecx = 0;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ||
+           ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx ||
+           edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+               return max_gfn;
+
+       /* On parts with <40 physical address bits, the area is fully hidden */
+       if (vm->pa_bits < 40)
+               return max_gfn;
+
+       /* Before family 17h, the HyperTransport area is just below 1T.  */
+       ht_gfn = (1 << 28) - num_ht_pages;
+       eax = 1;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       if (x86_family(eax) < 0x17)
+               goto done;
+
+       /*
+        * Otherwise it's at the top of the physical address space, possibly
+        * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX.  Use
+        * the old conservative value if MAXPHYADDR is not enumerated.
+        */
+       eax = 0x80000000;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       max_ext_leaf = eax;
+       if (max_ext_leaf < 0x80000008)
+               goto done;
+
+       eax = 0x80000008;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
+       if (max_ext_leaf >= 0x8000001f) {
+               eax = 0x8000001f;
+               cpuid(&eax, &ebx, &ecx, &edx);
+               max_pfn >>= (ebx >> 6) & 0x3f;
+       }
+
+       ht_gfn = max_pfn - num_ht_pages;
+done:
+       return min(max_gfn, ht_gfn - 1);
+}
index 4cfcafea9f5a6eb60a3188425697e8c3dcd3c1e5..1410d0a9141a1e41d61e0b635e20bb7f68a58626 100644 (file)
@@ -36,11 +36,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 
 static bool run_vcpus = true;
 
-static void *vcpu_worker(void *data)
+static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
        int ret;
-       struct perf_test_vcpu_args *vcpu_args =
-               (struct perf_test_vcpu_args *)data;
        int vcpu_id = vcpu_args->vcpu_id;
        struct kvm_vm *vm = perf_test_args.vm;
        struct kvm_run *run;
@@ -59,8 +57,6 @@ static void *vcpu_worker(void *data)
                            "Invalid guest sync status: exit_reason=%s\n",
                            exit_reason_str(run->exit_reason));
        }
-
-       return NULL;
 }
 
 struct memslot_antagonist_args {
@@ -80,7 +76,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
         * Add the dummy memslot just below the perf_test_util memslot, which is
         * at the top of the guest physical address space.
         */
-       gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
+       gpa = perf_test_args.gpa - pages * vm_get_page_size(vm);
 
        for (i = 0; i < nr_modifications; i++) {
                usleep(delay);
@@ -100,29 +96,15 @@ struct test_params {
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
        struct test_params *p = arg;
-       pthread_t *vcpu_threads;
        struct kvm_vm *vm;
-       int vcpu_id;
 
        vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
-                                VM_MEM_SRC_ANONYMOUS);
-
-       perf_test_args.wr_fract = 1;
-
-       vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
-       TEST_ASSERT(vcpu_threads, "Memory allocation failed");
-
-       perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
-                             p->partition_vcpu_memory_access);
-
-       /* Export the shared variables to the guest */
-       sync_global_to_guest(vm, perf_test_args);
+                                VM_MEM_SRC_ANONYMOUS,
+                                p->partition_vcpu_memory_access);
 
        pr_info("Finished creating vCPUs\n");
 
-       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
-               pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
-                              &perf_test_args.vcpu_args[vcpu_id]);
+       perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
 
        pr_info("Started all vCPUs\n");
 
@@ -131,16 +113,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        run_vcpus = false;
 
-       /* Wait for the vcpu threads to quit */
-       for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
-               pthread_join(vcpu_threads[vcpu_id], NULL);
-
+       perf_test_join_vcpu_threads(nr_vcpus);
        pr_info("All vCPU threads joined\n");
 
-       ucall_uninit(vm);
-       kvm_vm_free(vm);
-
-       free(vcpu_threads);
+       perf_test_destroy_vm(vm);
 }
 
 static void help(char *name)
index 91d88aaa989928723be6aa575bc967082b16ca07..672915ce73d8f6a363b5e2ea690f3fc44e7c9b01 100644 (file)
@@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
        vcpu_set_cpuid(vm, VCPU_ID, cpuid);
 }
 
-static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
-                                  struct kvm_cpuid2 *best)
+static void guest_test_msrs_access(void)
 {
        struct kvm_run *run;
+       struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0, r;
        struct kvm_cpuid_entry2 feat = {
@@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
        struct kvm_cpuid_entry2 dbg = {
                .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
        };
-       struct kvm_enable_cap cap = {0};
-
-       run = vcpu_state(vm, VCPU_ID);
+       struct kvm_cpuid2 *best;
+       vm_vaddr_t msr_gva;
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+               .args = {1}
+       };
+       struct msr_data *msr;
 
        while (true) {
+               vm = vm_create_default(VCPU_ID, 0, guest_msr);
+
+               msr_gva = vm_vaddr_alloc_page(vm);
+               memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
+               msr = addr_gva2hva(vm, msr_gva);
+
+               vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
+               vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+               vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+               best = kvm_get_supported_hv_cpuid();
+
+               vm_init_descriptor_tables(vm);
+               vcpu_init_descriptor_tables(vm, VCPU_ID);
+               vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+
+               run = vcpu_state(vm, VCPU_ID);
+
                switch (stage) {
                case 0:
                        /*
@@ -315,6 +338,7 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
                         * capability enabled and guest visible CPUID bit unset.
                         */
                        cap.cap = KVM_CAP_HYPERV_SYNIC2;
+                       cap.args[0] = 0;
                        vcpu_enable_cap(vm, VCPU_ID, &cap);
                        break;
                case 22:
@@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
 
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_SYNC:
-                       TEST_ASSERT(uc.args[1] == stage,
-                                   "Unexpected stage: %ld (%d expected)\n",
-                                   uc.args[1], stage);
+                       TEST_ASSERT(uc.args[1] == 0,
+                                   "Unexpected stage: %ld (0 expected)\n",
+                                   uc.args[1]);
                        break;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
                }
 
                stage++;
+               kvm_vm_free(vm);
        }
 }
 
-static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
-                                    void *input, void *output, struct kvm_cpuid2 *best)
+static void guest_test_hcalls_access(void)
 {
        struct kvm_run *run;
+       struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0, r;
        struct kvm_cpuid_entry2 feat = {
@@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
        struct kvm_cpuid_entry2 dbg = {
                .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
        };
-
-       run = vcpu_state(vm, VCPU_ID);
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+               .args = {1}
+       };
+       vm_vaddr_t hcall_page, hcall_params;
+       struct hcall_data *hcall;
+       struct kvm_cpuid2 *best;
 
        while (true) {
+               vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+
+               vm_init_descriptor_tables(vm);
+               vcpu_init_descriptor_tables(vm, VCPU_ID);
+               vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
+               /* Hypercall input/output */
+               hcall_page = vm_vaddr_alloc_pages(vm, 2);
+               hcall = addr_gva2hva(vm, hcall_page);
+               memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
+
+               hcall_params = vm_vaddr_alloc_page(vm);
+               memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
+
+               vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+               vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+               vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+               best = kvm_get_supported_hv_cpuid();
+
+               run = vcpu_state(vm, VCPU_ID);
+
                switch (stage) {
                case 0:
                        hcall->control = 0xdeadbeef;
@@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
 
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_SYNC:
-                       TEST_ASSERT(uc.args[1] == stage,
-                                   "Unexpected stage: %ld (%d expected)\n",
-                                   uc.args[1], stage);
+                       TEST_ASSERT(uc.args[1] == 0,
+                                   "Unexpected stage: %ld (0 expected)\n",
+                                   uc.args[1]);
                        break;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
                }
 
                stage++;
+               kvm_vm_free(vm);
        }
 }
 
 int main(void)
 {
-       struct kvm_cpuid2 *best;
-       struct kvm_vm *vm;
-       vm_vaddr_t msr_gva, hcall_page, hcall_params;
-       struct kvm_enable_cap cap = {
-               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
-               .args = {1}
-       };
-
-       /* Test MSRs */
-       vm = vm_create_default(VCPU_ID, 0, guest_msr);
-
-       msr_gva = vm_vaddr_alloc_page(vm);
-       memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
-       vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
-       vcpu_enable_cap(vm, VCPU_ID, &cap);
-
-       vcpu_set_hv_cpuid(vm, VCPU_ID);
-
-       best = kvm_get_supported_hv_cpuid();
-
-       vm_init_descriptor_tables(vm);
-       vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
-
        pr_info("Testing access to Hyper-V specific MSRs\n");
-       guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
-                              best);
-       kvm_vm_free(vm);
-
-       /* Test hypercalls */
-       vm = vm_create_default(VCPU_ID, 0, guest_hcall);
-
-       vm_init_descriptor_tables(vm);
-       vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
-
-       /* Hypercall input/output */
-       hcall_page = vm_vaddr_alloc_pages(vm, 2);
-       memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
-
-       hcall_params = vm_vaddr_alloc_page(vm);
-       memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
-
-       vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
-       vcpu_enable_cap(vm, VCPU_ID, &cap);
-
-       vcpu_set_hv_cpuid(vm, VCPU_ID);
-
-       best = kvm_get_supported_hv_cpuid();
+       guest_test_msrs_access();
 
        pr_info("Testing access to Hyper-V hypercalls\n");
-       guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
-                                addr_gva2hva(vm, hcall_page),
-                                addr_gva2hva(vm, hcall_page) + getpagesize(),
-                                best);
-
-       kvm_vm_free(vm);
+       guest_test_hcalls_access();
 }
index 5ba325cd64bfd80c02dd3ff441bb5330070700b2..29b18d565cf4ce1cfbbce5b29e4298dc781e99c1 100644 (file)
@@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es)
        return vm;
 }
 
-static struct kvm_vm *__vm_create(void)
+static struct kvm_vm *aux_vm_create(bool with_vcpus)
 {
        struct kvm_vm *vm;
        int i;
 
        vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+       if (!with_vcpus)
+               return vm;
+
        for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
                vm_vcpu_add(vm, i);
 
@@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es)
 {
        struct kvm_vm *src_vm;
        struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
-       int i;
+       int i, ret;
 
        src_vm = sev_vm_create(es);
        for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
-               dst_vms[i] = __vm_create();
+               dst_vms[i] = aux_vm_create(true);
 
        /* Initial migration from the src to the first dst. */
        sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
@@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es)
                sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
 
        /* Migrate the guest back to the original VM. */
-       sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+       ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+       TEST_ASSERT(ret == -1 && errno == EIO,
+                   "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
+                   errno);
 
        kvm_vm_free(src_vm);
        for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
@@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void)
 
        for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
                pthread_join(pt[i], NULL);
+       for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
+               kvm_vm_free(input[i].vm);
 }
 
 static void test_sev_migrate_parameters(void)
@@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void)
        sev_vm = sev_vm_create(/* es= */ false);
        sev_es_vm = sev_vm_create(/* es= */ true);
        vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
-       vm_no_sev = __vm_create();
+       vm_no_sev = aux_vm_create(true);
        sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
        sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
        vm_vcpu_add(sev_es_vm_no_vmsa, 1);
 
-
        ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
        TEST_ASSERT(
                ret == -1 && errno == EINVAL,
@@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void)
        TEST_ASSERT(ret == -1 && errno == EINVAL,
                    "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
                    errno);
+
+       kvm_vm_free(sev_vm);
+       kvm_vm_free(sev_es_vm);
+       kvm_vm_free(sev_es_vm_no_vmsa);
+       kvm_vm_free(vm_no_vcpu);
+       kvm_vm_free(vm_no_sev);
+}
+
+static int __sev_mirror_create(int dst_fd, int src_fd)
+{
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
+               .args = { src_fd }
+       };
+
+       return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+}
+
+
+static void sev_mirror_create(int dst_fd, int src_fd)
+{
+       int ret;
+
+       ret = __sev_mirror_create(dst_fd, src_fd);
+       TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
+}
+
+static void test_sev_mirror(bool es)
+{
+       struct kvm_vm *src_vm, *dst_vm;
+       struct kvm_sev_launch_start start = {
+               .policy = es ? SEV_POLICY_ES : 0
+       };
+       int i;
+
+       src_vm = sev_vm_create(es);
+       dst_vm = aux_vm_create(false);
+
+       sev_mirror_create(dst_vm->fd, src_vm->fd);
+
+       /* Check that we can complete creation of the mirror VM.  */
+       for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
+               vm_vcpu_add(dst_vm, i);
+       sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start);
+       if (es)
+               sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+
+       kvm_vm_free(src_vm);
+       kvm_vm_free(dst_vm);
+}
+
+static void test_sev_mirror_parameters(void)
+{
+       struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
+       int ret;
+
+       sev_vm = sev_vm_create(/* es= */ false);
+       sev_es_vm = sev_vm_create(/* es= */ true);
+       vm_with_vcpu = aux_vm_create(true);
+       vm_no_vcpu = aux_vm_create(false);
+
+       ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to self. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
+       TEST_ASSERT(ret == -1 && errno == EINVAL,
+                   "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
+                   errno);
+
+       ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
+               ret, errno);
+
+       kvm_vm_free(sev_vm);
+       kvm_vm_free(sev_es_vm);
+       kvm_vm_free(vm_with_vcpu);
+       kvm_vm_free(vm_no_vcpu);
+}
+
+static void test_sev_move_copy(void)
+{
+       struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
+       int ret;
+
+       sev_vm = sev_vm_create(/* es= */ false);
+       dst_vm = aux_vm_create(true);
+       mirror_vm = aux_vm_create(false);
+       dst_mirror_vm = aux_vm_create(false);
+
+       sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+       ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+       TEST_ASSERT(ret == -1 && errno == EBUSY,
+                   "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+                   errno);
+
+       /* The mirror itself can be migrated.  */
+       sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
+       ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+       TEST_ASSERT(ret == -1 && errno == EBUSY,
+                   "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+                   errno);
+
+       /*
+        * mirror_vm is not a mirror anymore, dst_mirror_vm is.  Thus,
+        * the owner can be copied as soon as dst_mirror_vm is gone.
+        */
+       kvm_vm_free(dst_mirror_vm);
+       sev_migrate_from(dst_vm->fd, sev_vm->fd);
+
+       kvm_vm_free(mirror_vm);
+       kvm_vm_free(dst_vm);
+       kvm_vm_free(sev_vm);
 }
 
 int main(int argc, char *argv[])
 {
-       test_sev_migrate_from(/* es= */ false);
-       test_sev_migrate_from(/* es= */ true);
-       test_sev_migrate_locking();
-       test_sev_migrate_parameters();
+       if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
+               test_sev_migrate_from(/* es= */ false);
+               test_sev_migrate_from(/* es= */ true);
+               test_sev_migrate_locking();
+               test_sev_migrate_parameters();
+               if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
+                       test_sev_move_copy();
+       }
+       if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
+               test_sev_mirror(/* es= */ false);
+               test_sev_mirror(/* es= */ true);
+               test_sev_mirror_parameters();
+       }
        return 0;
 }
index df04f56ce859a0b40b770228dc72516190c5f526..30a81038df460481efe853a722eded2205b5a90a 100644 (file)
@@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm)
        vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
 
        /* No intercepts for real and virtual interrupts */
-       vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);
+       vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
 
        /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
        vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
new file mode 100644 (file)
index 0000000..e4bef2e
--- /dev/null
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID                        1
+
+static void guest_ins_port80(uint8_t *buffer, unsigned int count)
+{
+       unsigned long end;
+
+       if (count == 2)
+               end = (unsigned long)buffer + 1;
+       else
+               end = (unsigned long)buffer + 8192;
+
+       asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
+       GUEST_ASSERT_1(count == 0, count);
+       GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end);
+}
+
+static void guest_code(void)
+{
+       uint8_t buffer[8192];
+       int i;
+
+       /*
+        * Special case tests.  main() will adjust RCX 2 => 1 and 3 => 8192 to
+        * test that KVM doesn't explode when userspace modifies the "count" on
+        * a userspace I/O exit.  KVM isn't required to play nice with the I/O
+        * itself as KVM doesn't support manipulating the count, it just needs
+        * to not explode or overflow a buffer.
+        */
+       guest_ins_port80(buffer, 2);
+       guest_ins_port80(buffer, 3);
+
+       /* Verify KVM fills the buffer correctly when not stuffing RCX. */
+       memset(buffer, 0, sizeof(buffer));
+       guest_ins_port80(buffer, 8192);
+       for (i = 0; i < 8192; i++)
+               GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]);
+
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_regs regs;
+       struct kvm_run *run;
+       struct kvm_vm *vm;
+       struct ucall uc;
+       int rc;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+       run = vcpu_state(vm, VCPU_ID);
+
+       memset(&regs, 0, sizeof(regs));
+
+       while (1) {
+               rc = _vcpu_run(vm, VCPU_ID);
+
+               TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Unexpected exit reason: %u (%s),\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               if (get_ucall(vm, VCPU_ID, &uc))
+                       break;
+
+               TEST_ASSERT(run->io.port == 0x80,
+                           "Expected I/O at port 0x80, got port 0x%x\n", run->io.port);
+
+               /*
+                * Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
+                * Note, this abuses KVM's batching of rep string I/O to avoid
+                * getting stuck in an infinite loop.  That behavior isn't in
+                * scope from a testing perspective as it's not ABI in any way,
+                * i.e. it really is abusing internal KVM knowledge.
+                */
+               vcpu_regs_get(vm, VCPU_ID, &regs);
+               if (regs.rcx == 2)
+                       regs.rcx = 1;
+               if (regs.rcx == 3)
+                       regs.rcx = 8192;
+               memset((void *)run + run->io.data_offset, 0xaa, 4096);
+               vcpu_regs_set(vm, VCPU_ID, &regs);
+       }
+
+       switch (uc.cmd) {
+       case UCALL_DONE:
+               break;
+       case UCALL_ABORT:
+               TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx",
+                         (const char *)uc.args[0], __FILE__, uc.args[1],
+                         uc.args[2], uc.args[3]);
+       default:
+               TEST_FAIL("Unknown ucall %lu", uc.cmd);
+       }
+
+       kvm_vm_free(vm);
+       return 0;
+}
index eda0d2a51224bd1c709d9d52352a9e81be2a29bb..a0699f00b3d6ed72259592918249765880f3d248 100644 (file)
 
 #define PVTIME_ADDR    (SHINFO_REGION_GPA + PAGE_SIZE)
 #define RUNSTATE_ADDR  (SHINFO_REGION_GPA + PAGE_SIZE + 0x20)
+#define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40)
 
 #define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + 0x20)
+#define VCPU_INFO_VADDR        (SHINFO_REGION_GVA + 0x40)
+
+#define EVTCHN_VECTOR  0x10
 
 static struct kvm_vm *vm;
 
@@ -56,15 +60,44 @@ struct vcpu_runstate_info {
     uint64_t time[4];
 };
 
+struct arch_vcpu_info {
+    unsigned long cr2;
+    unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
+};
+
+struct vcpu_info {
+        uint8_t evtchn_upcall_pending;
+        uint8_t evtchn_upcall_mask;
+        unsigned long evtchn_pending_sel;
+        struct arch_vcpu_info arch;
+        struct pvclock_vcpu_time_info time;
+}; /* 64 bytes (x86) */
+
 #define RUNSTATE_running  0
 #define RUNSTATE_runnable 1
 #define RUNSTATE_blocked  2
 #define RUNSTATE_offline  3
 
+static void evtchn_handler(struct ex_regs *regs)
+{
+       struct vcpu_info *vi = (void *)VCPU_INFO_VADDR;
+       vi->evtchn_upcall_pending = 0;
+
+       GUEST_SYNC(0x20);
+}
+
 static void guest_code(void)
 {
        struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
 
+       __asm__ __volatile__(
+               "sti\n"
+               "nop\n"
+       );
+
+       /* Trigger an interrupt injection */
+       GUEST_SYNC(0);
+
        /* Test having the host set runstates manually */
        GUEST_SYNC(RUNSTATE_runnable);
        GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
@@ -153,7 +186,7 @@ int main(int argc, char *argv[])
 
        struct kvm_xen_vcpu_attr vi = {
                .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
-               .u.gpa = SHINFO_REGION_GPA + 0x40,
+               .u.gpa = VCPU_INFO_ADDR,
        };
        vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi);
 
@@ -163,6 +196,16 @@ int main(int argc, char *argv[])
        };
        vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock);
 
+       struct kvm_xen_hvm_attr vec = {
+               .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
+               .u.vector = EVTCHN_VECTOR,
+       };
+       vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
+
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+       vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
+
        if (do_runstate_tests) {
                struct kvm_xen_vcpu_attr st = {
                        .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
@@ -171,9 +214,14 @@ int main(int argc, char *argv[])
                vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st);
        }
 
+       struct vcpu_info *vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
+       vinfo->evtchn_upcall_pending = 0;
+
        struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
        rs->state = 0x5a;
 
+       bool evtchn_irq_expected = false;
+
        for (;;) {
                volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
                struct ucall uc;
@@ -193,16 +241,21 @@ int main(int argc, char *argv[])
                        struct kvm_xen_vcpu_attr rst;
                        long rundelay;
 
-                       /* If no runstate support, bail out early */
-                       if (!do_runstate_tests)
-                               goto done;
-
-                       TEST_ASSERT(rs->state_entry_time == rs->time[0] +
-                                   rs->time[1] + rs->time[2] + rs->time[3],
-                                   "runstate times don't add up");
+                       if (do_runstate_tests)
+                               TEST_ASSERT(rs->state_entry_time == rs->time[0] +
+                                           rs->time[1] + rs->time[2] + rs->time[3],
+                                           "runstate times don't add up");
 
                        switch (uc.args[1]) {
-                       case RUNSTATE_running...RUNSTATE_offline:
+                       case 0:
+                               evtchn_irq_expected = true;
+                               vinfo->evtchn_upcall_pending = 1;
+                               break;
+
+                       case RUNSTATE_runnable...RUNSTATE_offline:
+                               TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
+                               if (!do_runstate_tests)
+                                       goto done;
                                rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
                                rst.u.runstate.state = uc.args[1];
                                vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst);
@@ -236,6 +289,10 @@ int main(int argc, char *argv[])
                                        sched_yield();
                                } while (get_run_delay() < rundelay);
                                break;
+                       case 0x20:
+                               TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
+                               evtchn_irq_expected = false;
+                               break;
                        }
                        break;
                }
index 7615f29831ebd6f0961ad7b0fb650c8cdf12fc2c..9897fa9ab95373f292bff1bffaf989fa5271eafc 100644 (file)
@@ -34,6 +34,7 @@ TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
 TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
 TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
 TEST_PROGS += vrf_strict_mode_test.sh
+TEST_PROGS += arp_ndisc_evict_nocarrier.sh
 TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
 TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
 TEST_GEN_FILES =  socket nettest
index 3313566ce9062e285a9872ac2ce6c2ab3c1bc652..a1da013d847b9a2fb86b450ab4be29edcd9aea16 100755 (executable)
@@ -4002,8 +4002,8 @@ EOF
 ################################################################################
 # main
 
-TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter"
-TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter"
+TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
+TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
 TESTS_OTHER="use_cases"
 
 PAUSE_ON_FAIL=no
@@ -4077,3 +4077,11 @@ cleanup 2>/dev/null
 
 printf "\nTests passed: %3d\n" ${nsuccess}
 printf "Tests failed: %3d\n"   ${nfail}
+
+if [ $nfail -ne 0 ]; then
+       exit 1 # KSFT_FAIL
+elif [ $nsuccess -eq 0 ]; then
+       exit $ksft_skip
+fi
+
+exit 0 # KSFT_PASS
index b5a69ad191b078838c19215cd070039964ab9fbd..d444ee6aa3cb0f45725434fa03b248c01f311872 100755 (executable)
@@ -629,6 +629,66 @@ ipv6_fcnal()
        log_test $? 0 "Nexthops removed on admin down"
 }
 
+ipv6_grp_refs()
+{
+       if [ ! -x "$(command -v mausezahn)" ]; then
+               echo "SKIP: Could not run test; need mausezahn tool"
+               return
+       fi
+
+       run_cmd "$IP link set dev veth1 up"
+       run_cmd "$IP link add veth1.10 link veth1 up type vlan id 10"
+       run_cmd "$IP link add veth1.20 link veth1 up type vlan id 20"
+       run_cmd "$IP -6 addr add 2001:db8:91::1/64 dev veth1.10"
+       run_cmd "$IP -6 addr add 2001:db8:92::1/64 dev veth1.20"
+       run_cmd "$IP -6 neigh add 2001:db8:91::2 lladdr 00:11:22:33:44:55 dev veth1.10"
+       run_cmd "$IP -6 neigh add 2001:db8:92::2 lladdr 00:11:22:33:44:55 dev veth1.20"
+       run_cmd "$IP nexthop add id 100 via 2001:db8:91::2 dev veth1.10"
+       run_cmd "$IP nexthop add id 101 via 2001:db8:92::2 dev veth1.20"
+       run_cmd "$IP nexthop add id 102 group 100"
+       run_cmd "$IP route add 2001:db8:101::1/128 nhid 102"
+
+       # create per-cpu dsts through nh 100
+       run_cmd "ip netns exec me mausezahn -6 veth1.10 -B 2001:db8:101::1 -A 2001:db8:91::1 -c 5 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1"
+
+       # remove nh 100 from the group to delete the route potentially leaving
+       # a stale per-cpu dst which holds a reference to the nexthop's net
+       # device and to the IPv6 route
+       run_cmd "$IP nexthop replace id 102 group 101"
+       run_cmd "$IP route del 2001:db8:101::1/128"
+
+       # add both nexthops to the group so a reference is taken on them
+       run_cmd "$IP nexthop replace id 102 group 100/101"
+
+       # if the bug described in commit "net: nexthop: release IPv6 per-cpu
+       # dsts when replacing a nexthop group" exists at this point we have
+       # an unlinked IPv6 route (but not freed due to stale dst) with a
+       # reference over the group so we delete the group which will again
+       # only unlink it due to the route reference
+       run_cmd "$IP nexthop del id 102"
+
+       # delete the nexthop with stale dst, since we have an unlinked
+       # group with a ref to it and an unlinked IPv6 route with ref to the
+       # group, the nh will only be unlinked and not freed so the stale dst
+       # remains forever and we get a net device refcount imbalance
+       run_cmd "$IP nexthop del id 100"
+
+       # if a reference was lost this command will hang because the net device
+       # cannot be removed
+       timeout -s KILL 5 ip netns exec me ip link del veth1.10 >/dev/null 2>&1
+
+       # we can't cleanup if the command is hung trying to delete the netdev
+       if [ $? -eq 137 ]; then
+               return 1
+       fi
+
+       # cleanup
+       run_cmd "$IP link del veth1.20"
+       run_cmd "$IP nexthop flush"
+
+       return 0
+}
+
 ipv6_grp_fcnal()
 {
        local rc
@@ -734,6 +794,9 @@ ipv6_grp_fcnal()
 
        run_cmd "$IP nexthop add id 108 group 31/24"
        log_test $? 2 "Nexthop group can not have a blackhole and another nexthop"
+
+       ipv6_grp_refs
+       log_test $? 0 "Nexthop group replace refcounts"
 }
 
 ipv6_res_grp_fcnal()
index 5abe92d55b696af2f45291752d48c03f378f3d96..996af1ae3d3ddb4cf4f97eb0dec71c9b99c007f2 100755 (executable)
@@ -444,24 +444,63 @@ fib_rp_filter_test()
        setup
 
        set -e
+       ip netns add ns2
+       ip netns set ns2 auto
+
+       ip -netns ns2 link set dev lo up
+
+       $IP link add name veth1 type veth peer name veth2
+       $IP link set dev veth2 netns ns2
+       $IP address add 192.0.2.1/24 dev veth1
+       ip -netns ns2 address add 192.0.2.1/24 dev veth2
+       $IP link set dev veth1 up
+       ip -netns ns2 link set dev veth2 up
+
        $IP link set dev lo address 52:54:00:6a:c7:5e
-       $IP link set dummy0 address 52:54:00:6a:c7:5e
-       $IP link add dummy1 type dummy
-       $IP link set dummy1 address 52:54:00:6a:c7:5e
-       $IP link set dev dummy1 up
+       $IP link set dev veth1 address 52:54:00:6a:c7:5e
+       ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e
+       ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e
+
+       # 1. (ns2) redirect lo's egress to veth2's egress
+       ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel
+       ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \
+               action mirred egress redirect dev veth2
+       ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \
+               action mirred egress redirect dev veth2
+
+       # 2. (ns1) redirect veth1's ingress to lo's ingress
+       $NS_EXEC tc qdisc add dev veth1 ingress
+       $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \
+               action mirred ingress redirect dev lo
+       $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \
+               action mirred ingress redirect dev lo
+
+       # 3. (ns1) redirect lo's egress to veth1's egress
+       $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel
+       $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \
+               action mirred egress redirect dev veth1
+       $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \
+               action mirred egress redirect dev veth1
+
+       # 4. (ns2) redirect veth2's ingress to lo's ingress
+       ip netns exec ns2 tc qdisc add dev veth2 ingress
+       ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \
+               action mirred ingress redirect dev lo
+       ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \
+               action mirred ingress redirect dev lo
+
        $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1
        $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1
        $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1
-
-       $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel
-       $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo
-       $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1
        set +e
 
-       run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1"
+       run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1"
        log_test $? 0 "rp_filter passes local packets"
 
-       run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1"
+       run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1"
        log_test $? 0 "rp_filter passes loopback packets"
 
        cleanup
index a4bd1b0873035e35558d1c8f51a0b38bb777ca7b..697994a9278bbe50edc5bfeca33af9eeb7565544 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NET_VRF=m
 CONFIG_BPF_SYSCALL=y
 CONFIG_CGROUP_BPF=y
+CONFIG_NET_ACT_CT=m
 CONFIG_NET_ACT_MIRRED=m
 CONFIG_NET_ACT_MPLS=m
 CONFIG_NET_ACT_VLAN=m
index d9eca227136bb6d4a1c7921251190c65d76fcb04..de19eb6c38f0423949b4a62398f8a27f6f4c5987 100755 (executable)
@@ -3,7 +3,7 @@
 
 ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
        mirred_egress_mirror_test matchall_mirred_egress_mirror_test \
-       gact_trap_test"
+       gact_trap_test mirred_egress_to_ingress_test"
 NUM_NETIFS=4
 source tc_common.sh
 source lib.sh
@@ -13,10 +13,12 @@ tcflags="skip_hw"
 h1_create()
 {
        simple_if_init $h1 192.0.2.1/24
+       tc qdisc add dev $h1 clsact
 }
 
 h1_destroy()
 {
+       tc qdisc del dev $h1 clsact
        simple_if_fini $h1 192.0.2.1/24
 }
 
@@ -153,6 +155,49 @@ gact_trap_test()
        log_test "trap ($tcflags)"
 }
 
+mirred_egress_to_ingress_test()
+{
+       RET=0
+
+       tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
+               ip_proto icmp src_ip 192.0.2.1 dst_ip 192.0.2.2 type 8 action \
+                       ct commit nat src addr 192.0.2.2 pipe \
+                       ct clear pipe \
+                       ct commit nat dst addr 192.0.2.1 pipe \
+                       mirred ingress redirect dev $h1
+
+       tc filter add dev $swp1 protocol ip pref 11 handle 111 ingress flower \
+               ip_proto icmp src_ip 192.0.2.1 dst_ip 192.0.2.2 type 8 action drop
+       tc filter add dev $swp1 protocol ip pref 12 handle 112 ingress flower \
+               ip_proto icmp src_ip 192.0.2.1 dst_ip 192.0.2.2 type 0 action pass
+
+       $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+               -t icmp "ping,id=42,seq=10" -q
+
+       tc_check_packets "dev $h1 egress" 100 1
+       check_err $? "didn't mirror first packet"
+
+       tc_check_packets "dev $swp1 ingress" 111 1
+       check_fail $? "didn't redirect first packet"
+       tc_check_packets "dev $swp1 ingress" 112 1
+       check_err $? "didn't receive reply to first packet"
+
+       ping 192.0.2.2 -I$h1 -c1 -w1 -q 1>/dev/null 2>&1
+
+       tc_check_packets "dev $h1 egress" 100 2
+       check_err $? "didn't mirror second packet"
+       tc_check_packets "dev $swp1 ingress" 111 1
+       check_fail $? "didn't redirect second packet"
+       tc_check_packets "dev $swp1 ingress" 112 2
+       check_err $? "didn't receive reply to second packet"
+
+       tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
+       tc filter del dev $swp1 ingress protocol ip pref 11 handle 111 flower
+       tc filter del dev $swp1 ingress protocol ip pref 12 handle 112 flower
+
+       log_test "mirred_egress_to_ingress ($tcflags)"
+}
+
 setup_prepare()
 {
        h1=${NETIFS[p1]}
index fdeb44d621eb937bafd79572a36dee25dea6b21a..3224651db97b8835598c28ae4d64760a3d1c0317 100755 (executable)
@@ -118,16 +118,18 @@ gre_gst_test_checks()
        local addr=$2
        local proto=$3
 
-       $NS_EXEC nc $proto -kl $port >/dev/null &
+       [ "$proto" == 6 ] && addr="[$addr]"
+
+       $NS_EXEC socat - tcp${proto}-listen:$port,reuseaddr,fork >/dev/null &
        PID=$!
        while ! $NS_EXEC ss -ltn | grep -q $port; do ((i++)); sleep 0.01; done
 
-       cat $TMPFILE | timeout 1 nc $proto -N $addr $port
+       cat $TMPFILE | timeout 1 socat -u STDIN TCP:$addr:$port
        log_test $? 0 "$name - copy file w/ TSO"
 
        ethtool -K veth0 tso off
 
-       cat $TMPFILE | timeout 1 nc $proto -N $addr $port
+       cat $TMPFILE | timeout 1 socat -u STDIN TCP:$addr:$port
        log_test $? 0 "$name - copy file w/ GSO"
 
        ethtool -K veth0 tso on
@@ -155,8 +157,8 @@ gre6_gso_test()
 
        sleep 2
 
-       gre_gst_test_checks GREv6/v4 172.16.2.2
-       gre_gst_test_checks GREv6/v6 2001:db8:1::2 -6
+       gre_gst_test_checks GREv6/v4 172.16.2.2 4
+       gre_gst_test_checks GREv6/v6 2001:db8:1::2 6
 
        cleanup
 }
@@ -212,8 +214,8 @@ if [ ! -x "$(command -v ip)" ]; then
        exit $ksft_skip
 fi
 
-if [ ! -x "$(command -v nc)" ]; then
-       echo "SKIP: Could not run test without nc tool"
+if [ ! -x "$(command -v socat)" ]; then
+       echo "SKIP: Could not run test without socat tool"
        exit $ksft_skip
 fi
 
index e61fc4c32ba23a9f0b22e4a45356e2f7d15eb9ff..6e468e0f42f7844ffb8d10509b8370cc9382ba3e 100644 (file)
@@ -31,6 +31,8 @@ struct tls_crypto_info_keys {
                struct tls12_crypto_info_chacha20_poly1305 chacha20;
                struct tls12_crypto_info_sm4_gcm sm4gcm;
                struct tls12_crypto_info_sm4_ccm sm4ccm;
+               struct tls12_crypto_info_aes_ccm_128 aesccm128;
+               struct tls12_crypto_info_aes_gcm_256 aesgcm256;
        };
        size_t len;
 };
@@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
                tls12->sm4ccm.info.version = tls_version;
                tls12->sm4ccm.info.cipher_type = cipher_type;
                break;
+       case TLS_CIPHER_AES_CCM_128:
+               tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128);
+               tls12->aesccm128.info.version = tls_version;
+               tls12->aesccm128.info.cipher_type = cipher_type;
+               break;
+       case TLS_CIPHER_AES_GCM_256:
+               tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256);
+               tls12->aesgcm256.info.version = tls_version;
+               tls12->aesgcm256.info.cipher_type = cipher_type;
+               break;
        default:
                break;
        }
@@ -78,26 +90,21 @@ static void memrnd(void *s, size_t n)
                *byte++ = rand();
 }
 
-FIXTURE(tls_basic)
-{
-       int fd, cfd;
-       bool notls;
-};
-
-FIXTURE_SETUP(tls_basic)
+static void ulp_sock_pair(struct __test_metadata *_metadata,
+                         int *fd, int *cfd, bool *notls)
 {
        struct sockaddr_in addr;
        socklen_t len;
        int sfd, ret;
 
-       self->notls = false;
+       *notls = false;
        len = sizeof(addr);
 
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = 0;
 
-       self->fd = socket(AF_INET, SOCK_STREAM, 0);
+       *fd = socket(AF_INET, SOCK_STREAM, 0);
        sfd = socket(AF_INET, SOCK_STREAM, 0);
 
        ret = bind(sfd, &addr, sizeof(addr));
@@ -108,26 +115,96 @@ FIXTURE_SETUP(tls_basic)
        ret = getsockname(sfd, &addr, &len);
        ASSERT_EQ(ret, 0);
 
-       ret = connect(self->fd, &addr, sizeof(addr));
+       ret = connect(*fd, &addr, sizeof(addr));
        ASSERT_EQ(ret, 0);
 
-       self->cfd = accept(sfd, &addr, &len);
-       ASSERT_GE(self->cfd, 0);
+       *cfd = accept(sfd, &addr, &len);
+       ASSERT_GE(*cfd, 0);
 
        close(sfd);
 
-       ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+       ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
        if (ret != 0) {
                ASSERT_EQ(errno, ENOENT);
-               self->notls = true;
+               *notls = true;
                printf("Failure setting TCP_ULP, testing without tls\n");
                return;
        }
 
-       ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+       ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
        ASSERT_EQ(ret, 0);
 }
 
+/* Produce a basic cmsg */
+static int tls_send_cmsg(int fd, unsigned char record_type,
+                        void *data, size_t len, int flags)
+{
+       char cbuf[CMSG_SPACE(sizeof(char))];
+       int cmsg_len = sizeof(char);
+       struct cmsghdr *cmsg;
+       struct msghdr msg;
+       struct iovec vec;
+
+       vec.iov_base = data;
+       vec.iov_len = len;
+       memset(&msg, 0, sizeof(struct msghdr));
+       msg.msg_iov = &vec;
+       msg.msg_iovlen = 1;
+       msg.msg_control = cbuf;
+       msg.msg_controllen = sizeof(cbuf);
+       cmsg = CMSG_FIRSTHDR(&msg);
+       cmsg->cmsg_level = SOL_TLS;
+       /* test sending non-record types. */
+       cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
+       cmsg->cmsg_len = CMSG_LEN(cmsg_len);
+       *CMSG_DATA(cmsg) = record_type;
+       msg.msg_controllen = cmsg->cmsg_len;
+
+       return sendmsg(fd, &msg, flags);
+}
+
+static int tls_recv_cmsg(struct __test_metadata *_metadata,
+                        int fd, unsigned char record_type,
+                        void *data, size_t len, int flags)
+{
+       char cbuf[CMSG_SPACE(sizeof(char))];
+       struct cmsghdr *cmsg;
+       unsigned char ctype;
+       struct msghdr msg;
+       struct iovec vec;
+       int n;
+
+       vec.iov_base = data;
+       vec.iov_len = len;
+       memset(&msg, 0, sizeof(struct msghdr));
+       msg.msg_iov = &vec;
+       msg.msg_iovlen = 1;
+       msg.msg_control = cbuf;
+       msg.msg_controllen = sizeof(cbuf);
+
+       n = recvmsg(fd, &msg, flags);
+
+       cmsg = CMSG_FIRSTHDR(&msg);
+       EXPECT_NE(cmsg, NULL);
+       EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+       EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+       ctype = *((unsigned char *)CMSG_DATA(cmsg));
+       EXPECT_EQ(ctype, record_type);
+
+       return n;
+}
+
+FIXTURE(tls_basic)
+{
+       int fd, cfd;
+       bool notls;
+};
+
+FIXTURE_SETUP(tls_basic)
+{
+       ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
+}
+
 FIXTURE_TEARDOWN(tls_basic)
 {
        close(self->fd);
@@ -196,63 +273,48 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
        .cipher_type = TLS_CIPHER_SM4_CCM,
 };
 
+FIXTURE_VARIANT_ADD(tls, 12_aes_ccm)
+{
+       .tls_version = TLS_1_2_VERSION,
+       .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_ccm)
+{
+       .tls_version = TLS_1_3_VERSION,
+       .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256)
+{
+       .tls_version = TLS_1_2_VERSION,
+       .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256)
+{
+       .tls_version = TLS_1_3_VERSION,
+       .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
 FIXTURE_SETUP(tls)
 {
        struct tls_crypto_info_keys tls12;
-       struct sockaddr_in addr;
-       socklen_t len;
-       int sfd, ret;
-
-       self->notls = false;
-       len = sizeof(addr);
+       int ret;
 
        tls_crypto_info_init(variant->tls_version, variant->cipher_type,
                             &tls12);
 
-       addr.sin_family = AF_INET;
-       addr.sin_addr.s_addr = htonl(INADDR_ANY);
-       addr.sin_port = 0;
-
-       self->fd = socket(AF_INET, SOCK_STREAM, 0);
-       sfd = socket(AF_INET, SOCK_STREAM, 0);
+       ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
 
-       ret = bind(sfd, &addr, sizeof(addr));
-       ASSERT_EQ(ret, 0);
-       ret = listen(sfd, 10);
-       ASSERT_EQ(ret, 0);
+       if (self->notls)
+               return;
 
-       ret = getsockname(sfd, &addr, &len);
+       ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
        ASSERT_EQ(ret, 0);
 
-       ret = connect(self->fd, &addr, sizeof(addr));
+       ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
        ASSERT_EQ(ret, 0);
-
-       ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
-       if (ret != 0) {
-               self->notls = true;
-               printf("Failure setting TCP_ULP, testing without tls\n");
-       }
-
-       if (!self->notls) {
-               ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
-                                tls12.len);
-               ASSERT_EQ(ret, 0);
-       }
-
-       self->cfd = accept(sfd, &addr, &len);
-       ASSERT_GE(self->cfd, 0);
-
-       if (!self->notls) {
-               ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls",
-                                sizeof("tls"));
-               ASSERT_EQ(ret, 0);
-
-               ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
-                                tls12.len);
-               ASSERT_EQ(ret, 0);
-       }
-
-       close(sfd);
 }
 
 FIXTURE_TEARDOWN(tls)
@@ -613,6 +675,95 @@ TEST_F(tls, splice_to_pipe)
        EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
 }
 
+TEST_F(tls, splice_cmsg_to_pipe)
+{
+       char *test_str = "test_read";
+       char record_type = 100;
+       int send_len = 10;
+       char buf[10];
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+       EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
+       EXPECT_EQ(errno, EINVAL);
+       EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+       EXPECT_EQ(errno, EIO);
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+                               buf, sizeof(buf), MSG_WAITALL),
+                 send_len);
+       EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, splice_dec_cmsg_to_pipe)
+{
+       char *test_str = "test_read";
+       char record_type = 100;
+       int send_len = 10;
+       char buf[10];
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+       EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+       EXPECT_EQ(errno, EIO);
+       EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
+       EXPECT_EQ(errno, EINVAL);
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+                               buf, sizeof(buf), MSG_WAITALL),
+                 send_len);
+       EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recv_and_splice)
+{
+       int send_len = TLS_PAYLOAD_MAX_LEN;
+       char mem_send[TLS_PAYLOAD_MAX_LEN];
+       char mem_recv[TLS_PAYLOAD_MAX_LEN];
+       int half = send_len / 2;
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
+       /* Recv hald of the record, splice the other half */
+       EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half);
+       EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK),
+                 half);
+       EXPECT_EQ(read(p[0], &mem_recv[half], half), half);
+       EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, peek_and_splice)
+{
+       int send_len = TLS_PAYLOAD_MAX_LEN;
+       char mem_send[TLS_PAYLOAD_MAX_LEN];
+       char mem_recv[TLS_PAYLOAD_MAX_LEN];
+       int chunk = TLS_PAYLOAD_MAX_LEN / 4;
+       int n, i, p[2];
+
+       memrnd(mem_send, sizeof(mem_send));
+
+       ASSERT_GE(pipe(p), 0);
+       for (i = 0; i < 4; i++)
+               EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0),
+                         chunk);
+
+       EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2,
+                      MSG_WAITALL | MSG_PEEK),
+                 chunk * 5 / 2);
+       EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0);
+
+       n = 0;
+       while (n < send_len) {
+               i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0);
+               EXPECT_GT(i, 0);
+               n += i;
+       }
+       EXPECT_EQ(n, send_len);
+       EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
+       EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
 TEST_F(tls, recvmsg_single)
 {
        char const *test_str = "test_recvmsg_single";
@@ -1193,60 +1344,30 @@ TEST_F(tls, mutliproc_sendpage_writers)
 
 TEST_F(tls, control_msg)
 {
-       if (self->notls)
-               return;
-
-       char cbuf[CMSG_SPACE(sizeof(char))];
-       char const *test_str = "test_read";
-       int cmsg_len = sizeof(char);
+       char *test_str = "test_read";
        char record_type = 100;
-       struct cmsghdr *cmsg;
-       struct msghdr msg;
        int send_len = 10;
-       struct iovec vec;
        char buf[10];
 
-       vec.iov_base = (char *)test_str;
-       vec.iov_len = 10;
-       memset(&msg, 0, sizeof(struct msghdr));
-       msg.msg_iov = &vec;
-       msg.msg_iovlen = 1;
-       msg.msg_control = cbuf;
-       msg.msg_controllen = sizeof(cbuf);
-       cmsg = CMSG_FIRSTHDR(&msg);
-       cmsg->cmsg_level = SOL_TLS;
-       /* test sending non-record types. */
-       cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
-       cmsg->cmsg_len = CMSG_LEN(cmsg_len);
-       *CMSG_DATA(cmsg) = record_type;
-       msg.msg_controllen = cmsg->cmsg_len;
+       if (self->notls)
+               SKIP(return, "no TLS support");
 
-       EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+       EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0),
+                 send_len);
        /* Should fail because we didn't provide a control message */
        EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
 
-       vec.iov_base = buf;
-       EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len);
-
-       cmsg = CMSG_FIRSTHDR(&msg);
-       EXPECT_NE(cmsg, NULL);
-       EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
-       EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
-       record_type = *((unsigned char *)CMSG_DATA(cmsg));
-       EXPECT_EQ(record_type, 100);
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+                               buf, sizeof(buf), MSG_WAITALL | MSG_PEEK),
+                 send_len);
        EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
 
        /* Recv the message again without MSG_PEEK */
-       record_type = 0;
        memset(buf, 0, sizeof(buf));
 
-       EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
-       cmsg = CMSG_FIRSTHDR(&msg);
-       EXPECT_NE(cmsg, NULL);
-       EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
-       EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
-       record_type = *((unsigned char *)CMSG_DATA(cmsg));
-       EXPECT_EQ(record_type, 100);
+       EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+                               buf, sizeof(buf), MSG_WAITALL),
+                 send_len);
        EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
 }
 
@@ -1301,6 +1422,160 @@ TEST_F(tls, shutdown_reuse)
        EXPECT_EQ(errno, EISCONN);
 }
 
+FIXTURE(tls_err)
+{
+       int fd, cfd;
+       int fd2, cfd2;
+       bool notls;
+};
+
+FIXTURE_VARIANT(tls_err)
+{
+       uint16_t tls_version;
+};
+
+FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm)
+{
+       .tls_version = TLS_1_2_VERSION,
+};
+
+FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm)
+{
+       .tls_version = TLS_1_3_VERSION,
+};
+
+FIXTURE_SETUP(tls_err)
+{
+       struct tls_crypto_info_keys tls12;
+       int ret;
+
+       tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128,
+                            &tls12);
+
+       ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
+       ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls);
+       if (self->notls)
+               return;
+
+       ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+       ASSERT_EQ(ret, 0);
+
+       ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len);
+       ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(tls_err)
+{
+       close(self->fd);
+       close(self->cfd);
+       close(self->fd2);
+       close(self->cfd2);
+}
+
+TEST_F(tls_err, bad_rec)
+{
+       char buf[64];
+
+       if (self->notls)
+               SKIP(return, "no TLS support");
+
+       memset(buf, 0x55, sizeof(buf));
+       EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf));
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+       EXPECT_EQ(errno, EMSGSIZE);
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1);
+       EXPECT_EQ(errno, EAGAIN);
+}
+
+TEST_F(tls_err, bad_auth)
+{
+       char buf[128];
+       int n;
+
+       if (self->notls)
+               SKIP(return, "no TLS support");
+
+       memrnd(buf, sizeof(buf) / 2);
+       EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2);
+       n = recv(self->cfd, buf, sizeof(buf), 0);
+       EXPECT_GT(n, sizeof(buf) / 2);
+
+       buf[n - 1]++;
+
+       EXPECT_EQ(send(self->fd2, buf, n, 0), n);
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+       EXPECT_EQ(errno, EBADMSG);
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+       EXPECT_EQ(errno, EBADMSG);
+}
+
+TEST_F(tls_err, bad_in_large_read)
+{
+       char txt[3][64];
+       char cip[3][128];
+       char buf[3 * 128];
+       int i, n;
+
+       if (self->notls)
+               SKIP(return, "no TLS support");
+
+       /* Put 3 records in the sockets */
+       for (i = 0; i < 3; i++) {
+               memrnd(txt[i], sizeof(txt[i]));
+               EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0),
+                         sizeof(txt[i]));
+               n = recv(self->cfd, cip[i], sizeof(cip[i]), 0);
+               EXPECT_GT(n, sizeof(txt[i]));
+               /* Break the third message */
+               if (i == 2)
+                       cip[2][n - 1]++;
+               EXPECT_EQ(send(self->fd2, cip[i], n, 0), n);
+       }
+
+       /* We should be able to receive the first two messages */
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2);
+       EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0);
+       EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0);
+       /* Third mesasge is bad */
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+       EXPECT_EQ(errno, EBADMSG);
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+       EXPECT_EQ(errno, EBADMSG);
+}
+
+TEST_F(tls_err, bad_cmsg)
+{
+       char *test_str = "test_read";
+       int send_len = 10;
+       char cip[128];
+       char buf[128];
+       char txt[64];
+       int n;
+
+       if (self->notls)
+               SKIP(return, "no TLS support");
+
+       /* Queue up one data record */
+       memrnd(txt, sizeof(txt));
+       EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt));
+       n = recv(self->cfd, cip, sizeof(cip), 0);
+       EXPECT_GT(n, sizeof(txt));
+       EXPECT_EQ(send(self->fd2, cip, n, 0), n);
+
+       EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+       n = recv(self->cfd, cip, sizeof(cip), 0);
+       cip[n - 1]++; /* Break it */
+       EXPECT_GT(n, send_len);
+       EXPECT_EQ(send(self->fd2, cip, n, 0), n);
+
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt));
+       EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0);
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+       EXPECT_EQ(errno, EBADMSG);
+       EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+       EXPECT_EQ(errno, EBADMSG);
+}
+
 TEST(non_established) {
        struct tls12_crypto_info_aes_gcm_256 tls12;
        struct sockaddr_in addr;
@@ -1355,64 +1630,82 @@ TEST(non_established) {
 
 TEST(keysizes) {
        struct tls12_crypto_info_aes_gcm_256 tls12;
-       struct sockaddr_in addr;
-       int sfd, ret, fd, cfd;
-       socklen_t len;
+       int ret, fd, cfd;
        bool notls;
 
-       notls = false;
-       len = sizeof(addr);
-
        memset(&tls12, 0, sizeof(tls12));
        tls12.info.version = TLS_1_2_VERSION;
        tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
 
-       addr.sin_family = AF_INET;
-       addr.sin_addr.s_addr = htonl(INADDR_ANY);
-       addr.sin_port = 0;
+       ulp_sock_pair(_metadata, &fd, &cfd, &notls);
 
-       fd = socket(AF_INET, SOCK_STREAM, 0);
-       sfd = socket(AF_INET, SOCK_STREAM, 0);
+       if (!notls) {
+               ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+                                sizeof(tls12));
+               EXPECT_EQ(ret, 0);
+
+               ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+                                sizeof(tls12));
+               EXPECT_EQ(ret, 0);
+       }
+
+       close(fd);
+       close(cfd);
+}
+
+TEST(tls_v6ops) {
+       struct tls_crypto_info_keys tls12;
+       struct sockaddr_in6 addr, addr2;
+       int sfd, ret, fd;
+       socklen_t len, len2;
+
+       tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12);
+
+       addr.sin6_family = AF_INET6;
+       addr.sin6_addr = in6addr_any;
+       addr.sin6_port = 0;
+
+       fd = socket(AF_INET6, SOCK_STREAM, 0);
+       sfd = socket(AF_INET6, SOCK_STREAM, 0);
 
        ret = bind(sfd, &addr, sizeof(addr));
        ASSERT_EQ(ret, 0);
        ret = listen(sfd, 10);
        ASSERT_EQ(ret, 0);
 
+       len = sizeof(addr);
        ret = getsockname(sfd, &addr, &len);
        ASSERT_EQ(ret, 0);
 
        ret = connect(fd, &addr, sizeof(addr));
        ASSERT_EQ(ret, 0);
 
+       len = sizeof(addr);
+       ret = getsockname(fd, &addr, &len);
+       ASSERT_EQ(ret, 0);
+
        ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
-       if (ret != 0) {
-               notls = true;
-               printf("Failure setting TCP_ULP, testing without tls\n");
+       if (ret) {
+               ASSERT_EQ(errno, ENOENT);
+               SKIP(return, "no TLS support");
        }
+       ASSERT_EQ(ret, 0);
 
-       if (!notls) {
-               ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
-                                sizeof(tls12));
-               EXPECT_EQ(ret, 0);
-       }
+       ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+       ASSERT_EQ(ret, 0);
 
-       cfd = accept(sfd, &addr, &len);
-       ASSERT_GE(cfd, 0);
+       ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len);
+       ASSERT_EQ(ret, 0);
 
-       if (!notls) {
-               ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
-                                sizeof("tls"));
-               EXPECT_EQ(ret, 0);
+       len2 = sizeof(addr2);
+       ret = getsockname(fd, &addr2, &len2);
+       ASSERT_EQ(ret, 0);
 
-               ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
-                                sizeof(tls12));
-               EXPECT_EQ(ret, 0);
-       }
+       EXPECT_EQ(len2, len);
+       EXPECT_EQ(memcmp(&addr, &addr2, len), 0);
 
-       close(sfd);
        close(fd);
-       close(cfd);
+       close(sfd);
 }
 
 TEST_HARNESS_MAIN
index 8748199ac10984822b675da62c1401015957abde..ffca314897c4c6ab8463e04105ac22ec0ec0765e 100644 (file)
@@ -5,7 +5,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
        nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
-       ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
+       ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+       conntrack_vrf.sh
 
 LDLIBS = -lmnl
 TEST_GEN_FILES =  nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh
new file mode 100755 (executable)
index 0000000..8b5ea92
--- /dev/null
@@ -0,0 +1,241 @@
+#!/bin/sh
+
+# This script demonstrates interaction of conntrack and vrf.
+# The vrf driver calls the netfilter hooks again, with oif/iif
+# pointing at the VRF device.
+#
+# For ingress, this means first iteration has iifname of lower/real
+# device.  In this script, thats veth0.
+# Second iteration is iifname set to vrf device, tvrf in this script.
+#
+# For egress, this is reversed: first iteration has the vrf device,
+# second iteration is done with the lower/real/veth0 device.
+#
+# test_ct_zone_in demonstrates unexpected change of nftables
+# behavior # caused by commit 09e856d54bda5f28 "vrf: Reset skb conntrack
+# connection on VRF rcv"
+#
+# It was possible to assign conntrack zone to a packet (or mark it for
+# `notracking`) in the prerouting chain before conntrack, based on real iif.
+#
+# After the change, the zone assignment is lost and the zone is assigned based
+# on the VRF master interface (in case such a rule exists).
+# assignment is lost. Instead, assignment based on the `iif` matching
+# Thus it is impossible to distinguish packets based on the original
+# interface.
+#
+# test_masquerade_vrf and test_masquerade_veth0 demonstrate the problem
+# that was supposed to be fixed by the commit mentioned above to make sure
+# that any fix to test case 1 won't break masquerade again.
+
+ksft_skip=4
+
+IP0=172.30.30.1
+IP1=172.30.30.2
+PFXL=30
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns0="ns0-$sfx"
+ns1="ns1-$sfx"
+
+cleanup()
+{
+       ip netns pids $ns0 | xargs kill 2>/dev/null
+       ip netns pids $ns1 | xargs kill 2>/dev/null
+
+       ip netns del $ns0 $ns1
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+ip netns add "$ns0"
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace $ns0"
+       exit $ksft_skip
+fi
+ip netns add "$ns1"
+
+trap cleanup EXIT
+
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.default.rp_filter=0
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+
+ip link add veth0 netns "$ns0" type veth peer name veth0 netns "$ns1" > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not add veth device"
+       exit $ksft_skip
+fi
+
+ip -net $ns0 li add tvrf type vrf table 9876
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not add vrf device"
+       exit $ksft_skip
+fi
+
+ip -net $ns0 li set lo up
+
+ip -net $ns0 li set veth0 master tvrf
+ip -net $ns0 li set tvrf up
+ip -net $ns0 li set veth0 up
+ip -net $ns1 li set veth0 up
+
+ip -net $ns0 addr add $IP0/$PFXL dev veth0
+ip -net $ns1 addr add $IP1/$PFXL dev veth0
+
+ip netns exec $ns1 iperf3 -s > /dev/null 2>&1&
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not start iperf3"
+       exit $ksft_skip
+fi
+
+# test vrf ingress handling.
+# The incoming connection should be placed in conntrack zone 1,
+# as decided by the first iteration of the ruleset.
+test_ct_zone_in()
+{
+ip netns exec $ns0 nft -f - <<EOF
+table testct {
+       chain rawpre {
+               type filter hook prerouting priority raw;
+
+               iif { veth0, tvrf } counter meta nftrace set 1
+               iif veth0 counter ct zone set 1 counter return
+               iif tvrf counter ct zone set 2 counter return
+               ip protocol icmp counter
+               notrack counter
+       }
+
+       chain rawout {
+               type filter hook output priority raw;
+
+               oif veth0 counter ct zone set 1 counter return
+               oif tvrf counter ct zone set 2 counter return
+               notrack counter
+       }
+}
+EOF
+       ip netns exec $ns1 ping -W 1 -c 1 -I veth0 $IP0 > /dev/null
+
+       # should be in zone 1, not zone 2
+       count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 1 2>/dev/null | wc -l)
+       if [ $count -eq 1 ]; then
+               echo "PASS: entry found in conntrack zone 1"
+       else
+               echo "FAIL: entry not found in conntrack zone 1"
+               count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 2 2> /dev/null | wc -l)
+               if [ $count -eq 1 ]; then
+                       echo "FAIL: entry found in zone 2 instead"
+               else
+                       echo "FAIL: entry not in zone 1 or 2, dumping table"
+                       ip netns exec $ns0 conntrack -L
+                       ip netns exec $ns0 nft list ruleset
+               fi
+       fi
+}
+
+# add masq rule that gets evaluated w. outif set to vrf device.
+# This tests the first iteration of the packet through conntrack,
+# oifname is the vrf device.
+test_masquerade_vrf()
+{
+       local qdisc=$1
+
+       if [ "$qdisc" != "default" ]; then
+               tc -net $ns0 qdisc add dev tvrf root $qdisc
+       fi
+
+       ip netns exec $ns0 conntrack -F 2>/dev/null
+
+ip netns exec $ns0 nft -f - <<EOF
+flush ruleset
+table ip nat {
+       chain rawout {
+               type filter hook output priority raw;
+
+               oif tvrf ct state untracked counter
+       }
+       chain postrouting2 {
+               type filter hook postrouting priority mangle;
+
+               oif tvrf ct state untracked counter
+       }
+       chain postrouting {
+               type nat hook postrouting priority 0;
+               # NB: masquerade should always be combined with 'oif(name) bla',
+               # lack of this is intentional here, we want to exercise double-snat.
+               ip saddr 172.30.30.0/30 counter masquerade random
+       }
+}
+EOF
+       ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 >/dev/null
+       if [ $? -ne 0 ]; then
+               echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on vrf device"
+               ret=1
+               return
+       fi
+
+       # must also check that nat table was evaluated on second (lower device) iteration.
+       ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' &&
+       ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]'
+       if [ $? -eq 0 ]; then
+               echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)"
+       else
+               echo "FAIL: vrf rules have unexpected counter value"
+               ret=1
+       fi
+
+       if [ "$qdisc" != "default" ]; then
+               tc -net $ns0 qdisc del dev tvrf root
+       fi
+}
+
+# add masq rule that gets evaluated w. outif set to veth device.
+# This tests the 2nd iteration of the packet through conntrack,
+# oifname is the lower device (veth0 in this case).
+test_masquerade_veth()
+{
+       ip netns exec $ns0 conntrack -F 2>/dev/null
+ip netns exec $ns0 nft -f - <<EOF
+flush ruleset
+table ip nat {
+       chain postrouting {
+               type nat hook postrouting priority 0;
+               meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random
+       }
+}
+EOF
+       ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 > /dev/null
+       if [ $? -ne 0 ]; then
+               echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on veth device"
+               ret=1
+               return
+       fi
+
+       # must also check that nat table was evaluated on second (lower device) iteration.
+       ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
+       if [ $? -eq 0 ]; then
+               echo "PASS: iperf3 connect with masquerade + sport rewrite on veth device"
+       else
+               echo "FAIL: vrf masq rule has unexpected counter value"
+               ret=1
+       fi
+}
+
+test_ct_zone_in
+test_masquerade_vrf "default"
+test_masquerade_vrf "pfifo"
+test_masquerade_veth
+
+exit $ret
index 5a4938d6dcf25a3f8137be799091f4f0d16ad7fd..ed61f6cab60f4a933d2e8728889cafdc44d7425c 100755 (executable)
@@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout"
 
 # Set types, defined by TYPE_ variables below
 TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
-       net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
-       net_port_mac_proto_net"
+       net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp
+       net6_port_net6_port net_port_mac_proto_net"
 
 # Reported bugs, also described by TYPE_ variables below
 BUGS="flush_remove_add"
@@ -277,6 +277,23 @@ perf_entries       1000
 perf_proto     ipv4
 "
 
+TYPE_mac_net="
+display                mac,net
+type_spec      ether_addr . ipv4_addr
+chain_spec     ether saddr . ip saddr
+dst             
+src            mac addr4
+start          1
+count          5
+src_delta      2000
+tools          sendip nc bash
+proto          udp
+
+race_repeat    0
+
+perf_duration  0
+"
+
 TYPE_net_mac_icmp="
 display                net,mac - ICMP
 type_spec      ipv4_addr . ether_addr
@@ -984,7 +1001,8 @@ format() {
                fi
        done
        for f in ${src}; do
-               __expr="${__expr} . "
+               [ "${__expr}" != "{ " ] && __expr="${__expr} . "
+
                __start="$(eval format_"${f}" "${srcstart}")"
                __end="$(eval format_"${f}" "${srcend}")"
 
index da1c1e4b6c86bf87ccfd8a01f4dbd99295081367..d88867d2fed755cd7124aea78ebd95b0f07fc7d7 100755 (executable)
@@ -759,19 +759,21 @@ test_port_shadow()
        local result=""
        local logmsg=""
 
-       echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
-       nc_r=$!
+       # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
+       echo "fake-entry" | ip netns exec "$ns2" timeout 1 socat -u STDIN UDP:"$daddrc":41404,sourceport=1405
 
-       echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
-       nc_c=$!
+       echo ROUTER | ip netns exec "$ns0" timeout 5 socat -u STDIN UDP4-LISTEN:1405 &
+       sc_r=$!
 
-       # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
-       echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null
+       echo CLIENT | ip netns exec "$ns2" timeout 5 socat -u STDIN UDP4-LISTEN:1405,reuseport &
+       sc_c=$!
+
+       sleep 0.3
 
        # ns1 tries to connect to ns0:1405.  With default settings this should connect
        # to client, it matches the conntrack entry created above.
 
-       result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405)
+       result=$(echo "data" | ip netns exec "$ns1" timeout 1 socat - UDP:"$daddrs":1405,sourceport=41404)
 
        if [ "$result" = "$expect" ] ;then
                echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}"
@@ -780,7 +782,7 @@ test_port_shadow()
                ret=1
        fi
 
-       kill $nc_r $nc_c 2>/dev/null
+       kill $sc_r $sc_c 2>/dev/null
 
        # flush udp entries for next test round, if any
        ip netns exec "$ns0" conntrack -F >/dev/null 2>&1
@@ -816,11 +818,10 @@ table $family raw {
        chain prerouting {
                type filter hook prerouting priority -300; policy accept;
                meta iif veth0 udp dport 1405 notrack
-               udp dport 1405 notrack
        }
        chain output {
                type filter hook output priority -300; policy accept;
-               udp sport 1405 notrack
+               meta oif veth0 udp sport 1405 notrack
        }
 }
 EOF
@@ -851,6 +852,18 @@ test_port_shadowing()
 {
        local family="ip"
 
+       conntrack -h >/dev/null 2>&1
+       if [ $? -ne 0 ];then
+               echo "SKIP: Could not run nat port shadowing test without conntrack tool"
+               return
+       fi
+
+       socat -h > /dev/null 2>&1
+       if [ $? -ne 0 ];then
+               echo "SKIP: Could not run nat port shadowing test without socat tool"
+               return
+       fi
+
        ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
        ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
 
index 3d202b90b33d2fb88d350644f8d4b88f14a094ab..7d27f1f3bc0108933ddbf22960e99f2a79db546c 100755 (executable)
@@ -16,6 +16,10 @@ timeout=4
 
 cleanup()
 {
+       ip netns pids ${ns1} | xargs kill 2>/dev/null
+       ip netns pids ${ns2} | xargs kill 2>/dev/null
+       ip netns pids ${nsrouter} | xargs kill 2>/dev/null
+
        ip netns del ${ns1}
        ip netns del ${ns2}
        ip netns del ${nsrouter}
@@ -332,6 +336,55 @@ EOF
        echo "PASS: tcp via loopback and re-queueing"
 }
 
+test_icmp_vrf() {
+       ip -net $ns1 link add tvrf type vrf table 9876
+       if [ $? -ne 0 ];then
+               echo "SKIP: Could not add vrf device"
+               return
+       fi
+
+       ip -net $ns1 li set eth0 master tvrf
+       ip -net $ns1 li set tvrf up
+
+       ip -net $ns1 route add 10.0.2.0/24 via 10.0.1.1 dev eth0 table 9876
+ip netns exec ${ns1} nft -f /dev/stdin <<EOF
+flush ruleset
+table inet filter {
+       chain output {
+               type filter hook output priority 0; policy accept;
+               meta oifname "tvrf" icmp type echo-request counter queue num 1
+               meta oifname "eth0" icmp type echo-request counter queue num 1
+       }
+       chain post {
+               type filter hook postrouting priority 0; policy accept;
+               meta oifname "tvrf" icmp type echo-request counter queue num 1
+               meta oifname "eth0" icmp type echo-request counter queue num 1
+       }
+}
+EOF
+       ip netns exec ${ns1} ./nf-queue -q 1 -t $timeout &
+       local nfqpid=$!
+
+       sleep 1
+       ip netns exec ${ns1} ip vrf exec tvrf ping -c 1 10.0.2.99 > /dev/null
+
+       for n in output post; do
+               for d in tvrf eth0; do
+                       ip netns exec ${ns1} nft list chain inet filter $n | grep -q "oifname \"$d\" icmp type echo-request counter packets 1"
+                       if [ $? -ne 0 ] ; then
+                               echo "FAIL: chain $n: icmp packet counter mismatch for device $d" 1>&2
+                               ip netns exec ${ns1} nft list ruleset
+                               ret=1
+                               return
+                       fi
+               done
+       done
+
+       wait $nfqpid
+       [ $? -eq 0 ] && echo "PASS: icmp+nfqueue via vrf"
+       wait 2>/dev/null
+}
+
 ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
 ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
 ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
@@ -372,5 +425,6 @@ test_queue 20
 test_tcp_forward
 test_tcp_localhost
 test_tcp_localhost_requeue
+test_icmp_vrf
 
 exit $ret
index ac646376eb014635e6b8092b4152f67b99312da3..04633119b29a0a85ef7675968b4a1efdb48df213 100755 (executable)
@@ -18,11 +18,17 @@ cleanup()
        ip netns del $ns
 }
 
-ip netns add $ns
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not create net namespace $gw"
-       exit $ksft_skip
-fi
+checktool (){
+       if ! $1 > /dev/null 2>&1; then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "socat -V" "run test without socat tool"
+checktool "ip netns add $ns" "create net namespace"
 
 trap cleanup EXIT
 
@@ -71,7 +77,8 @@ EOF
                local start=$(date +%s%3N)
                i=$((i + 10000))
                j=$((j + 1))
-               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+               # nft rule in output places each packet in a different zone.
+               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
                if [ $? -ne 0 ] ;then
                        ret=1
                        break
index b71828df5a6ddbae7e1861722ac4f9f9651ec6a4..a3239d5e40c79e9683b0626f7666d25e0f169260 100644 (file)
@@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_SCH_ETS=m
 CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_FQ_PIE=m
+CONFIG_NETDEVSIM=m
 
 #
 ## Network testing
index 503982b8f295b937e87bb65d556cb0571606c410..91832400ddbdbb5b7d118a6928ae0f8fbde2dd78 100644 (file)
@@ -68,7 +68,7 @@
         "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
         "expExitCode": "0",
         "verifyCmd": "$TC action get action bpf index 667",
-        "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
+        "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9].* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
         "matchCount": "1",
         "teardown": [
             "$TC action flush action bpf"
index 88a20c781e49828f4dff578a0dcd5fe25ec7d118..c6046096d9db8a660b0dec6733d25a7223a80538 100644 (file)
@@ -15,7 +15,7 @@
            "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
            "expExitCode": "0",
            "verifyCmd": "$TC qdisc show dev $ETH",
-           "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+           "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
            "matchCount": "4",
            "teardown": [
                    "echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -37,7 +37,7 @@
            "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
            "expExitCode": "0",
            "verifyCmd": "$TC qdisc show dev $ETH",
-           "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-9,a-f][0-9,a-f]{0,2} bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+           "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-9,a-f][0-9,a-f]{0,2}",
            "matchCount": "256",
            "teardown": [
                    "echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -60,7 +60,7 @@
            "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
            "expExitCode": "2",
            "verifyCmd": "$TC qdisc show dev $ETH",
-           "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+           "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
            "matchCount": "4",
            "teardown": [
                    "echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -82,7 +82,7 @@
            "cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq",
            "expExitCode": "2",
            "verifyCmd": "$TC qdisc show dev $ETH",
-           "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+           "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
            "matchCount": "0",
            "teardown": [
                    "echo \"1\" > /sys/bus/netdevsim/del_device"
            "cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq",
            "expExitCode": "2",
            "verifyCmd": "$TC qdisc show dev $ETH",
-           "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+           "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
            "matchCount": "0",
            "teardown": [
                    "echo \"1\" > /sys/bus/netdevsim/del_device"
            "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
            "expExitCode": "2",
            "verifyCmd": "$TC qdisc show dev $ETH",
-           "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+           "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
            "matchCount": "0",
            "teardown": [
                    "echo \"1\" > /sys/bus/netdevsim/del_device"
index a3e43189d94003db8170a91f7d9b71ab4f1a407d..ee22e3447ec7e294378c9c7ac4c523cdfee12634 100755 (executable)
@@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining):
         list_test_cases(alltests)
         exit(0)
 
+    exit_code = 0 # KSFT_PASS
     if len(alltests):
         req_plugins = pm.get_required_plugins(alltests)
         try:
@@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining):
             print('The following plugins were not found:')
             print('{}'.format(pde.missing_pg))
         catresults = test_runner(pm, args, alltests)
+        if catresults.count_failures() != 0:
+            exit_code = 1 # KSFT_FAIL
         if args.format == 'none':
             print('Test results output suppression requested\n')
         else:
@@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining):
                         gid=int(os.getenv('SUDO_GID')))
     else:
         print('No tests found\n')
+        exit_code = 4 # KSFT_SKIP
+    exit(exit_code)
 
 def main():
     """
@@ -767,8 +772,5 @@ def main():
 
     set_operation_mode(pm, parser, args, remaining)
 
-    exit(0)
-
-
 if __name__ == "__main__":
     main()
index 7fe38c76db4473db3a85d3a3750c8d852a3ca284..afb0cd86fa3df17777d54e19328da3197b7ae9a9 100755 (executable)
@@ -1,5 +1,6 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+modprobe netdevsim
 ./tdc.py -c actions --nobuildebpf
 ./tdc.py -c qdisc
index ebc4ee0fe179ff1c135602b4cb332c05293dd18b..8a9461aa0878a0b6ea74fc9ec48f370846e39397 100755 (executable)
@@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2
 n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
 ip2 link del wg0
 ip2 link del wg1
-! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
+sleep 1
+read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
+(( tx_bytes_after - tx_bytes_before < 70000 ))
 
 ip0 link del wg1
 ip1 link del wg0
@@ -609,6 +613,28 @@ ip0 link set wg0 up
 kill $ncat_pid
 ip0 link del wg0
 
+# Ensure that dst_cache references don't outlive netns lifetime
+ip1 link add dev wg0 type wireguard
+ip2 link add dev wg0 type wireguard
+configure_peers
+ip1 link add veth1 type veth peer name veth2
+ip1 link set veth2 netns $netns2
+ip1 addr add fd00:aa::1/64 dev veth1
+ip2 addr add fd00:aa::2/64 dev veth2
+ip1 link set veth1 up
+ip2 link set veth2 up
+waitiface $netns1 veth1
+waitiface $netns2 veth2
+ip1 -6 route add default dev veth1 via fd00:aa::2
+ip2 -6 route add default dev veth2 via fd00:aa::1
+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
+n1 ping6 -c 1 fd00::2
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
 # Ensure there aren't circular reference loops
 ip1 link add wg1 type wireguard
 ip2 link add wg2 type wireguard
@@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
 done < /dev/kmsg
 alldeleted=1
 for object in "${!objects[@]}"; do
-       if [[ ${objects["$object"]} != *createddestroyed ]]; then
+       if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
                echo "Error: $object: merely ${objects["$object"]}" >&3
                alldeleted=0
        fi
index fe07d97df9fa89044d6493452226b0d31c3534c1..2b321b8a96cf3cc67e15e75461fd5c747307055a 100644 (file)
@@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_TRACE_IRQFLAGS=y
 CONFIG_DEBUG_BUGVERBOSE=y
 CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
+CONFIG_DEBUG_PLIST=y
 CONFIG_PROVE_RCU=y
 CONFIG_SPARSE_RCU_POINTER=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=21
index 74db83a0aedd8b67be991e0f856c53ec6ec7c9cd..a9b5a520a1d22e7de62729bf27f746993049d595 100644 (file)
@@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_LOG_BUF_SHIFT=18
 CONFIG_PRINTK_TIME=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_LEGACY_VSYSCALL_NONE=y
index d3172450050184fa5b492cb9023e908c6f18e01d..72c4e6b393896aa9f4a7fa3531151ee8df3d1268 100644 (file)
@@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 
 static int kvm_set_memslot(struct kvm *kvm,
                           const struct kvm_userspace_memory_region *mem,
-                          struct kvm_memory_slot *old,
                           struct kvm_memory_slot *new, int as_id,
                           enum kvm_mr_change change)
 {
-       struct kvm_memory_slot *slot;
+       struct kvm_memory_slot *slot, old;
        struct kvm_memslots *slots;
        int r;
 
@@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
                 * Note, the INVALID flag needs to be in the appropriate entry
                 * in the freshly allocated memslots, not in @old or @new.
                 */
-               slot = id_to_memslot(slots, old->id);
+               slot = id_to_memslot(slots, new->id);
                slot->flags |= KVM_MEMSLOT_INVALID;
 
                /*
@@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
                kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
        }
 
+       /*
+        * Make a full copy of the old memslot, the pointer will become stale
+        * when the memslots are re-sorted by update_memslots(), and the old
+        * memslot needs to be referenced after calling update_memslots(), e.g.
+        * to free its resources and for arch specific behavior.  This needs to
+        * happen *after* (re)acquiring slots_arch_lock.
+        */
+       slot = id_to_memslot(slots, new->id);
+       if (slot) {
+               old = *slot;
+       } else {
+               WARN_ON_ONCE(change != KVM_MR_CREATE);
+               memset(&old, 0, sizeof(old));
+               old.id = new->id;
+               old.as_id = as_id;
+       }
+
+       /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+       memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
        r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
        if (r)
                goto out_slots;
@@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
        update_memslots(slots, new, change);
        slots = install_new_memslots(kvm, as_id, slots);
 
-       kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+       kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+       /* Free the old memslot's metadata.  Note, this is the full copy!!! */
+       if (change == KVM_MR_DELETE)
+               kvm_free_memslot(kvm, &old);
 
        kvfree(slots);
        return 0;
 
 out_slots:
        if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-               slot = id_to_memslot(slots, old->id);
+               slot = id_to_memslot(slots, new->id);
                slot->flags &= ~KVM_MEMSLOT_INVALID;
                slots = install_new_memslots(kvm, as_id, slots);
        } else {
@@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
                              struct kvm_memory_slot *old, int as_id)
 {
        struct kvm_memory_slot new;
-       int r;
 
        if (!old->npages)
                return -EINVAL;
@@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
         */
        new.as_id = as_id;
 
-       r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
-       if (r)
-               return r;
-
-       kvm_free_memslot(kvm, old);
-       return 0;
+       return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
 }
 
 /*
@@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
        id = (u16)mem->slot;
 
        /* General sanity checks */
-       if (mem->memory_size & (PAGE_SIZE - 1))
+       if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+           (mem->memory_size != (unsigned long)mem->memory_size))
                return -EINVAL;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                return -EINVAL;
@@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        if (!old.npages) {
                change = KVM_MR_CREATE;
                new.dirty_bitmap = NULL;
-               memset(&new.arch, 0, sizeof(new.arch));
        } else { /* Modify an existing slot. */
                if ((new.userspace_addr != old.userspace_addr) ||
                    (new.npages != old.npages) ||
@@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
                else /* Nothing to change. */
                        return 0;
 
-               /* Copy dirty_bitmap and arch from the current memslot. */
+               /* Copy dirty_bitmap from the current memslot. */
                new.dirty_bitmap = old.dirty_bitmap;
-               memcpy(&new.arch, &old.arch, sizeof(new.arch));
        }
 
        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        bitmap_set(new.dirty_bitmap, 0, new.npages);
        }
 
-       r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+       r = kvm_set_memslot(kvm, mem, &new, as_id, change);
        if (r)
                goto out_bitmap;
 
@@ -2548,72 +2564,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 {
        if (pfn == 0)
                return;
 
-       if (cache)
-               cache->pfn = cache->gfn = 0;
-
        if (dirty)
                kvm_release_pfn_dirty(pfn);
        else
                kvm_release_pfn_clean(pfn);
 }
 
-static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-                                struct gfn_to_pfn_cache *cache, u64 gen)
-{
-       kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
-       cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-       cache->gfn = gfn;
-       cache->dirty = false;
-       cache->generation = gen;
-}
-
-static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-                        struct kvm_host_map *map,
-                        struct gfn_to_pfn_cache *cache,
-                        bool atomic)
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
        kvm_pfn_t pfn;
        void *hva = NULL;
        struct page *page = KVM_UNMAPPED_PAGE;
-       struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-       u64 gen = slots->generation;
 
        if (!map)
                return -EINVAL;
 
-       if (cache) {
-               if (!cache->pfn || cache->gfn != gfn ||
-                       cache->generation != gen) {
-                       if (atomic)
-                               return -EAGAIN;
-                       kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-               }
-               pfn = cache->pfn;
-       } else {
-               if (atomic)
-                       return -EAGAIN;
-               pfn = gfn_to_pfn_memslot(slot, gfn);
-       }
+       pfn = gfn_to_pfn(vcpu->kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                return -EINVAL;
 
        if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
-               if (atomic)
-                       hva = kmap_atomic(page);
-               else
-                       hva = kmap(page);
+               hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-       } else if (!atomic) {
-               hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
        } else {
-               return -EINVAL;
+               hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 #endif
        }
 
@@ -2627,27 +2607,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 
        return 0;
 }
-
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-               struct gfn_to_pfn_cache *cache, bool atomic)
-{
-       return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-                       cache, atomic);
-}
-EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-{
-       return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-               NULL, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-static void __kvm_unmap_gfn(struct kvm *kvm,
-                       struct kvm_memory_slot *memslot,
-                       struct kvm_host_map *map,
-                       struct gfn_to_pfn_cache *cache,
-                       bool dirty, bool atomic)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
        if (!map)
                return;
@@ -2655,45 +2617,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
        if (!map->hva)
                return;
 
-       if (map->page != KVM_UNMAPPED_PAGE) {
-               if (atomic)
-                       kunmap_atomic(map->hva);
-               else
-                       kunmap(map->page);
-       }
+       if (map->page != KVM_UNMAPPED_PAGE)
+               kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
-       else if (!atomic)
-               memunmap(map->hva);
        else
-               WARN_ONCE(1, "Unexpected unmapping in atomic context");
+               memunmap(map->hva);
 #endif
 
        if (dirty)
-               mark_page_dirty_in_slot(kvm, memslot, map->gfn);
+               kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-       if (cache)
-               cache->dirty |= dirty;
-       else
-               kvm_release_pfn(map->pfn, dirty, NULL);
+       kvm_release_pfn(map->pfn, dirty);
 
        map->hva = NULL;
        map->page = NULL;
 }
-
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, 
-                 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
-{
-       __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
-                       cache, dirty, atomic);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-{
-       __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
-                       map, NULL, dirty, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -2993,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
-       BUG_ON(len + offset > ghc->len);
+       if (WARN_ON_ONCE(len + offset > ghc->len))
+               return -EINVAL;
 
        if (slots->generation != ghc->generation) {
                if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -3030,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
-       BUG_ON(len + offset > ghc->len);
+       if (WARN_ON_ONCE(len + offset > ghc->len))
+               return -EINVAL;
 
        if (slots->generation != ghc->generation) {
                if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))