Merge branches 'acpi-dptf' and 'acpi-messages'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 29 Jun 2021 13:50:37 +0000 (15:50 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 29 Jun 2021 13:50:37 +0000 (15:50 +0200)
* acpi-dptf:
  ACPI: DPTF: Add battery participant for Intel SoCs

* acpi-messages:
  ACPI: Remove the macro PREFIX "ACPI: "
  ACPI: sleep: Unify the message printing
  ACPI: sbs: Unify the message printing
  ACPI: scan: Unify the log message printing
  ACPI: sbshc: Unify the message printing
  ACPI: sysfs: Cleanup message printing
  ACPI: reboot: Unify the message printing
  ACPI: processor_throttling: Cleanup the printing messages
  ACPI: processor_perflib: Cleanup print messages
  ACPI: processor_thermal: Remove unused PREFIX for printing
  ACPI: pci_root: Unify the message printing
  ACPI: osl: Remove the duplicated PREFIX for message printing
  ACPI: nvs: Unify the message printing
  ACPI: glue: Clean up the printing messages
  ACPI: event: Use pr_*() macros to replace printk()
  ACPI: bus: Use pr_*() macros to replace printk()
  ACPI: blacklist: Unify the message printing
  ACPI: cmos_rtc: Using pr_fmt() and remove PREFIX

1540 files changed:
.clang-format
.mailmap
Documentation/ABI/obsolete/sysfs-class-dax
Documentation/ABI/obsolete/sysfs-kernel-fadump_registered
Documentation/ABI/obsolete/sysfs-kernel-fadump_release_mem
Documentation/ABI/removed/sysfs-bus-nfit
Documentation/ABI/testing/sysfs-bus-nfit
Documentation/ABI/testing/sysfs-bus-papr-pmem
Documentation/ABI/testing/sysfs-module
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/sysctl/kernel.rst
Documentation/block/data-integrity.rst
Documentation/cdrom/cdrom-standard.rst
Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
Documentation/devicetree/bindings/connector/usb-connector.yaml
Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
Documentation/devicetree/bindings/input/input.yaml
Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
Documentation/devicetree/bindings/leds/leds-bcm6328.txt
Documentation/devicetree/bindings/leds/leds-bcm6358.txt
Documentation/devicetree/bindings/media/renesas,drif.yaml
Documentation/devicetree/bindings/net/qcom,ipa.yaml
Documentation/devicetree/bindings/net/renesas,ether.yaml
Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
Documentation/devicetree/bindings/power/supply/sc2731-charger.yaml
Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
Documentation/devicetree/bindings/sound/fsl,rpmsg.yaml
Documentation/devicetree/bindings/spi/spi-mux.yaml
Documentation/driver-api/nvdimm/nvdimm.rst
Documentation/driver-api/serial/index.rst
Documentation/driver-api/usb/usb.rst
Documentation/filesystems/erofs.rst
Documentation/hwmon/tmp103.rst
Documentation/networking/device_drivers/ethernet/intel/i40e.rst
Documentation/networking/device_drivers/ethernet/intel/iavf.rst
Documentation/powerpc/syscall64-abi.rst
Documentation/process/kernel-enforcement-statement.rst
Documentation/security/tpm/xen-tpmfront.rst
Documentation/timers/no_hz.rst
Documentation/translations/zh_CN/SecurityBugs [deleted file]
Documentation/usb/gadget_configfs.rst
Documentation/usb/mtouchusb.rst
Documentation/usb/usb-serial.rst
Documentation/userspace-api/seccomp_filter.rst
Documentation/virt/kvm/amd-memory-encryption.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/mmu.rst
Documentation/virt/kvm/vcpu-requests.rst
Documentation/x86/amd-memory-encryption.rst
MAINTAINERS
Makefile
arch/alpha/kernel/syscalls/syscall.tbl
arch/arc/Makefile
arch/arc/include/asm/cmpxchg.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/include/uapi/asm/page.h
arch/arc/kernel/entry.S
arch/arc/kernel/kgdb.c
arch/arc/kernel/process.c
arch/arc/kernel/signal.c
arch/arc/mm/init.c
arch/arc/mm/ioremap.c
arch/arc/mm/tlb.c
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6q-dhcom-som.dtsi
arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
arch/arm/boot/dts/imx7d-meerkat96.dts
arch/arm/boot/dts/imx7d-pico.dtsi
arch/arm/include/asm/cpuidle.h
arch/arm/mach-imx/pm-imx27.c
arch/arm/mach-npcm/Kconfig
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/pm.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-pxa/pxa_cplds_irqs.c
arch/arm/tools/syscall.tbl
arch/arm/xen/mm.c
arch/arm64/Kbuild
arch/arm64/Kconfig.platforms
arch/arm64/Makefile
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
arch/arm64/boot/dts/renesas/r8a774a1.dtsi
arch/arm64/boot/dts/renesas/r8a774b1.dtsi
arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
arch/arm64/boot/dts/renesas/r8a774c0.dtsi
arch/arm64/boot/dts/renesas/r8a774e1.dtsi
arch/arm64/boot/dts/renesas/r8a77950.dtsi
arch/arm64/boot/dts/renesas/r8a77951.dtsi
arch/arm64/boot/dts/renesas/r8a77960.dtsi
arch/arm64/boot/dts/renesas/r8a77961.dtsi
arch/arm64/boot/dts/renesas/r8a77965.dtsi
arch/arm64/boot/dts/renesas/r8a77970.dtsi
arch/arm64/boot/dts/renesas/r8a77980.dtsi
arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
arch/arm64/boot/dts/renesas/r8a77990.dtsi
arch/arm64/boot/dts/renesas/salvator-common.dtsi
arch/arm64/boot/dts/ti/k3-am64-main.dtsi
arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
arch/arm64/boot/dts/ti/k3-am65-main.dtsi
arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
arch/arm64/boot/dts/ti/k3-am654-base-board.dts
arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/cpucaps.h [deleted file]
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/exception.c
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/flush.c
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc.S
arch/arm64/tools/Makefile [new file with mode: 0644]
arch/arm64/tools/cpucaps [new file with mode: 0644]
arch/arm64/tools/gen-cpucaps.awk [new file with mode: 0755]
arch/ia64/kernel/syscalls/syscall.tbl
arch/m68k/kernel/signal.c
arch/m68k/kernel/syscalls/syscall.tbl
arch/microblaze/kernel/syscalls/syscall.tbl
arch/mips/alchemy/board-xxs1500.c
arch/mips/include/asm/mips-boards/launch.h
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/mips/lib/mips-atomic.c
arch/mips/mm/cache.c
arch/mips/ralink/of.c
arch/openrisc/include/asm/barrier.h [new file with mode: 0644]
arch/openrisc/kernel/setup.c
arch/openrisc/mm/init.c
arch/parisc/kernel/syscalls/syscall.tbl
arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/paravirt.h
arch/powerpc/include/asm/plpar_wrappers.h
arch/powerpc/include/asm/pte-walk.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/include/asm/syscall.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/interrupt.c
arch/powerpc/kernel/io-workarounds.c
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal.h
arch/powerpc/kernel/syscalls/syscall.tbl
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/lib/feature-fixups.c
arch/powerpc/platforms/pseries/hvCall.S
arch/powerpc/platforms/pseries/lpar.c
arch/riscv/Kconfig
arch/riscv/Makefile
arch/riscv/boot/dts/microchip/Makefile
arch/riscv/boot/dts/sifive/Makefile
arch/riscv/errata/sifive/Makefile
arch/riscv/include/asm/alternative-macros.h
arch/riscv/include/asm/kexec.h
arch/riscv/kernel/machine_kexec.c
arch/riscv/kernel/probes/kprobes.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vmlinux-xip.lds.S
arch/riscv/mm/init.c
arch/s390/kernel/syscalls/syscall.tbl
arch/sh/kernel/syscalls/syscall.tbl
arch/sh/kernel/traps.c
arch/sparc/kernel/syscalls/syscall.tbl
arch/x86/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/boot/compressed/sev.c [moved from arch/x86/boot/compressed/sev-es.c with 98% similarity]
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/entry/syscalls/syscall_64.tbl
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/perf_event.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/kvm_para.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/sev-common.h [new file with mode: 0644]
arch/x86/include/asm/sev.h [moved from arch/x86/include/asm/sev-es.h with 70% similarity]
arch/x86/include/asm/thermal.h
arch/x86/include/asm/vdso/clocksource.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head64.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/mmconf-fam10h_64.c
arch/x86/kernel/nmi.c
arch/x86/kernel/setup.c
arch/x86/kernel/sev-shared.c [moved from arch/x86/kernel/sev-es-shared.c with 96% similarity]
arch/x86/kernel/sev.c [moved from arch/x86/kernel/sev-es.c with 92% similarity]
arch/x86/kernel/signal_compat.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/kvm_emulate.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/capabilities.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/posted_intr.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/mem_encrypt_identity.c
arch/x86/pci/amd_bus.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
arch/x86/realmode/init.c
arch/x86/realmode/rm/trampoline_64.S
arch/x86/xen/enlighten_pv.c
arch/xtensa/kernel/syscalls/syscall.tbl
block/bfq-iosched.c
block/blk-iocost.c
block/blk-mq-sched.c
block/blk-mq.c
block/genhd.c
block/kyber-iosched.c
block/mq-deadline.c
block/partitions/efi.c
crypto/async_tx/async_xor.c
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/acpi_apd.c
drivers/acpi/acpi_cmos_rtc.c
drivers/acpi/acpi_fpdt.c
drivers/acpi/acpi_video.c
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exserial.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/utprint.c
drivers/acpi/acpica/utuuid.c
drivers/acpi/bgrt.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/device_pm.c
drivers/acpi/device_sysfs.c
drivers/acpi/ec.c
drivers/acpi/event.c
drivers/acpi/fan.c
drivers/acpi/fan.h [new file with mode: 0644]
drivers/acpi/glue.c
drivers/acpi/internal.h
drivers/acpi/nfit/core.c
drivers/acpi/nvs.c
drivers/acpi/osl.c
drivers/acpi/pci_root.c
drivers/acpi/pmic/intel_pmic_chtdc_ti.c
drivers/acpi/power.c
drivers/acpi/pptt.c
drivers/acpi/prmt.c [new file with mode: 0644]
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/acpi/processor_throttling.c
drivers/acpi/reboot.c
drivers/acpi/resource.c
drivers/acpi/sbs.c
drivers/acpi/sbshc.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/sleep.h
drivers/acpi/sysfs.c
drivers/acpi/tables.c
drivers/acpi/x86/s2idle.c
drivers/android/binder.c
drivers/base/core.c
drivers/base/memory.c
drivers/base/power/runtime.c
drivers/block/loop.c
drivers/block/loop.h
drivers/block/nbd.c
drivers/bluetooth/btusb.c
drivers/bus/mhi/pci_generic.c
drivers/bus/ti-sysc.c
drivers/cdrom/gdrom.c
drivers/char/hpet.c
drivers/char/tpm/tpm2-cmd.c
drivers/char/tpm/tpm_tis_core.c
drivers/clk/clk.c
drivers/clocksource/hyperv_timer.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/cavium/nitrox/nitrox_main.c
drivers/dma-buf/dma-buf.c
drivers/dma/idxd/init.c
drivers/dma/qcom/hidma_mgmt.c
drivers/edac/amd64_edac.c
drivers/firmware/arm_scmi/notify.h
drivers/firmware/arm_scpi.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/fdtparams.c
drivers/firmware/efi/libstub/file.c
drivers/firmware/efi/memattr.c
drivers/gpio/gpio-cadence.c
drivers/gpio/gpio-tegra186.c
drivers/gpio/gpio-wcd934x.c
drivers/gpio/gpio-xilinx.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gt/gen7_renderclear.c
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/hypercall.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mpt.h
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_mm.c
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/mcde/mcde_dsi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/dp/dp_audio.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_display.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/nislands_smc.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/sislands_smc.h
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_vec.c
drivers/gpu/host1x/bus.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/amd_sfh_hid.c
drivers/hid/hid-a4tech.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ft260.c
drivers/hid/hid-gt683r.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/hid-semitek.c [new file with mode: 0644]
drivers/hid/hid-sensor-custom.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-thrustmaster.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/surface-hid/surface_hid_core.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-pidff.c
drivers/hwmon/adm9240.c
drivers/hwmon/corsair-psu.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/lm80.c
drivers/hwmon/ltc2992.c
drivers/hwmon/occ/common.c
drivers/hwmon/occ/common.h
drivers/hwmon/pmbus/fsp-3y.c
drivers/hwmon/pmbus/isl68137.c
drivers/hwmon/pmbus/q54sj108a2.c
drivers/hwmon/scpi-hwmon.c
drivers/hwmon/tps23861.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-ali1563.c
drivers/i2c/busses/i2c-altera.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-icy.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-st.c
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/busses/i2c-tegra-bpmp.c
drivers/i2c/i2c-core-acpi.c
drivers/i2c/muxes/i2c-arb-gpio-challenge.c
drivers/iio/accel/Kconfig
drivers/iio/adc/ad7124.c
drivers/iio/adc/ad7192.c
drivers/iio/adc/ad7768-1.c
drivers/iio/adc/ad7793.c
drivers/iio/adc/ad7923.c
drivers/iio/common/hid-sensors/Kconfig
drivers/iio/dac/ad5770r.c
drivers/iio/gyro/Kconfig
drivers/iio/gyro/fxas21002c_core.c
drivers/iio/gyro/mpu3050-core.c
drivers/iio/humidity/Kconfig
drivers/iio/industrialio-core.c
drivers/iio/light/Kconfig
drivers/iio/light/gp2ap002.c
drivers/iio/light/tsl2583.c
drivers/iio/magnetometer/Kconfig
drivers/iio/orientation/Kconfig
drivers/iio/pressure/Kconfig
drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
drivers/iio/temperature/Kconfig
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_std_types_device.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/dm.c
drivers/infiniband/hw/mlx5/doorbell.c
drivers/infiniband/hw/mlx5/fs.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/sw/siw/siw_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
drivers/interconnect/qcom/bcm-voter.c
drivers/iommu/amd/iommu.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/pasid.c
drivers/iommu/virtio-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-mvebu-icu.c
drivers/irqchip/irq-mvebu-sei.c
drivers/irqchip/irq-stm32-exti.c
drivers/isdn/hardware/mISDN/hfcsusb.c
drivers/isdn/hardware/mISDN/mISDNinfineon.c
drivers/isdn/hardware/mISDN/netjet.c
drivers/leds/leds-lp5523.c
drivers/md/bcache/bcache.h
drivers/md/bcache/request.c
drivers/md/bcache/stats.c
drivers/md/bcache/stats.h
drivers/md/bcache/sysfs.c
drivers/md/dm-integrity.c
drivers/md/dm-snap.c
drivers/md/dm-verity-verify-sig.c
drivers/md/raid5.c
drivers/media/dvb-frontends/sp8870.c
drivers/media/platform/rcar_drif.c
drivers/media/usb/gspca/cpia1.c
drivers/media/usb/gspca/m5602/m5602_mt9m111.c
drivers/media/usb/gspca/m5602/m5602_po1030.c
drivers/misc/cardreader/rtl8411.c
drivers/misc/cardreader/rts5209.c
drivers/misc/cardreader/rts5227.c
drivers/misc/cardreader/rts5228.c
drivers/misc/cardreader/rts5229.c
drivers/misc/cardreader/rts5249.c
drivers/misc/cardreader/rts5260.c
drivers/misc/cardreader/rts5261.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/eeprom/at24.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_drv.c
drivers/misc/habanalabs/common/sysfs.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/goya/goya_hwmgr.c
drivers/misc/ics932s401.c
drivers/misc/kgdbts.c
drivers/misc/lis3lv02d/lis3lv02d.h
drivers/misc/mei/interrupt.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-pci-gli.c
drivers/mtd/nand/raw/cs553x_nand.c
drivers/mtd/nand/raw/fsmc_nand.c
drivers/mtd/nand/raw/lpc32xx_slc.c
drivers/mtd/nand/raw/ndfc.c
drivers/mtd/nand/raw/sharpsl.c
drivers/mtd/nand/raw/tmio_nand.c
drivers/mtd/nand/raw/txx9ndfmc.c
drivers/mtd/parsers/ofpart_core.c
drivers/net/appletalk/cops.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_serial.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/sja1105/sja1105_dynamic_config.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/microchip/encx24j600_hw.h
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/pensando/Kconfig
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ipa/ipa.h
drivers/net/ipa/ipa_mem.c
drivers/net/mdio/mdio-octeon.c
drivers/net/mdio/mdio-thunder.c
drivers/net/phy/mdio_bus.c
drivers/net/usb/cdc_eem.c
drivers/net/usb/hso.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc75xx.c
drivers/net/virtio_net.c
drivers/net/wireguard/Makefile
drivers/net/wireguard/allowedips.c
drivers/net/wireguard/allowedips.h
drivers/net/wireguard/main.c
drivers/net/wireguard/peer.c
drivers/net/wireguard/peer.h
drivers/net/wireguard/selftest/allowedips.c
drivers/net/wireguard/socket.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/rx_desc.h
drivers/net/wireless/ath/ath11k/dp_rx.c
drivers/net/wireless/ath/ath11k/dp_rx.h
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath6kl/debug.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/marvell/libertas/mesh.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt7615/init.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
drivers/net/wireless/mediatek/mt76/mt7921/init.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/xen-netback/interface.c
drivers/nfc/nfcmrvl/fw_dnld.h
drivers/nfc/nfcmrvl/i2c.c
drivers/nfc/nfcmrvl/nfcmrvl.h
drivers/nfc/nfcmrvl/spi.c
drivers/nfc/nfcmrvl/uart.c
drivers/nfc/nfcmrvl/usb.c
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/discovery.c
drivers/nvme/target/fabrics-cmd.c
drivers/nvme/target/io-cmd-bdev.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/passthru.c
drivers/nvme/target/rdma.c
drivers/nvme/target/tcp.c
drivers/pci/of.c
drivers/pci/probe.c
drivers/phy/broadcom/phy-brcm-usb-init.h
drivers/phy/cadence/phy-cadence-sierra.c
drivers/phy/mediatek/phy-mtk-tphy.c
drivers/phy/microchip/sparx5_serdes.c
drivers/phy/ralink/phy-mt7621-pci.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/aspeed/pinmux-aspeed.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-sdx55.c
drivers/pinctrl/ralink/pinctrl-rt2880.c
drivers/platform/mellanox/mlxbf-tmfifo.c
drivers/platform/mellanox/mlxreg-hotplug.c
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/aggregator/core.c
drivers/platform/surface/surface3_power.c
drivers/platform/surface/surface_acpi_notify.c
drivers/platform/surface/surface_aggregator_registry.c
drivers/platform/surface/surface_dtx.c
drivers/platform/x86/Kconfig
drivers/platform/x86/dell/dell-smbios-wmi.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/hp-wireless.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_int0002_vgpio.c
drivers/platform/x86/intel_punit_ipc.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/ptp/ptp_ocp.c
drivers/rapidio/rio_cm.c
drivers/regulator/Kconfig
drivers/regulator/atc260x-regulator.c
drivers/regulator/bd718x7-regulator.c
drivers/regulator/core.c
drivers/regulator/cros-ec-regulator.c
drivers/regulator/da9121-regulator.c
drivers/regulator/fan53555.c
drivers/regulator/fan53880.c
drivers/regulator/fixed.c
drivers/regulator/helpers.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/hi655x-regulator.c
drivers/regulator/max77620-regulator.c
drivers/regulator/mt6315-regulator.c
drivers/regulator/rt4801-regulator.c
drivers/regulator/rtmv20-regulator.c
drivers/regulator/scmi-regulator.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_ops.c
drivers/scsi/BusLogic.c
drivers/scsi/BusLogic.h
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
drivers/scsi/aic7xxx/scsi_message.h
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/hosts.c
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/ufs/ufs-hisi.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/vmw_pvscsi.c
drivers/soc/amlogic/meson-clk-measure.c
drivers/soundwire/qcom.c
drivers/spi/Kconfig
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-omap-uwire.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-sc18is602.c
drivers/spi/spi-sprd.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spi-zynq-qspi.c
drivers/spi/spi.c
drivers/staging/emxx_udc/emxx_udc.c
drivers/staging/iio/cdc/ad7746.c
drivers/staging/ralink-gdma/ralink-gdma.c
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
drivers/target/target_core_iblock.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tee/amdtee/amdtee_private.h
drivers/tee/amdtee/call.c
drivers/tee/amdtee/core.c
drivers/tee/optee/call.c
drivers/tee/optee/optee_msg.h
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
drivers/thermal/intel/therm_throt.c
drivers/thermal/intel/x86_pkg_temp_thermal.c
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/thunderbolt/dma_port.c
drivers/thunderbolt/usb4.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_aspeed_vuart.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_exar.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/rp2.c
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/uio/uio_hv_generic.c
drivers/uio/uio_pci_generic.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/debug.h
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/dwc3-imx8mp.c
drivers/usb/dwc3/dwc3-meson-g12a.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/config.c
drivers/usb/gadget/function/f_ecm.c
drivers/usb/gadget/function/f_eem.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_loopback.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/f_serial.c
drivers/usb/gadget/function/f_sourcesink.c
drivers/usb/gadget/function/f_subset.c
drivers/usb/gadget/function/f_tcm.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/fotg210-hcd.c
drivers/usb/host/xhci-ext-caps.h
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/brcmstb-usb-pinmap.c
drivers/usb/misc/trancevibrator.c
drivers/usb/misc/uss720.c
drivers/usb/musb/mediatek.c
drivers/usb/musb/musb_core.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/omninet.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/quatech2.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/typec/mux.c
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tcpm/wcove.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi.h
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/platform/vfio_platform_common.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/console/vgacon.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/hgafb.c
drivers/video/fbdev/imsttfb.c
drivers/xen/gntdev.c
drivers/xen/swiotlb-xen.c
drivers/xen/unpopulated-alloc.c
drivers/xen/xen-pciback/vpci.c
drivers/xen/xen-pciback/xenbus.c
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/fsclient.c
fs/afs/vlclient.c
fs/afs/write.c
fs/block_dev.c
fs/btrfs/compression.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/reflink.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/btrfs/zoned.h
fs/cifs/cifs_ioctl.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/file.c
fs/cifs/fs_context.c
fs/cifs/ioctl.c
fs/cifs/misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/trace.h
fs/coredump.c
fs/dax.c
fs/debugfs/file.c
fs/debugfs/inode.c
fs/ecryptfs/crypto.c
fs/erofs/zmap.c
fs/ext4/extents.c
fs/ext4/fast_commit.c
fs/ext4/fast_commit.h
fs/ext4/ialloc.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/sysfs.c
fs/f2fs/compress.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/segment.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/lops.c
fs/gfs2/lops.h
fs/gfs2/util.c
fs/hfsplus/extents.c
fs/hugetlbfs/inode.c
fs/io-wq.c
fs/io-wq.h
fs/io_uring.c
fs/iomap/buffered-io.c
fs/namespace.c
fs/netfs/Kconfig
fs/netfs/read_helper.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/namespace.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfstrace.h
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/super.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fdinfo.c
fs/ocfs2/file.c
fs/proc/base.c
fs/quota/dquot.c
fs/signalfd.c
fs/squashfs/file.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_fs.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_trans_inode.c
fs/xfs/scrub/common.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_message.h
include/acpi/acbuffer.h
include/acpi/acconfig.h
include/acpi/acpi_bus.h
include/acpi/acpixf.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/asm-generic/vmlinux.lds.h
include/dt-bindings/usb/pd.h
include/linux/acpi.h
include/linux/avf/virtchnl.h
include/linux/bits.h
include/linux/blkdev.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compat.h
include/linux/compiler_attributes.h
include/linux/console_struct.h
include/linux/const.h
include/linux/device.h
include/linux/dynamic_debug.h
include/linux/elevator.h
include/linux/entry-kvm.h
include/linux/fanotify.h
include/linux/fb.h
include/linux/fwnode.h
include/linux/genhd.h
include/linux/hid.h
include/linux/host1x.h
include/linux/init.h
include/linux/kvm_host.h
include/linux/libnvdimm.h
include/linux/mfd/rohm-bd70528.h
include/linux/mfd/rohm-bd71828.h
include/linux/minmax.h
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/mpfs.h [new file with mode: 0644]
include/linux/mm.h
include/linux/mm_types.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/pgtable.h
include/linux/phy.h
include/linux/platform_data/ti-sysc.h
include/linux/pm.h
include/linux/prmt.h [new file with mode: 0644]
include/linux/randomize_kstack.h
include/linux/rtsx_pci.h
include/linux/sched.h
include/linux/sched/signal.h
include/linux/signal.h
include/linux/spi/spi.h
include/linux/sunrpc/xprt.h
include/linux/surface_aggregator/device.h
include/linux/tick.h
include/linux/usb/pd.h
include/linux/usb/pd_ext_sdb.h
include/net/caif/caif_dev.h
include/net/caif/cfcnfg.h
include/net/caif/cfserl.h
include/net/cfg80211.h
include/net/netfilter/nf_flow_table.h
include/net/netfilter/nf_tables.h
include/net/nfc/nci_core.h
include/net/page_pool.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sock.h
include/net/tls.h
include/sound/soc-dai.h
include/uapi/asm-generic/siginfo.h
include/uapi/linux/fs.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/io_uring.h
include/uapi/linux/kvm.h
include/uapi/linux/perf_event.h
include/uapi/linux/signalfd.h
include/uapi/linux/virtio_ids.h
include/uapi/misc/habanalabs.h
include/xen/arm/swiotlb-xen.h
init/Kconfig
init/main.c
ipc/mqueue.c
ipc/msg.c
ipc/sem.c
kernel/bpf/Kconfig [new file with mode: 0644]
kernel/bpf/bpf_lsm.c
kernel/bpf/btf.c
kernel/bpf/helpers.c
kernel/bpf/ringbuf.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/cgroup/rdma.c
kernel/cgroup/rstat.c
kernel/entry/common.c
kernel/events/core.c
kernel/irq_work.c
kernel/kcsan/debugfs.c
kernel/locking/lockdep.c
kernel/locking/mutex-debug.c
kernel/locking/mutex-debug.h
kernel/locking/mutex.c
kernel/locking/mutex.h
kernel/module.c
kernel/ptrace.c
kernel/resource.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/pelt.h
kernel/seccomp.c
kernel/signal.c
kernel/sysctl.c
kernel/time/alarmtimer.c
kernel/time/tick-sched.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/watchdog.c
kernel/workqueue.c
lib/Makefile
lib/crc64.c
lib/dynamic_debug.c
lib/percpu-refcount.c
lib/test_kasan.c
mm/debug_vm_pgtable.c
mm/gup.c
mm/hugetlb.c
mm/internal.h
mm/ioremap.c
mm/kasan/init.c
mm/kfence/core.c
mm/ksm.c
mm/memory.c
mm/page_alloc.c
mm/shmem.c
mm/shuffle.h
mm/slab_common.c
mm/slub.c
mm/userfaultfd.c
net/Kconfig
net/bluetooth/hci_core.c
net/bluetooth/hci_sock.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/caif/cfcnfg.c
net/caif/cfserl.c
net/can/isotp.c
net/compat.c
net/core/dev.c
net/core/devlink.c
net/core/fib_rules.c
net/core/filter.c
net/core/page_pool.c
net/core/rtnetlink.c
net/core/sock.c
net/dsa/master.c
net/dsa/slave.c
net/dsa/tag_8021q.c
net/ethtool/stats.c
net/hsr/hsr_device.c
net/hsr/hsr_forward.c
net/hsr/hsr_forward.h
net/hsr/hsr_main.h
net/hsr/hsr_slave.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/nl802154.c
net/ipv4/bpf_tcp_ca.c
net/ipv4/ipconfig.c
net/ipv6/mcast.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/kcm/kcmsock.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/wpa.c
net/mptcp/options.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/sockopt.c
net/mptcp/subflow.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_ct.c
net/netfilter/nft_set_pipapo.c
net/netfilter/nft_set_pipapo.h
net/netfilter/nft_set_pipapo_avx2.c
net/netlink/af_netlink.c
net/nfc/llcp_sock.c
net/nfc/nci/core.c
net/nfc/nci/hci.c
net/nfc/rawsock.c
net/openvswitch/meter.c
net/packet/af_packet.c
net/rds/connection.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_listen.c
net/sched/act_ct.c
net/sched/cls_api.c
net/sched/sch_dsmark.c
net/sched/sch_fq_pie.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sctp/socket.c
net/sctp/sysctl.c
net/smc/smc_ism.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/sunrpc/xprtsock.c
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/tipc/udp_media.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/util.c
net/x25/af_x25.c
samples/bpf/xdpsock_user.c
samples/vfio-mdev/mdpy-fb.c
scripts/Makefile.modfinal
scripts/dummy-tools/gcc
scripts/jobserver-exec
scripts/link-vmlinux.sh
security/keys/trusted-keys/trusted_tpm1.c
security/keys/trusted-keys/trusted_tpm2.c
sound/core/control_led.c
sound/core/seq/seq_timer.c
sound/core/timer.c
sound/firewire/Kconfig
sound/firewire/amdtp-stream-trace.h
sound/firewire/amdtp-stream.c
sound/firewire/bebob/bebob.c
sound/firewire/dice/dice-alesis.c
sound/firewire/dice/dice-pcm.c
sound/firewire/dice/dice-stream.c
sound/firewire/dice/dice-tcelectronic.c
sound/firewire/dice/dice.c
sound/firewire/dice/dice.h
sound/firewire/oxfw/oxfw.c
sound/hda/intel-dsp-config.c
sound/isa/gus/gus_main.c
sound/isa/sb/sb16_main.c
sound/isa/sb/sb8.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/pci/intel8x0.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/amd/raven/acp3x.h
sound/soc/amd/raven/pci-acp3x.c
sound/soc/codecs/ak5558.c
sound/soc/codecs/cs35l32.c
sound/soc/codecs/cs35l33.c
sound/soc/codecs/cs35l34.c
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs42l56.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/cs43130.c
sound/soc/codecs/cs53l30.c
sound/soc/codecs/da7219.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/max98088.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5659.c
sound/soc/codecs/rt5682-sdw.c
sound/soc/codecs/rt711-sdca.c
sound/soc/codecs/sti-sas.c
sound/soc/codecs/tas2562.h
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl-asoc-card.c
sound/soc/generic/audio-graph-card.c
sound/soc/generic/simple-card.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass.h
sound/soc/soc-core.c
sound/soc/soc-topology.c
sound/soc/sof/intel/hda-dai.c
sound/soc/sof/pm.c
sound/soc/stm/stm32_sai_sub.c
sound/usb/format.c
sound/usb/line6/driver.c
sound/usb/line6/pod.c
sound/usb/line6/variax.c
sound/usb/midi.c
sound/usb/mixer_quirks.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/mixer_scarlett_gen2.h
tools/arch/mips/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/powerpc/include/uapi/asm/errno.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/arch/x86/lib/memcpy_64.S
tools/arch/x86/lib/memset_64.S
tools/bootconfig/include/linux/bootconfig.h
tools/bootconfig/main.c
tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
tools/bpf/bpftool/Documentation/bpftool-prog.rst
tools/bpf/bpftool/bash-completion/bpftool
tools/bpf/bpftool/cgroup.c
tools/bpf/bpftool/prog.c
tools/build/Makefile.build
tools/include/asm/alternative.h [moved from tools/include/asm/alternative-asm.h with 100% similarity]
tools/include/linux/bits.h
tools/include/linux/const.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/fs.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/prctl.h
tools/kvm/kvm_stat/kvm_stat.txt
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf_internal.h
tools/objtool/arch/x86/decode.c
tools/objtool/elf.c
tools/perf/Documentation/perf-intel-pt.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Makefile.config
tools/perf/arch/arm64/util/kvm-stat.c
tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/builtin-buildid-list.c
tools/perf/builtin-record.c
tools/perf/builtin-stat.c
tools/perf/check-headers.sh
tools/perf/perf.c
tools/perf/pmu-events/arch/powerpc/power10/cache.json
tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
tools/perf/pmu-events/arch/powerpc/power10/frontend.json
tools/perf/pmu-events/arch/powerpc/power10/locks.json
tools/perf/pmu-events/arch/powerpc/power10/marked.json
tools/perf/pmu-events/arch/powerpc/power10/memory.json
tools/perf/pmu-events/arch/powerpc/power10/others.json
tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
tools/perf/pmu-events/arch/powerpc/power10/pmc.json
tools/perf/pmu-events/arch/powerpc/power10/translation.json
tools/perf/pmu-events/jevents.c
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/tests/attr/base-record
tools/perf/tests/attr/base-stat
tools/perf/tests/attr/system-wide-dummy
tools/perf/tests/pfm.c
tools/perf/util/Build
tools/perf/util/bpf_counter.c
tools/perf/util/dwarf-aux.c
tools/perf/util/env.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.l
tools/perf/util/perf_api_probe.c
tools/perf/util/perf_api_probe.h
tools/perf/util/pfm.c
tools/perf/util/probe-finder.c
tools/perf/util/record.c
tools/perf/util/session.c
tools/perf/util/stat-display.c
tools/perf/util/symbol-elf.c
tools/scripts/Makefile.include
tools/testing/nvdimm/test/iomap.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/arm64/bti/test.c
tools/testing/selftests/bpf/network_helpers.c
tools/testing/selftests/bpf/network_helpers.h
tools/testing/selftests/bpf/prog_tests/ringbuf.c
tools/testing/selftests/bpf/prog_tests/tc_redirect.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_tc_neigh.c
tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c
tools/testing/selftests/bpf/progs/test_tc_peer.c
tools/testing/selftests/bpf/test_tc_redirect.sh [deleted file]
tools/testing/selftests/bpf/verifier/stack_ptr.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/exec/Makefile
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/hardware_disable_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/kvm_util_internal.h
tools/testing/selftests/kvm/lib/perf_test_util.c
tools/testing/selftests/kvm/lib/rbtree.c [new file with mode: 0644]
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/lib/x86_64/handlers.S
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/memslot_modification_stress_test.c
tools/testing/selftests/kvm/memslot_perf_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
tools/testing/selftests/nci/.gitignore [new file with mode: 0644]
tools/testing/selftests/net/mptcp/mptcp_connect.sh
tools/testing/selftests/perf_events/sigtrap_threads.c
tools/testing/selftests/proc/.gitignore
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/kernel.config
virt/kvm/kvm_main.c
virt/lib/irqbypass.c

index c24b147cac01868bb984c945843096a3b2e779a1..15d4eaabc6b531bb26053ae25f6e2602d21659e4 100644 (file)
@@ -109,8 +109,8 @@ ForEachMacros:
   - 'css_for_each_child'
   - 'css_for_each_descendant_post'
   - 'css_for_each_descendant_pre'
-  - 'cxl_for_each_cmd'
   - 'device_for_each_child_node'
+  - 'displayid_iter_for_each'
   - 'dma_fence_chain_for_each'
   - 'do_for_each_ftrace_op'
   - 'drm_atomic_crtc_for_each_plane'
@@ -136,6 +136,7 @@ ForEachMacros:
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
   - 'flow_action_for_each'
+  - 'for_each_acpi_dev_match'
   - 'for_each_active_dev_scope'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
@@ -171,7 +172,6 @@ ForEachMacros:
   - 'for_each_dapm_widgets'
   - 'for_each_dev_addr'
   - 'for_each_dev_scope'
-  - 'for_each_displayid_db'
   - 'for_each_dma_cap_mask'
   - 'for_each_dpcm_be'
   - 'for_each_dpcm_be_rollback'
@@ -179,6 +179,7 @@ ForEachMacros:
   - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
+  - 'for_each_dtpm_table'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
   - 'for_each_element'
@@ -215,6 +216,7 @@ ForEachMacros:
   - 'for_each_migratetype_order'
   - 'for_each_msi_entry'
   - 'for_each_msi_entry_safe'
+  - 'for_each_msi_vector'
   - 'for_each_net'
   - 'for_each_net_continue_reverse'
   - 'for_each_netdev'
@@ -270,6 +272,12 @@ ForEachMacros:
   - 'for_each_prime_number_from'
   - 'for_each_process'
   - 'for_each_process_thread'
+  - 'for_each_prop_codec_conf'
+  - 'for_each_prop_dai_codec'
+  - 'for_each_prop_dai_cpu'
+  - 'for_each_prop_dlc_codecs'
+  - 'for_each_prop_dlc_cpus'
+  - 'for_each_prop_dlc_platforms'
   - 'for_each_property_of_node'
   - 'for_each_registered_fb'
   - 'for_each_requested_gpio'
@@ -430,6 +438,7 @@ ForEachMacros:
   - 'queue_for_each_hw_ctx'
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
+  - 'rb_for_each'
   - 'rbtree_postorder_for_each_entry_safe'
   - 'rdma_for_each_block'
   - 'rdma_for_each_port'
index 3e2bff9137e918255231b3729a81c63251d5c62a..c79a78766c07f6008f1aff45c2d97ab9ba19ad65 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -160,6 +160,7 @@ Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
 Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
 Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
@@ -242,6 +243,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Michel Lespinasse <michel@lespinasse.org>
+Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
+Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
 Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
 Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
 Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
index 0faf1354cd054fd39d69120e5b8eb5b89723af7b..5bcce27458e300fba1c21b620a8ce08f97d2d194 100644 (file)
@@ -1,7 +1,7 @@
 What:           /sys/class/dax/
 Date:           May, 2016
 KernelVersion:  v4.7
-Contact:        linux-nvdimm@lists.01.org
+Contact:        nvdimm@lists.linux.dev
 Description:   Device DAX is the device-centric analogue of Filesystem
                DAX (CONFIG_FS_DAX).  It allows memory ranges to be
                allocated and mapped without need of an intervening file
index 0360be39c98e9d809613dd856b3d5b7fda3d5b9b..dae880b1a5d5d8fdaefb6f4ab758a7793ea5a05a 100644 (file)
@@ -1,4 +1,4 @@
-This ABI is renamed and moved to a new location /sys/kernel/fadump/registered.¬
+This ABI is renamed and moved to a new location /sys/kernel/fadump/registered.
 
 What:          /sys/kernel/fadump_registered
 Date:          Feb 2012
index 6ce0b129ab12db7eb84c5ed99ced0e48ca46dc27..ca2396edb5f105430246b270dff7deb5dfc110ea 100644 (file)
@@ -1,4 +1,4 @@
-This ABI is renamed and moved to a new location /sys/kernel/fadump/release_mem.¬
+This ABI is renamed and moved to a new location /sys/kernel/fadump/release_mem.
 
 What:          /sys/kernel/fadump_release_mem
 Date:          Feb 2012
index ae8c1ca53828792274cc680bb01d15f8411edc79..277437005def734f66e8a11e9953c5108e030c87 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/bus/nd/devices/regionX/nfit/ecc_unit_size
 Date:          Aug, 2017
 KernelVersion: v4.14 (Removed v4.18)
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Size of a write request to a DIMM that will not incur a
                read-modify-write cycle at the memory controller.
index 63ef0b9ecce70be910ff1d3da30be7565d3642be..e7282d184a747ff3e5b63b045e20bc38cffcd3a3 100644 (file)
@@ -5,7 +5,7 @@ Interface Table (NFIT)' section in the ACPI specification
 What:          /sys/bus/nd/devices/nmemX/nfit/serial
 Date:          Jun, 2015
 KernelVersion: v4.2
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Serial number of the NVDIMM (non-volatile dual in-line
                memory module), assigned by the module vendor.
@@ -14,7 +14,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/handle
 Date:          Apr, 2015
 KernelVersion: v4.2
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) The address (given by the _ADR object) of the device on its
                parent bus of the NVDIMM device containing the NVDIMM region.
@@ -23,7 +23,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/device
 Date:          Apr, 2015
 KernelVersion: v4.1
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Device id for the NVDIMM, assigned by the module vendor.
 
@@ -31,7 +31,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/rev_id
 Date:          Jun, 2015
 KernelVersion: v4.2
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Revision of the NVDIMM, assigned by the module vendor.
 
@@ -39,7 +39,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/phys_id
 Date:          Apr, 2015
 KernelVersion: v4.2
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Handle (i.e., instance number) for the SMBIOS (system
                management BIOS) Memory Device structure describing the NVDIMM
@@ -49,7 +49,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/flags
 Date:          Jun, 2015
 KernelVersion: v4.2
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) The flags in the NFIT memory device sub-structure indicate
                the state of the data on the nvdimm relative to its energy
@@ -68,7 +68,7 @@ What:         /sys/bus/nd/devices/nmemX/nfit/format1
 What:          /sys/bus/nd/devices/nmemX/nfit/formats
 Date:          Apr, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) The interface codes indicate support for persistent memory
                mapped directly into system physical address space and / or a
@@ -84,7 +84,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/vendor
 Date:          Apr, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Vendor id of the NVDIMM.
 
@@ -92,7 +92,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/dsm_mask
 Date:          May, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) The bitmask indicates the supported device specific control
                functions relative to the NVDIMM command family supported by the
@@ -102,7 +102,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/family
 Date:          Apr, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Displays the NVDIMM family command sets. Values
                0, 1, 2 and 3 correspond to NVDIMM_FAMILY_INTEL,
@@ -118,7 +118,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/id
 Date:          Apr, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) ACPI specification 6.2 section 5.2.25.9, defines an
                identifier for an NVDIMM, which refelects the id attribute.
@@ -127,7 +127,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/subsystem_vendor
 Date:          Apr, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Sub-system vendor id of the NVDIMM non-volatile memory
                subsystem controller.
@@ -136,7 +136,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/subsystem_rev_id
 Date:          Apr, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Sub-system revision id of the NVDIMM non-volatile memory subsystem
                controller, assigned by the non-volatile memory subsystem
@@ -146,7 +146,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/nfit/subsystem_device
 Date:          Apr, 2016
 KernelVersion: v4.7
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) Sub-system device id for the NVDIMM non-volatile memory
                subsystem controller, assigned by the non-volatile memory
@@ -156,7 +156,7 @@ Description:
 What:          /sys/bus/nd/devices/ndbusX/nfit/revision
 Date:          Jun, 2015
 KernelVersion: v4.2
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) ACPI NFIT table revision number.
 
@@ -164,7 +164,7 @@ Description:
 What:          /sys/bus/nd/devices/ndbusX/nfit/scrub
 Date:          Sep, 2016
 KernelVersion: v4.9
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RW) This shows the number of full Address Range Scrubs (ARS)
                that have been completed since driver load time. Userspace can
@@ -177,7 +177,7 @@ Description:
 What:          /sys/bus/nd/devices/ndbusX/nfit/hw_error_scrub
 Date:          Sep, 2016
 KernelVersion: v4.9
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RW) Provides a way to toggle the behavior between just adding
                the address (cache line) where the MCE happened to the poison
@@ -196,7 +196,7 @@ Description:
 What:          /sys/bus/nd/devices/ndbusX/nfit/dsm_mask
 Date:          Jun, 2017
 KernelVersion: v4.13
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) The bitmask indicates the supported bus specific control
                functions. See the section named 'NVDIMM Root Device _DSMs' in
@@ -205,7 +205,7 @@ Description:
 What:          /sys/bus/nd/devices/ndbusX/nfit/firmware_activate_noidle
 Date:          Apr, 2020
 KernelVersion: v5.8
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RW) The Intel platform implementation of firmware activate
                support exposes an option let the platform force idle devices in
@@ -225,7 +225,7 @@ Description:
 What:          /sys/bus/nd/devices/regionX/nfit/range_index
 Date:          Jun, 2015
 KernelVersion: v4.2
-Contact:       linux-nvdimm@lists.01.org
+Contact:       nvdimm@lists.linux.dev
 Description:
                (RO) A unique number provided by the BIOS to identify an address
                range. Used by NVDIMM Region Mapping Structure to uniquely refer
index 8316c33862a047b86fb508a75122bcae16ca9768..92e2db0e2d3de331216112eb77e7d2dfe5429b23 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/bus/nd/devices/nmemX/papr/flags
 Date:          Apr, 2020
 KernelVersion: v5.8
-Contact:       linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
+Contact:       linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
 Description:
                (RO) Report flags indicating various states of a
                papr-pmem NVDIMM device. Each flag maps to a one or
@@ -36,7 +36,7 @@ Description:
 What:          /sys/bus/nd/devices/nmemX/papr/perf_stats
 Date:          May, 2020
 KernelVersion: v5.9
-Contact:       linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
+Contact:       linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
 Description:
                (RO) Report various performance stats related to papr-scm NVDIMM
                device.  Each stat is reported on a new line with each line
index a485434d2a0fb975082d79117056ef23cc613efe..88bddf192ceb79fc4a038b91c42a58a7bcd28f9b 100644 (file)
@@ -37,13 +37,13 @@ Description:        Maximum time allowed for periodic transfers per microframe (μs)
 
 What:          /sys/module/*/{coresize,initsize}
 Date:          Jan 2012
-KernelVersion:»·3.3
+KernelVersion: 3.3
 Contact:       Kay Sievers <kay.sievers@vrfy.org>
 Description:   Module size in bytes.
 
 What:          /sys/module/*/taint
 Date:          Jan 2012
-KernelVersion:»·3.3
+KernelVersion: 3.3
 Contact:       Kay Sievers <kay.sievers@vrfy.org>
 Description:   Module taint flags:
                        ==  =====================
index cb89dbdedc46381114cf94594d0fff68bf86e0a4..a161e58af5cc2ddc5269cc34e61360f9a360dba2 100644 (file)
                        the GPE dispatcher.
                        This facility can be used to prevent such uncontrolled
                        GPE floodings.
-                       Format: <byte>
+                       Format: <byte> or <bitmap-list>
 
        acpi_no_auto_serialize  [HW,ACPI]
                        Disable auto-serialization of AML methods
index 1d56a6b73a4e95ccb7c7b1798d7de41c231817f3..68b21395a743f0a0f355d8de09d1b0edefee42d5 100644 (file)
@@ -483,10 +483,11 @@ modprobe
 ========
 
 The full path to the usermode helper for autoloading kernel modules,
-by default "/sbin/modprobe".  This binary is executed when the kernel
-requests a module.  For example, if userspace passes an unknown
-filesystem type to mount(), then the kernel will automatically request
-the corresponding filesystem module by executing this usermode helper.
+by default ``CONFIG_MODPROBE_PATH``, which in turn defaults to
+"/sbin/modprobe".  This binary is executed when the kernel requests a
+module.  For example, if userspace passes an unknown filesystem type
+to mount(), then the kernel will automatically request the
+corresponding filesystem module by executing this usermode helper.
 This usermode helper should insert the needed module into the kernel.
 
 This sysctl only affects module autoloading.  It has no effect on the
@@ -1457,11 +1458,22 @@ unprivileged_bpf_disabled
 =========================
 
 Writing 1 to this entry will disable unprivileged calls to ``bpf()``;
-once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` will return
-``-EPERM``.
+once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` or ``CAP_BPF``
+will return ``-EPERM``. Once set to 1, this can't be cleared from the
+running kernel anymore.
 
-Once set, this can't be cleared.
+Writing 2 to this entry will also disable unprivileged calls to ``bpf()``,
+however, an admin can still change this setting later on, if needed, by
+writing 0 or 1 to this entry.
 
+If ``BPF_UNPRIV_DEFAULT_OFF`` is enabled in the kernel config, then this
+entry will default to 2 instead of 0.
+
+= =============================================================
+0 Unprivileged calls to ``bpf()`` are enabled
+1 Unprivileged calls to ``bpf()`` are disabled without recovery
+2 Unprivileged calls to ``bpf()`` are disabled
+= =============================================================
 
 watchdog
 ========
index 4f2452a95c4349cfaf3928d74c3492ae09970fb2..07a97aa266685ca19068eddb032152be4867be31 100644 (file)
@@ -1,4 +1,4 @@
-==============
+==============
 Data Integrity
 ==============
 
index 70500b189cc84431e266de9017cd082fd04e5b3f..5845960ca382185e89603015f1d10429636d0467 100644 (file)
@@ -146,18 +146,18 @@ with the kernel as a block device by registering the following general
 *struct file_operations*::
 
        struct file_operations cdrom_fops = {
-               NULL,                   /∗ lseek ∗/
-               block _read ,           /∗ read—general block-dev read ∗/
-               block _write,           /∗ write—general block-dev write ∗/
-               NULL,                   /∗ readdir ∗/
-               NULL,                   /∗ select ∗/
-               cdrom_ioctl,            /∗ ioctl ∗/
-               NULL,                   /∗ mmap ∗/
-               cdrom_open,             /∗ open ∗/
-               cdrom_release,          /∗ release ∗/
-               NULL,                   /∗ fsync ∗/
-               NULL,                   /∗ fasync ∗/
-               NULL                    /∗ revalidate ∗/
+               NULL,                   /* lseek */
+               block _read ,           /* read--general block-dev read */
+               block _write,           /* write--general block-dev write */
+               NULL,                   /* readdir */
+               NULL,                   /* select */
+               cdrom_ioctl,            /* ioctl */
+               NULL,                   /* mmap */
+               cdrom_open,             /* open */
+               cdrom_release,          /* release */
+               NULL,                   /* fsync */
+               NULL,                   /* fasync */
+               NULL                    /* revalidate */
        };
 
 Every active CD-ROM device shares this *struct*. The routines
@@ -250,12 +250,12 @@ The drive-specific, minor-like information that is registered with
 `cdrom.c`, currently contains the following fields::
 
   struct cdrom_device_info {
-       const struct cdrom_device_ops * ops;    /* device operations for this major */
+       const struct cdrom_device_ops * ops;    /* device operations for this major */
        struct list_head list;                  /* linked list of all device_info */
        struct gendisk * disk;                  /* matching block layer disk */
        void *  handle;                         /* driver-dependent data */
 
-       int mask;                               /* mask of capability: disables them */
+       int mask;                               /* mask of capability: disables them */
        int speed;                              /* maximum speed for reading data */
        int capacity;                           /* number of discs in a jukebox */
 
@@ -569,7 +569,7 @@ the *CDC_CLOSE_TRAY* bit in *mask*.
 
 In the file `cdrom.c` you will encounter many constructions of the type::
 
-       if (cdo->capability & ∼cdi->mask & CDC _⟨capability⟩) ...
+       if (cdo->capability & ~cdi->mask & CDC _<capability>) ...
 
 There is no *ioctl* to set the mask... The reason is that
 I think it is better to control the **behavior** rather than the
index c268debe5b8d58cd04530e7c8cde17a638ad149d..28675b0b80f1ba53022550c85dfa43af7a394817 100644 (file)
@@ -60,7 +60,6 @@ properties:
     maxItems: 2
 
   idt,xtal-load-femtofarads:
-    $ref: /schemas/types.yaml#/definitions/uint32
     minimum: 9000
     maximum: 22760
     description: Optional load capacitor for XTAL1 and XTAL2
@@ -84,7 +83,6 @@ patternProperties:
         enum: [ 1800000, 2500000, 3300000 ]
       idt,slew-percent:
         description: The Slew rate control for CMOS single-ended.
-        $ref: /schemas/types.yaml#/definitions/uint32
         enum: [ 80, 85, 90, 100 ]
 
 required:
index 32509b98142e98654f9c4b2eda761696b6fe281d..92b49bc379394f80174c680d1e1f2d37fa1cd80c 100644 (file)
@@ -149,6 +149,17 @@ properties:
     maxItems: 6
     $ref: /schemas/types.yaml#/definitions/uint32-array
 
+  sink-vdos-v1:
+    description: An array of u32 with each entry, a Vendor Defined Message Object (VDO),
+      providing additional information corresponding to the product, the detailed bit
+      definitions and the order of each VDO can be found in
+      "USB Power Delivery Specification Revision 2.0, Version 1.3" chapter 6.4.4.3.1 Discover
+      Identity. User can specify the VDO array via VDO_IDH/_CERT/_PRODUCT/_CABLE/_AMA defined in
+      dt-bindings/usb/pd.h.
+    minItems: 3
+    maxItems: 6
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+
   op-sink-microwatt:
     description: Sink required operating power in microwatt, if source can't
       offer the power, Capability Mismatch is set. Required for power sink and
@@ -207,6 +218,10 @@ properties:
       SNK_READY for non-pd link.
     type: boolean
 
+dependencies:
+  sink-vdos-v1: [ 'sink-vdos' ]
+  sink-vdos: [ 'sink-vdos-v1' ]
+
 required:
   - compatible
 
index 33ee575bb09da1125df908a014bc3497aa7f943c..926be9a29044b2095c47bee4a91df44f31c2b47d 100644 (file)
@@ -49,7 +49,7 @@ examples:
         #size-cells = <0>;
 
         adc@48 {
-            comatible = "ti,ads7828";
+            compatible = "ti,ads7828";
             reg = <0x48>;
             vref-supply = <&vref>;
             ti,differential-input;
index 7b553d559c832c4b262b249ad71d497ec3c714f3..98c6fcf7bf26560e671cd7c8b58a395d12db47cc 100644 (file)
@@ -46,6 +46,13 @@ properties:
     description: |
       I2C bus timeout in microseconds
 
+  fsl,i2c-erratum-a004447:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: |
+      Indicates the presence of QorIQ erratum A-004447, which
+      says that the standard i2c recovery scheme mechanism does
+      not work and an alternate implementation is needed.
+
 required:
   - compatible
   - reg
index 6f2398cdc82ddd9669d42278bc00fee2baa3f9df..1e7894e524f9b9e8ab4c16b145ec26745cb2783b 100644 (file)
@@ -102,7 +102,6 @@ patternProperties:
 
       st,adc-channel-names:
         description: List of single-ended channel names.
-        $ref: /schemas/types.yaml#/definitions/string-array
 
       st,filter-order:
         description: |
index 74244d21d2b3f3bb04702f50956c4d1c9e4ff243..d41d8743aad4ed2a0f5cf66dce97ef2b569a09c5 100644 (file)
@@ -38,6 +38,5 @@ properties:
       Duration in seconds which the key should be kept pressed for device to
       reset automatically. Device with key pressed reset feature can specify
       this property.
-    $ref: /schemas/types.yaml#/definitions/uint32
 
 additionalProperties: true
index cb6498108b78a67832fc44a8c13530d066ed2f39..36c955965d90261a5be5e58afc97da695fb5dce6 100644 (file)
@@ -92,7 +92,6 @@ properties:
       this interconnect to send RPMh commands.
 
   qcom,bcm-voter-names:
-    $ref: /schemas/types.yaml#/definitions/string-array
     description: |
       Names for each of the qcom,bcm-voters specified.
 
index ccebce597f372b656adf76ab0f1eccb6e85c6a30..a555d94084b7f590cf8bade67fa7ab233d46db38 100644 (file)
@@ -4,8 +4,8 @@ This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
 In these SoCs it's possible to control LEDs both as GPIOs or by hardware.
 However, on some devices there are Serial LEDs (LEDs connected to a 74x164
 controller), which can either be controlled by software (exporting the 74x164
-as spi-gpio. See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
-by hardware using this driver.
+as spi-gpio. See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml),
+or by hardware using this driver.
 Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and
 exporting the 74x164 as spi-gpio prevents those LEDs to be hardware
 controlled, so the only chance to keep them working is by using this driver.
index da5708e7b43b93ba83a0f759d39452c44be8223d..6e51c6b91ee54cd51c781d2e9942c1b75b652bd1 100644 (file)
@@ -3,7 +3,7 @@ LEDs connected to Broadcom BCM6358 controller
 This controller is present on BCM6358 and BCM6368.
 In these SoCs there are Serial LEDs (LEDs connected to a 74x164 controller),
 which can either be controlled by software (exporting the 74x164 as spi-gpio.
-See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or
+See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml), or
 by hardware using this driver.
 
 Required properties:
index f1bdaeab40538f130c720cd17073e53aa74a8d6c..9cd56ff2c316cd0e5f88c8737d1cc6a132683d06 100644 (file)
@@ -67,9 +67,7 @@ properties:
     maxItems: 1
 
   clock-names:
-    maxItems: 1
-    items:
-      - const: fck
+    const: fck
 
   resets:
     maxItems: 1
@@ -99,32 +97,26 @@ properties:
       Indicates that the channel acts as primary among the bonded channels.
 
   port:
-    type: object
+    $ref: /schemas/graph.yaml#/properties/port
+    unevaluatedProperties: false
     description:
-      Child port node corresponding to the data input, in accordance with the
-      video interface bindings defined in
-      Documentation/devicetree/bindings/media/video-interfaces.txt.
-      The port node must contain at least one endpoint.
+      Child port node corresponding to the data input. The port node must
+      contain at least one endpoint.
 
     properties:
       endpoint:
-        type: object
+        $ref: /schemas/graph.yaml#/$defs/endpoint-base
+        unevaluatedProperties: false
 
         properties:
-          remote-endpoint:
-            description:
-              A phandle to the remote tuner endpoint subnode in remote node
-              port.
-
           sync-active:
+            $ref: /schemas/types.yaml#/definitions/uint32
             enum: [0, 1]
             description:
               Indicates sync signal polarity, 0/1 for low/high respectively.
               This property maps to SYNCAC bit in the hardware manual. The
               default is 1 (active high).
 
-        additionalProperties: false
-
 required:
   - compatible
   - reg
index 7443490d4cc6d6a0a51d2c72e8c45da045beea7f..5fe6d3dceb082817f4d468e7b6838aeee9a72194 100644 (file)
@@ -105,7 +105,6 @@ properties:
       - description: Whether the IPA clock is enabled (if valid)
 
   qcom,smem-state-names:
-    $ref: /schemas/types.yaml#/definitions/string-array
     description: The names of the state bits used for SMP2P output
     items:
       - const: ipa-clock-enabled-valid
index 8ce5ed8a58dd76e6af8c6bdf1ffb2fda6c986be1..c101a1ec846ea8e95052eb066c3e6bf42a2c3ba5 100644 (file)
@@ -10,7 +10,7 @@ allOf:
   - $ref: ethernet-controller.yaml#
 
 maintainers:
-  - Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+  - Sergei Shtylyov <sergei.shtylyov@gmail.com>
 
 properties:
   compatible:
index d479ad977e24fb6b7ee1bdc1c818e3af24080e2d..b6791702bcfc916971847eae61e2a15d03456272 100644 (file)
@@ -9,7 +9,6 @@ Required properties:
              "mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
              "mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
              "mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
-             "mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
 - reg: Should contain registers location and length
 
 = Data cells =
index 01dcd14e7b2ad21248efdd588efcaf279d1fa54e..320a232c7208c9c4315d93b64069c606bee5185e 100644 (file)
@@ -118,7 +118,7 @@ patternProperties:
         description:
           Specifies the Spread Spectrum Clocking mode used. It can be NO_SSC,
           EXTERNAL_SSC or INTERNAL_SSC.
-          Refer include/dt-bindings/phy/phy-cadence-torrent.h for the constants to be used.
+          Refer include/dt-bindings/phy/phy-cadence.h for the constants to be used.
         $ref: /schemas/types.yaml#/definitions/uint32
         enum: [0, 1, 2]
         default: 0
index db1aa238cda53fcdb2a95467127495d9c3f5f950..b62c2431f94e75c041b78383a43ef1506f9ecb5a 100644 (file)
@@ -20,7 +20,7 @@ properties:
     maxItems: 1
 
   phys:
-    $ref: /schemas/types.yaml#/definitions/phandle
+    maxItems: 1
     description: phandle to the USB phy
 
   monitored-battery:
index db61f0731a203962003fb5907d3c660b9a65cc2b..2e35aeaa8781da51257892c7de17804b52260bb3 100644 (file)
@@ -57,7 +57,7 @@ patternProperties:
           rate
 
       sound-dai:
-        $ref: /schemas/types.yaml#/definitions/phandle
+        $ref: /schemas/types.yaml#/definitions/phandle-array
         description: phandle of the CPU DAI
 
     patternProperties:
@@ -71,7 +71,7 @@ patternProperties:
 
         properties:
           sound-dai:
-            $ref: /schemas/types.yaml#/definitions/phandle
+            $ref: /schemas/types.yaml#/definitions/phandle-array
             description: phandle of the codec DAI
 
         required:
index b4c190bddd84c87fb925ce1620a39445b7c73bd3..61802a11baf4e4ebfbf78c21c90399bf5f8f57e7 100644 (file)
@@ -49,7 +49,7 @@ properties:
     maxItems: 1
 
   memory-region:
-    $ref: /schemas/types.yaml#/definitions/phandle
+    maxItems: 1
     description:
       phandle to a node describing reserved memory (System RAM memory)
       The M core can't access all the DDR memory space on some platform,
index d09c6355e22dcecffcbf874cd78fe32432cb3583..51c7622dc20b41675d1138c77aa1c3297e3f7a9d 100644 (file)
@@ -72,7 +72,7 @@ examples:
 
             mux-controls = <&mux>;
 
-            spi-flash@0 {
+            flash@0 {
                 compatible = "jedec,spi-nor";
                 reg = <0>;
                 spi-max-frequency = <40000000>;
index ef6d59e0978e77dcfd3e44b98aeee1bcf512b8b8..1d8302b89bd471c068a9417697e7521bea43922f 100644 (file)
@@ -4,7 +4,7 @@ LIBNVDIMM: Non-Volatile Devices
 
 libnvdimm - kernel / libndctl - userspace helper library
 
-linux-nvdimm@lists.01.org
+nvdimm@lists.linux.dev
 
 Version 13
 
index 21351b8c95a4d4f8db43f7d40cace9e7affeb9db..8f7d7af3b90b1d9e00ed4ea8d53535edeae54a3f 100644 (file)
@@ -19,7 +19,6 @@ Serial drivers
 
     moxa-smartio
     n_gsm
-    rocket
     serial-iso7816
     serial-rs485
 
index 543e70434da2273a0cc7da6fc3e4441217228a1f..2c94ff2f438573625e6db043698df1edbb2bb42d 100644 (file)
@@ -109,16 +109,21 @@ well as to make sure they aren't relying on some HCD-specific behavior.
 USB-Standard Types
 ==================
 
-In ``drivers/usb/common/common.c`` and ``drivers/usb/common/debug.c`` you
-will find the USB data types defined in chapter 9 of the USB specification.
-These data types are used throughout USB, and in APIs including this host
-side API, gadget APIs, usb character devices and debugfs interfaces.
+In ``include/uapi/linux/usb/ch9.h`` you will find the USB data types defined
+in chapter 9 of the USB specification. These data types are used throughout
+USB, and in APIs including this host side API, gadget APIs, usb character
+devices and debugfs interfaces. That file is itself included by
+``include/linux/usb/ch9.h``, which also contains declarations of a few
+utility routines for manipulating these data types; the implementations
+are in ``drivers/usb/common/common.c``.
 
 .. kernel-doc:: drivers/usb/common/common.c
    :export:
 
-.. kernel-doc:: drivers/usb/common/debug.c
-   :export:
+In addition, some functions useful for creating debugging output are
+defined in ``drivers/usb/common/debug.c``.
+
+.. _usb_header:
 
 Host-Side Data Types and Macros
 ===============================
index bf145171c2bf8f89095dcf8d316ad7d14a6a16e2..832839fcf4c3b6d89cc3bd44a70d89a2fece3897 100644 (file)
@@ -50,8 +50,8 @@ Here is the main features of EROFS:
 
  - Support POSIX.1e ACLs by using xattrs;
 
- - Support transparent file compression as an option:
-   LZ4 algorithm with 4 KB fixed-sized output compression for high performance.
+ - Support transparent data compression as an option:
+   LZ4 algorithm with the fixed-sized output compression for high performance.
 
 The following git tree provides the file system user-space tools under
 development (ex, formatting tool mkfs.erofs):
@@ -113,31 +113,31 @@ may not. All metadatas can be now observed in two different spaces (views):
 
     ::
 
-                                   |-> aligned with 8B
-                                           |-> followed closely
-       + meta_blkaddr blocks                                      |-> another slot
-       _____________________________________________________________________
-       |  ...   | inode |  xattrs  | extents  | data inline | ... | inode ...
-       |________|_______|(optional)|(optional)|__(optional)_|_____|__________
-               |-> aligned with the inode slot size
-                   .                   .
-                   .                         .
-               .                              .
-               .                                    .
-           .                                         .
-           .                                              .
-       .____________________________________________________|-> aligned with 4B
-       | xattr_ibody_header | shared xattrs | inline xattrs |
-       |____________________|_______________|_______________|
-       |->    12 bytes    <-|->x * 4 bytes<-|               .
-                           .                .                 .
-                       .                      .                   .
-               .                           .                     .
-           ._______________________________.______________________.
-           | id | id | id | id |  ... | id | ent | ... | ent| ... |
-           |____|____|____|____|______|____|_____|_____|____|_____|
-                                           |-> aligned with 4B
-                                                       |-> aligned with 4B
+                                 |-> aligned with 8B
+                                            |-> followed closely
+     + meta_blkaddr blocks                                      |-> another slot
+       _____________________________________________________________________
+     |  ...   | inode |  xattrs  | extents  | data inline | ... | inode ...
+     |________|_______|(optional)|(optional)|__(optional)_|_____|__________
+              |-> aligned with the inode slot size
+                   .                   .
+                 .                         .
+               .                              .
+             .                                    .
+           .                                         .
+         .                                              .
+       .____________________________________________________|-> aligned with 4B
+       | xattr_ibody_header | shared xattrs | inline xattrs |
+       |____________________|_______________|_______________|
+       |->    12 bytes    <-|->x * 4 bytes<-|               .
+                           .                .                 .
+                     .                      .                   .
+                .                           .                     .
+            ._______________________________.______________________.
+            | id | id | id | id |  ... | id | ent | ... | ent| ... |
+            |____|____|____|____|______|____|_____|_____|____|_____|
+                                            |-> aligned with 4B
+                                                        |-> aligned with 4B
 
     Inode could be 32 or 64 bytes, which can be distinguished from a common
     field which all inode versions have -- i_format::
@@ -175,13 +175,13 @@ may not. All metadatas can be now observed in two different spaces (views):
     Each share xattr can also be directly found by the following formula:
          xattr offset = xattr_blkaddr * block_size + 4 * xattr_id
 
-    ::
+::
 
-                           |-> aligned by  4 bytes
-       + xattr_blkaddr blocks                     |-> aligned with 4 bytes
-       _________________________________________________________________________
-       |  ...   | xattr_entry |  xattr data | ... |  xattr_entry | xattr data  ...
-       |________|_____________|_____________|_____|______________|_______________
+                           |-> aligned by  4 bytes
+    + xattr_blkaddr blocks                     |-> aligned with 4 bytes
+     _________________________________________________________________________
+    |  ...   | xattr_entry |  xattr data | ... |  xattr_entry | xattr data  ...
+    |________|_____________|_____________|_____|______________|_______________
 
 Directories
 -----------
@@ -193,48 +193,77 @@ algorithm (could refer to the related source code).
 
 ::
 
-                   ___________________________
-                   /                           |
-               /              ______________|________________
-               /              /              | nameoff1       | nameoffN-1
-    ____________.______________._______________v________________v__________
-    | dirent | dirent | ... | dirent | filename | filename | ... | filename |
-    |___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
-       \                           ^
-       \                          |                           * could have
-       \                         |                             trailing '\0'
-           \________________________| nameoff0
-
-                               Directory block
+                  ___________________________
+                 /                           |
+                /              ______________|________________
+               /              /              | nameoff1       | nameoffN-1
+  ____________.______________._______________v________________v__________
+ | dirent | dirent | ... | dirent | filename | filename | ... | filename |
+ |___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
+      \                           ^
+       \                          |                           * could have
+        \                         |                             trailing '\0'
+         \________________________| nameoff0
+                             Directory block
 
 Note that apart from the offset of the first filename, nameoff0 also indicates
 the total number of directory entries in this block since it is no need to
 introduce another on-disk field at all.
 
-Compression
------------
-Currently, EROFS supports 4KB fixed-sized output transparent file compression,
-as illustrated below::
-
-           |---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
-           clusterofs                      clusterofs            clusterofs
-           |                               |                     |   logical data
-    _________v_______________________________v_____________________v_______________
-    ... |    .        |             |        .    |             |  .          | ...
-    ____|____.________|_____________|________.____|_____________|__.__________|____
-       |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|
-           size          size          size          size          size
-           .                             .                .                   .
-           .                       .               .                  .
-               .                  .              .                .
-       _______._____________._____________._____________._____________________
-           ... |             |             |             | ... physical data
-       _______|_____________|_____________|_____________|_____________________
-               |-> cluster <-|-> cluster <-|-> cluster <-|
-                   size          size          size
-
-Currently each on-disk physical cluster can contain 4KB (un)compressed data
-at most. For each logical cluster, there is a corresponding on-disk index to
-describe its cluster type, physical cluster address, etc.
-
-See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
+Data compression
+----------------
+EROFS implements LZ4 fixed-sized output compression which generates fixed-sized
+compressed data blocks from variable-sized input in contrast to other existing
+fixed-sized input solutions. Relatively higher compression ratios can be gotten
+by using fixed-sized output compression since nowadays popular data compression
+algorithms are mostly LZ77-based and such fixed-sized output approach can be
+benefited from the historical dictionary (aka. sliding window).
+
+In details, original (uncompressed) data is turned into several variable-sized
+extents and in the meanwhile, compressed into physical clusters (pclusters).
+In order to record each variable-sized extent, logical clusters (lclusters) are
+introduced as the basic unit of compress indexes to indicate whether a new
+extent is generated within the range (HEAD) or not (NONHEAD). Lclusters are now
+fixed in block size, as illustrated below::
+
+          |<-    variable-sized extent    ->|<-       VLE         ->|
+        clusterofs                        clusterofs              clusterofs
+          |                                 |                       |
+ _________v_________________________________v_______________________v________
+ ... |    .         |              |        .     |              |  .   ...
+ ____|____._________|______________|________.___ _|______________|__.________
+     |-> lcluster <-|-> lcluster <-|-> lcluster <-|-> lcluster <-|
+          (HEAD)        (NONHEAD)       (HEAD)        (NONHEAD)    .
+           .             CBLKCNT            .                    .
+            .                               .                  .
+             .                              .                .
+       _______._____________________________.______________._________________
+          ... |              |              |              | ...
+       _______|______________|______________|______________|_________________
+              |->      big pcluster       <-|-> pcluster <-|
+
+A physical cluster can be seen as a container of physical compressed blocks
+which contains compressed data. Previously, only lcluster-sized (4KB) pclusters
+were supported. After big pcluster feature is introduced (available since
+Linux v5.13), pcluster can be a multiple of lcluster size.
+
+For each HEAD lcluster, clusterofs is recorded to indicate where a new extent
+starts and blkaddr is used to seek the compressed data. For each NONHEAD
+lcluster, delta0 and delta1 are available instead of blkaddr to indicate the
+distance to its HEAD lcluster and the next HEAD lcluster. A PLAIN lcluster is
+also a HEAD lcluster except that its data is uncompressed. See the comments
+around "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
+
+If big pcluster is enabled, pcluster size in lclusters needs to be recorded as
+well. Let the delta0 of the first NONHEAD lcluster store the compressed block
+count with a special flag as a new called CBLKCNT NONHEAD lcluster. It's easy
+to understand its delta0 is constantly 1, as illustrated below::
+
+   __________________________________________________________
+  | HEAD |  NONHEAD  | NONHEAD | ... | NONHEAD | HEAD | HEAD |
+  |__:___|_(CBLKCNT)_|_________|_____|_________|__:___|____:_|
+     |<----- a big pcluster (with CBLKCNT) ------>|<--  -->|
+           a lcluster-sized pcluster (without CBLKCNT) ^
+
+If another HEAD follows a HEAD lcluster, there is no room to record CBLKCNT,
+but it's easy to know the size of such pcluster is 1 lcluster as well.
index e195a7d14309a00c4e70ad0f319462baee06de5f..b3ef81475cf8bd19b255262ff952faf98b253c79 100644 (file)
@@ -21,10 +21,10 @@ Description
 The TMP103 is a digital output temperature sensor in a four-ball
 wafer chip-scale package (WCSP). The TMP103 is capable of reading
 temperatures to a resolution of 1°C. The TMP103 is specified for
-operation over a temperature range of 40°C to +125°C.
+operation over a temperature range of -40°C to +125°C.
 
 Resolution: 8 Bits
-Accuracy: ±1°C Typ (10°C to +100°C)
+Accuracy: ±1°C Typ (-10°C to +100°C)
 
 The driver provides the common sysfs-interface for temperatures (see
 Documentation/hwmon/sysfs-interface.rst under Temperatures).
index 8a9b18573688d87c9fc521d00b129bd7ae7c61b7..2d3f6bd969a2b4093a16d7b42a9ccf8a38a7007c 100644 (file)
@@ -173,7 +173,7 @@ Director rule is added from ethtool (Sideband filter), ATR is turned off by the
 driver. To re-enable ATR, the sideband can be disabled with the ethtool -K
 option. For example::
 
-  ethtool K [adapter] ntuple [off|on]
+  ethtool -K [adapter] ntuple [off|on]
 
 If sideband is re-enabled after ATR is re-enabled, ATR remains enabled until a
 TCP-IP flow is added. When all TCP-IP sideband rules are deleted, ATR is
@@ -688,7 +688,7 @@ shaper bw_rlimit: for each tc, sets minimum and maximum bandwidth rates.
 Totals must be equal or less than port speed.
 
 For example: min_rate 1Gbit 3Gbit: Verify bandwidth limit using network
-monitoring tools such as ifstat or sar –n DEV [interval] [number of samples]
+monitoring tools such as `ifstat` or `sar -n DEV [interval] [number of samples]`
 
 2. Enable HW TC offload on interface::
 
index 52e037b11c979837e71fa789437107a5eb664890..25330b7b5168da6a7baecd9b7f9f8fcfb776589b 100644 (file)
@@ -179,7 +179,7 @@ shaper bw_rlimit: for each tc, sets minimum and maximum bandwidth rates.
 Totals must be equal or less than port speed.
 
 For example: min_rate 1Gbit 3Gbit: Verify bandwidth limit using network
-monitoring tools such as ifstat or sar –n DEV [interval] [number of samples]
+monitoring tools such as ``ifstat`` or ``sar -n DEV [interval] [number of samples]``
 
 NOTE:
   Setting up channels via ethtool (ethtool -L) is not supported when the
index dabee3729e5a5b1a12ed4c4274c2616e48660602..56490c4c0c07a33e9e38c4ab840342001d612fc5 100644 (file)
@@ -109,6 +109,16 @@ auxiliary vector.
 
 scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
 
+ptrace
+------
+When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
+the system call type that can be used to distinguish between sc and scv 0
+system calls, and the different register conventions can be accounted for.
+
+If the value of (pt_regs.trap & 0xfff0) is 0xc00 then the system call was
+performed with the sc instruction, if it is 0x3000 then the system call was
+performed with the scv 0 instruction.
+
 vsyscall
 ========
 
index e5a1be476047634777c67cd9e1b7e190e1adba36..dc2d813b2e793728327bfbc1a292e9f9121dda58 100644 (file)
@@ -1,4 +1,4 @@
-.. _process_statement_kernel:
+.. _process_statement_kernel:
 
 Linux Kernel Enforcement Statement
 ----------------------------------
index 00d5b1db227d43a7d6a8d19660a54d12b5216cc8..31c67522f2ade857a23f1fae94296d694ba7fa0d 100644 (file)
@@ -1,4 +1,4 @@
-=============================
+=============================
 Virtual TPM interface for Xen
 =============================
 
index c4c70e1aada3cbff8359c3d5f2ffcc29fc90fbc5..6cadad7c3aad45145e539511caf496fcdb294232 100644 (file)
@@ -1,4 +1,4 @@
-======================================
+======================================
 NO_HZ: Reducing Scheduling-Clock Ticks
 ======================================
 
diff --git a/Documentation/translations/zh_CN/SecurityBugs b/Documentation/translations/zh_CN/SecurityBugs
deleted file mode 100644 (file)
index 2d0fffd..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-Chinese translated version of Documentation/admin-guide/security-bugs.rst
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly.  However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help.  Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Chinese maintainer: Harry Wei <harryxiyou@gmail.com>
----------------------------------------------------------------------
-Documentation/admin-guide/security-bugs.rst 的中文翻译
-
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
-
-中文版维护者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-中文版翻译者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-中文版校译者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-
-
-以下为正文
----------------------------------------------------------------------
-Linux内核开发者认为安全非常重要。因此,我们想要知道当一个有关于
-安全的漏洞被发现的时候,并且它可能会被尽快的修复或者公开。请把这个安全
-漏洞报告给Linux内核安全团队。
-
-1) 联系
-
-linux内核安全团队可以通过email<security@kernel.org>来联系。这是
-一组独立的安全工作人员,可以帮助改善漏洞报告并且公布和取消一个修复。安
-全团队有可能会从部分的维护者那里引进额外的帮助来了解并且修复安全漏洞。
-当遇到任何漏洞,所能提供的信息越多就越能诊断和修复。如果你不清楚什么
-是有帮助的信息,那就请重温一下admin-guide/reporting-bugs.rst文件中的概述过程。任
-何攻击性的代码都是非常有用的,未经报告者的同意不会被取消,除非它已经
-被公布于众。
-
-2) 公开
-
-Linux内核安全团队的宗旨就是和漏洞提交者一起处理漏洞的解决方案直
-到公开。我们喜欢尽快地完全公开漏洞。当一个漏洞或者修复还没有被完全地理
-解,解决方案没有通过测试或者供应商协调,可以合理地延迟公开。然而,我们
-期望这些延迟尽可能的短些,是可数的几天,而不是几个星期或者几个月。公开
-日期是通过安全团队和漏洞提供者以及供应商洽谈后的结果。公开时间表是从很
-短(特殊的,它已经被公众所知道)到几个星期。作为一个基本的默认政策,我
-们所期望通知公众的日期是7天的安排。
-
-3) 保密协议
-
-Linux内核安全团队不是一个正式的团体,因此不能加入任何的保密协议。
index 158e48dab586d1438807c693e014b05fa4ed541f..e4566ffb223f29a8124f84e94feaf1a7cfc865b9 100644 (file)
@@ -140,7 +140,7 @@ is an arbitrary string allowed in a filesystem, e.g.::
 Each function provides its specific set of attributes, with either read-only
 or read-write access. Where applicable they need to be written to as
 appropriate.
-Please refer to Documentation/ABI/*/configfs-usb-gadget* for more information.
+Please refer to Documentation/ABI/testing/configfs-usb-gadget for more information.
 
 4. Associating the functions with their configurations
 ------------------------------------------------------
index d1111b74bf7590faeed5da996cfe55c596f9e9f3..5ae1f74fe74b6dee3418042cbb6f79cee13a0f40 100644 (file)
@@ -1,4 +1,4 @@
-================
+================
 mtouchusb driver
 ================
 
index 8fa7dbd3da9a4f9d627aff69148acb9881bd2786..69586aeb60bb459e505a28b4bf157e4237914f95 100644 (file)
@@ -1,4 +1,4 @@
-==========
+==========
 USB serial
 ==========
 
index bd9165241b6c8970754c5ab7b84087b4eb251ba5..6efb41cc807255a7ae1b61879062c55c16a32389 100644 (file)
@@ -250,14 +250,14 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)``  (or ``poll()``) on a
 seccomp notification fd to receive a ``struct seccomp_notif``, which contains
 five members: the input length of the structure, a unique-per-filter ``id``,
 the ``pid`` of the task which triggered this request (which may be 0 if the
-task is in a pid ns not visible from the listener's pid namespace), a ``flags``
-member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing
-whether or not the notification is a result of a non-fatal signal, and the
-``data`` passed to seccomp. Userspace can then make a decision based on this
-information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a
-response, indicating what should be returned to userspace. The ``id`` member of
-``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct
-seccomp_notif``.
+task is in a pid ns not visible from the listener's pid namespace). The
+notification also contains the ``data`` passed to seccomp, and a filters flag.
+The structure should be zeroed out prior to calling the ioctl.
+
+Userspace can then make a decision based on this information about what to do,
+and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
+returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
+be the same ``id`` as in ``struct seccomp_notif``.
 
 It is worth noting that ``struct seccomp_data`` contains the values of register
 arguments to the syscall, but does not contain pointers to memory. The task's
index 5ec8a1902e15aea628a90e544994c359954229b0..5c081c8c7164ae2185eea3fa86621fdaa5288c45 100644 (file)
@@ -22,7 +22,7 @@ to SEV::
                  [ecx]:
                        Bits[31:0]  Number of encrypted guests supported simultaneously
 
-If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015
+If support for SEV is present, MSR 0xc001_0010 (MSR_AMD64_SYSCFG) and MSR 0xc001_0015
 (MSR_K7_HWCR) can be used to determine if it can be enabled::
 
        0xc001_0010:
index 22d077562149614b9c28a5c4e6c7d905448b9072..7fcb2fd38f42e8388d220173ee86ade6b1a5d911 100644 (file)
@@ -4803,7 +4803,7 @@ KVM_PV_VM_VERIFY
 4.126 KVM_X86_SET_MSR_FILTER
 ----------------------------
 
-:Capability: KVM_X86_SET_MSR_FILTER
+:Capability: KVM_CAP_X86_MSR_FILTER
 :Architectures: x86
 :Type: vm ioctl
 :Parameters: struct kvm_msr_filter
@@ -6715,7 +6715,7 @@ accesses that would usually trigger a #GP by KVM into the guest will
 instead get bounced to user space through the KVM_EXIT_X86_RDMSR and
 KVM_EXIT_X86_WRMSR exit notifications.
 
-8.27 KVM_X86_SET_MSR_FILTER
+8.27 KVM_CAP_X86_MSR_FILTER
 ---------------------------
 
 :Architectures: x86
index 5bfe28b0728e845e4564cdc8bb9e7d92715effda..20d85daed395e34073496b5bded7726d995e1cf7 100644 (file)
@@ -171,8 +171,8 @@ Shadow pages contain the following information:
     shadow pages) so role.quadrant takes values in the range 0..3.  Each
     quadrant maps 1GB virtual address space.
   role.access:
-    Inherited guest access permissions in the form uwx.  Note execute
-    permission is positive, not negative.
+    Inherited guest access permissions from the parent ptes in the form uwx.
+    Note execute permission is positive, not negative.
   role.invalid:
     The page is invalid and should not be used.  It is a root page that is
     currently pinned (by a cpu hardware register pointing to it); once it is
index 5feb3706a7ae58278bc9c747123d042f8b6f8958..af1b37441e0aeb8b5cddde88fb4df9577243c95f 100644 (file)
@@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD
   necessary to inform each VCPU to completely refresh the tables.  This
   request is used for that.
 
-KVM_REQ_PENDING_TIMER
+KVM_REQ_UNBLOCK
 
-  This request may be made from a timer handler run on the host on behalf
-  of a VCPU.  It informs the VCPU thread to inject a timer interrupt.
+  This request informs the vCPU to exit kvm_vcpu_block.  It is used for
+  example from timer handlers that run on the host on behalf of a vCPU,
+  or in order to update the interrupt routing and ensure that assigned
+  devices will wake up the vCPU.
 
 KVM_REQ_UNHALT
 
index c48d452d0718964f0d269ba469725199e3d399f8..a1940ebe7be50cb17260b240a3ce78ec4f6a2242 100644 (file)
@@ -53,7 +53,7 @@ CPUID function 0x8000001f reports information related to SME::
                           system physical addresses, not guest physical
                           addresses)
 
-If support for SME is present, MSR 0xc00100010 (MSR_K8_SYSCFG) can be used to
+If support for SME is present, MSR 0xc00100010 (MSR_AMD64_SYSCFG) can be used to
 determine if SME is enabled and/or to enable memory encryption::
 
        0xc0010010:
@@ -79,7 +79,7 @@ The state of SME in the Linux kernel can be documented as follows:
          The CPU supports SME (determined through CPUID instruction).
 
        - Enabled:
-         Supported and bit 23 of MSR_K8_SYSCFG is set.
+         Supported and bit 23 of MSR_AMD64_SYSCFG is set.
 
        - Active:
          Supported, Enabled and the Linux kernel is actively applying
@@ -89,7 +89,7 @@ The state of SME in the Linux kernel can be documented as follows:
 SME can also be enabled and activated in the BIOS. If SME is enabled and
 activated in the BIOS, then all memory accesses will be encrypted and it will
 not be necessary to activate the Linux memory encryption support.  If the BIOS
-merely enables SME (sets bit 23 of the MSR_K8_SYSCFG), then Linux can activate
+merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
 memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
 by supplying mem_encrypt=on on the kernel command line.  However, if BIOS does
 not enable SME, then Linux will not be able to activate memory encryption, even
index bd7aff0c120f20556547a36ba1ee47d8bfab1694..bc0ceef87b73f7aaa7a0f66e12e27a387098f413 100644 (file)
@@ -1578,7 +1578,7 @@ F:        drivers/clk/sunxi/
 ARM/Allwinner sunXi SoC support
 M:     Maxime Ripard <mripard@kernel.org>
 M:     Chen-Yu Tsai <wens@csie.org>
-R:     Jernej Skrabec <jernej.skrabec@siol.net>
+R:     Jernej Skrabec <jernej.skrabec@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git
@@ -1618,8 +1618,8 @@ F:        Documentation/devicetree/bindings/sound/amlogic*
 F:     sound/soc/meson/
 
 ARM/Amlogic Meson SoC support
+M:     Neil Armstrong <narmstrong@baylibre.com>
 M:     Kevin Hilman <khilman@baylibre.com>
-R:     Neil Armstrong <narmstrong@baylibre.com>
 R:     Jerome Brunet <jbrunet@baylibre.com>
 R:     Martin Blumenstingl <martin.blumenstingl@googlemail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3877,6 +3877,7 @@ L:        linux-btrfs@vger.kernel.org
 S:     Maintained
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
+C:     irc://irc.libera.chat/btrfs
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F:     Documentation/filesystems/btrfs.rst
 F:     fs/btrfs/
@@ -4138,6 +4139,14 @@ S:       Odd Fixes
 F:     Documentation/devicetree/bindings/arm/cavium-thunder2.txt
 F:     arch/arm64/boot/dts/cavium/thunder2-99xx*
 
+CBS/ETF/TAPRIO QDISCS
+M:     Vinicius Costa Gomes <vinicius.gomes@intel.com>
+S:     Maintained
+L:     netdev@vger.kernel.org
+F:     net/sched/sch_cbs.c
+F:     net/sched/sch_etf.c
+F:     net/sched/sch_taprio.c
+
 CC2520 IEEE-802.15.4 RADIO DRIVER
 M:     Varka Bhadram <varkabhadram@gmail.com>
 L:     linux-wpan@vger.kernel.org
@@ -5089,7 +5098,7 @@ S:        Maintained
 F:     drivers/net/fddi/defza.*
 
 DEINTERLACE DRIVERS FOR ALLWINNER H3
-M:     Jernej Skrabec <jernej.skrabec@siol.net>
+M:     Jernej Skrabec <jernej.skrabec@gmail.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
 T:     git git://linuxtv.org/media_tree.git
@@ -5237,7 +5246,7 @@ DEVICE DIRECT ACCESS (DAX)
 M:     Dan Williams <dan.j.williams@intel.com>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
-L:     linux-nvdimm@lists.01.org
+L:     nvdimm@lists.linux.dev
 S:     Supported
 F:     drivers/dax/
 
@@ -5569,7 +5578,6 @@ F:        drivers/soc/fsl/dpio
 
 DPAA2 ETHERNET DRIVER
 M:     Ioana Ciornei <ioana.ciornei@nxp.com>
-M:     Ioana Radulescu <ruxandra.radulescu@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
@@ -5632,14 +5640,14 @@ F:      include/linux/power/smartreflex.h
 DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE
 M:     Maxime Ripard <mripard@kernel.org>
 M:     Chen-Yu Tsai <wens@csie.org>
-R:     Jernej Skrabec <jernej.skrabec@siol.net>
+R:     Jernej Skrabec <jernej.skrabec@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     drivers/gpu/drm/sun4i/sun8i*
 
 DRM DRIVER FOR ARM PL111 CLCD
-M:     Eric Anholt <eric@anholt.net>
+M:     Emma Anholt <emma@anholt.net>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     drivers/gpu/drm/pl111/
@@ -5719,7 +5727,7 @@ T:        git git://anongit.freedesktop.org/drm/drm-misc
 F:     drivers/gpu/drm/tiny/gm12u320.c
 
 DRM DRIVER FOR HX8357D PANELS
-M:     Eric Anholt <eric@anholt.net>
+M:     Emma Anholt <emma@anholt.net>
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/himax,hx8357d.txt
@@ -6023,7 +6031,7 @@ M:        Neil Armstrong <narmstrong@baylibre.com>
 M:     Robert Foss <robert.foss@linaro.org>
 R:     Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
 R:     Jonas Karlman <jonas@kwiboo.se>
-R:     Jernej Skrabec <jernej.skrabec@siol.net>
+R:     Jernej Skrabec <jernej.skrabec@gmail.com>
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     drivers/gpu/drm/bridge/
@@ -6177,7 +6185,7 @@ F:        Documentation/devicetree/bindings/display/ti/
 F:     drivers/gpu/drm/omapdrm/
 
 DRM DRIVERS FOR V3D
-M:     Eric Anholt <eric@anholt.net>
+M:     Emma Anholt <emma@anholt.net>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
@@ -6185,7 +6193,7 @@ F:        drivers/gpu/drm/v3d/
 F:     include/uapi/drm/v3d_drm.h
 
 DRM DRIVERS FOR VC4
-M:     Eric Anholt <eric@anholt.net>
+M:     Emma Anholt <emma@anholt.net>
 M:     Maxime Ripard <mripard@kernel.org>
 S:     Supported
 T:     git git://github.com/anholt/linux
@@ -6938,6 +6946,7 @@ F:        net/core/failover.c
 FANOTIFY
 M:     Jan Kara <jack@suse.cz>
 R:     Amir Goldstein <amir73il@gmail.com>
+R:     Matthew Bobrowski <repnop@google.com>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     fs/notify/fanotify/
@@ -7006,7 +7015,7 @@ M:        Dan Williams <dan.j.williams@intel.com>
 R:     Matthew Wilcox <willy@infradead.org>
 R:     Jan Kara <jack@suse.cz>
 L:     linux-fsdevel@vger.kernel.org
-L:     linux-nvdimm@lists.01.org
+L:     nvdimm@lists.linux.dev
 S:     Supported
 F:     fs/dax.c
 F:     include/linux/dax.h
@@ -10378,7 +10387,7 @@ LIBNVDIMM BLK: MMIO-APERTURE DRIVER
 M:     Dan Williams <dan.j.williams@intel.com>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
-L:     linux-nvdimm@lists.01.org
+L:     nvdimm@lists.linux.dev
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
 P:     Documentation/nvdimm/maintainer-entry-profile.rst
@@ -10389,7 +10398,7 @@ LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
-L:     linux-nvdimm@lists.01.org
+L:     nvdimm@lists.linux.dev
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
 P:     Documentation/nvdimm/maintainer-entry-profile.rst
@@ -10399,7 +10408,7 @@ LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
 M:     Dan Williams <dan.j.williams@intel.com>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
-L:     linux-nvdimm@lists.01.org
+L:     nvdimm@lists.linux.dev
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
 P:     Documentation/nvdimm/maintainer-entry-profile.rst
@@ -10407,7 +10416,7 @@ F:      drivers/nvdimm/pmem*
 
 LIBNVDIMM: DEVICETREE BINDINGS
 M:     Oliver O'Halloran <oohall@gmail.com>
-L:     linux-nvdimm@lists.01.org
+L:     nvdimm@lists.linux.dev
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
 F:     Documentation/devicetree/bindings/pmem/pmem-region.txt
@@ -10418,7 +10427,7 @@ M:      Dan Williams <dan.j.williams@intel.com>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
 M:     Ira Weiny <ira.weiny@intel.com>
-L:     linux-nvdimm@lists.01.org
+L:     nvdimm@lists.linux.dev
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
 P:     Documentation/nvdimm/maintainer-entry-profile.rst
@@ -12180,6 +12189,7 @@ F:      drivers/platform/surface/surfacepro3_button.c
 
 MICROSOFT SURFACE SYSTEM AGGREGATOR SUBSYSTEM
 M:     Maximilian Luz <luzmaximilian@gmail.com>
+L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 W:     https://github.com/linux-surface/surface-aggregator-module
 C:     irc://chat.freenode.net/##linux-surface
@@ -12680,9 +12690,9 @@ F:      drivers/rtc/rtc-ntxec.c
 F:     include/linux/mfd/ntxec.h
 
 NETRONOME ETHERNET DRIVERS
-M:     Simon Horman <simon.horman@netronome.com>
+M:     Simon Horman <simon.horman@corigine.com>
 R:     Jakub Kicinski <kuba@kernel.org>
-L:     oss-drivers@netronome.com
+L:     oss-drivers@corigine.com
 S:     Maintained
 F:     drivers/net/ethernet/netronome/
 
@@ -12709,7 +12719,6 @@ M:      "David S. Miller" <davem@davemloft.net>
 M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
-W:     http://www.linuxfoundation.org/en/Net
 Q:     https://patchwork.kernel.org/project/netdevbpf/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
@@ -12754,7 +12763,6 @@ M:      "David S. Miller" <davem@davemloft.net>
 M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
-W:     http://www.linuxfoundation.org/en/Net
 Q:     https://patchwork.kernel.org/project/netdevbpf/list/
 B:     mailto:netdev@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -12896,8 +12904,10 @@ F:     include/uapi/linux/nexthop.h
 F:     net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/
 F:     drivers/nfc/
 F:     include/linux/platform_data/nfcmrvl.h
@@ -12908,7 +12918,7 @@ F:      net/nfc/
 NFC VIRTUAL NCI DEVICE DRIVER
 M:     Bongsu Jeon <bongsu.jeon@samsung.com>
 L:     netdev@vger.kernel.org
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     drivers/nfc/virtual_ncidev.c
 F:     tools/testing/selftests/nci/
@@ -13205,9 +13215,8 @@ F:      Documentation/devicetree/bindings/sound/tfa9879.txt
 F:     sound/soc/codecs/tfa9879*
 
 NXP-NCI NFC DRIVER
-M:     Clément Perrochaud <clement.perrochaud@effinnov.com>
 R:     Charles Gorand <charles.gorand@effinnov.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     drivers/nfc/nxp-nci
 
@@ -14110,6 +14119,7 @@ F:      drivers/pci/controller/pci-v3-semi.c
 PCI ENDPOINT SUBSYSTEM
 M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     Documentation/PCI/endpoint/*
@@ -14158,6 +14168,7 @@ F:      drivers/pci/controller/pci-xgene-msi.c
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 R:     Rob Herring <robh@kernel.org>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 Q:     http://patchwork.ozlabs.org/project/linux-pci/list/
@@ -14317,10 +14328,12 @@ PER-CPU MEMORY ALLOCATOR
 M:     Dennis Zhou <dennis@kernel.org>
 M:     Tejun Heo <tj@kernel.org>
 M:     Christoph Lameter <cl@linux.com>
+L:     linux-mm@kvack.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
 F:     arch/*/include/asm/percpu.h
 F:     include/linux/percpu*.h
+F:     lib/percpu*.c
 F:     mm/percpu*.c
 
 PER-TASK DELAY ACCOUNTING
@@ -14734,7 +14747,6 @@ W:      https://wireless.wiki.kernel.org/en/users/Drivers/p54
 F:     drivers/net/wireless/intersil/prism54/
 
 PROC FILESYSTEM
-R:     Alexey Dobriyan <adobriyan@gmail.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
@@ -15815,7 +15827,7 @@ F:      include/uapi/linux/rose.h
 F:     net/rose/
 
 ROTATION DRIVER FOR ALLWINNER A83T
-M:     Jernej Skrabec <jernej.skrabec@siol.net>
+M:     Jernej Skrabec <jernej.skrabec@gmail.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
 T:     git git://linuxtv.org/media_tree.git
@@ -15945,6 +15957,7 @@ S390 IUCV NETWORK LAYER
 M:     Julian Wiedmann <jwi@linux.ibm.com>
 M:     Karsten Graul <kgraul@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
+L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/net/*iucv*
@@ -15955,6 +15968,7 @@ S390 NETWORK DRIVERS
 M:     Julian Wiedmann <jwi@linux.ibm.com>
 M:     Karsten Graul <kgraul@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
+L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/net/
@@ -16133,7 +16147,7 @@ F:      include/media/drv-intf/s3c_camif.h
 SAMSUNG S3FWRN5 NFC DRIVER
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
 M:     Krzysztof Opasiak <k.opasiak@samsung.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
 F:     drivers/nfc/s3fwrn5
@@ -17304,6 +17318,12 @@ L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-stm32*
 
+ST STM32 SPI DRIVER
+M:     Alain Volmat <alain.volmat@foss.st.com>
+L:     linux-spi@vger.kernel.org
+S:     Maintained
+F:     drivers/spi/spi-stm32.c
+
 ST STPDDC60 DRIVER
 M:     Daniel Nilsson <daniel.nilsson@flex.com>
 L:     linux-hwmon@vger.kernel.org
@@ -17662,7 +17682,6 @@ R:      Mika Westerberg <mika.westerberg@linux.intel.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-designware-*
-F:     include/linux/platform_data/i2c-designware.h
 
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:     Jaehoon Chung <jh80.chung@samsung.com>
@@ -18318,7 +18337,7 @@ F:      sound/soc/codecs/tas571x*
 TI TRF7970A NFC DRIVER
 M:     Mark Greer <mgreer@animalcreek.com>
 L:     linux-wireless@vger.kernel.org
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F:     drivers/nfc/trf7970a.c
@@ -18854,6 +18873,13 @@ S:     Maintained
 F:     drivers/usb/host/isp116x*
 F:     include/linux/usb/isp116x.h
 
+USB ISP1760 DRIVER
+M:     Rui Miguel Silva <rui.silva@linaro.org>
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+F:     drivers/usb/isp1760/*
+F:     Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
+
 USB LAN78XX ETHERNET DRIVER
 M:     Woojung Huh <woojung.huh@microchip.com>
 M:     UNGLinuxDriver@microchip.com
@@ -19998,6 +20024,7 @@ F:      arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
+C:     irc://irc.oftc.net/xfs
 M:     Darrick J. Wong <djwong@kernel.org>
 M:     linux-xfs@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
index 53d09c414635cd541eec49328229670121539afa..ed669b2d705dc7b8c59065acb28c64cebdc84450 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc6
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
@@ -928,6 +928,11 @@ CC_FLAGS_LTO       += -fvisibility=hidden
 
 # Limit inlining across translation units to reduce binary size
 KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+
+# Check for frame size exceeding threshold during prolog/epilog insertion.
+ifneq ($(CONFIG_FRAME_WARN),0)
+KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
+endif
 endif
 
 ifdef CONFIG_LTO
index 5622578742fdde44e9c6a28955923438795f1b4e..3000a2e8ee21734d1759f2f4b208f5e68e63d4a3 100644 (file)
 550    common  process_madvise                 sys_process_madvise
 551    common  epoll_pwait2                    sys_epoll_pwait2
 552    common  mount_setattr                   sys_mount_setattr
-553    common  quotactl_path                   sys_quotactl_path
+# 553 reserved for quotactl_path
 554    common  landlock_create_ruleset         sys_landlock_create_ruleset
 555    common  landlock_add_rule               sys_landlock_add_rule
 556    common  landlock_restrict_self          sys_landlock_restrict_self
index 4392c9c189c4dc38404194f4816c1815698e7d51..e47adc97a89bf0ed55ddb1d8f950fe89c148e065 100644 (file)
@@ -31,7 +31,7 @@ endif
 
 
 ifdef CONFIG_ARC_CURR_IN_REG
-# For a global register defintion, make sure it gets passed to every file
+# For a global register definition, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using
 # any kernel headers, and missing the r25 global register
 # Can't do unconditionally because of recursive include issues
index 9b87e162e539b5b755ce7970c769858ad375791c..dfeffa25499bf1c7bdcefeb264f781d871ca6022 100644 (file)
@@ -116,7 +116,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
  *
  * Technically the lock is also needed for UP (boils down to irq save/restore)
  * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
  * Other way around, xchg is one instruction anyways, so can't be interrupted
  * as such
  */
@@ -143,7 +143,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 /*
  * "atomic" variant of xchg()
  * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
  * is incorrect. But here's the rationale:
  *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
  *   LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
index ad9b7fe4dba363a4713a16275a99f5968802eca9..4a9d33372fe2bacf0a5afb8b96a9d983982ccea0 100644 (file)
@@ -7,6 +7,18 @@
 
 #include <uapi/asm/page.h>
 
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS      40
+#define PAGE_MASK_PHYS                 (0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS      32
+#define PAGE_MASK_PHYS                 PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
 #ifndef __ASSEMBLY__
 
 #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
index 163641726a2b928cba14aefd1289322202ec4a64..5878846f00cfe5913bac2398f187806cebd782b9 100644 (file)
 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+                                                          _PAGE_SPECIAL)
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
 #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
 #define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
 #define PTE_BITS_RWX           (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
 
-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1        (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
index 2a97e2718a219c72b1bfca8ab0ac5bd83ca7ed36..2a4ad619abfba65fab6d73a5df120a3f615cf890 100644 (file)
@@ -33,5 +33,4 @@
 
 #define PAGE_MASK      (~(PAGE_SIZE-1))
 
-
 #endif /* _UAPI__ASM_ARC_PAGE_H */
index 1743506081da6230646640a13e11004b3b8892e9..2cb8dfe866b66f2691daa85458db495de5f3aa3a 100644 (file)
@@ -177,7 +177,7 @@ tracesys:
 
        ; Do the Sys Call as we normally would.
        ; Validate the Sys Call number
-       cmp     r8,  NR_syscalls
+       cmp     r8,  NR_syscalls - 1
        mov.hi  r0, -ENOSYS
        bhi     tracesys_exit
 
@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
        ;============ Normal syscall case
 
        ; syscall num shd not exceed the total system calls avail
-       cmp     r8,  NR_syscalls
+       cmp     r8,  NR_syscalls - 1
        mov.hi  r0, -ENOSYS
        bhi     .Lret_from_system_call
 
index ecfbc42d3a40f0f799438f5177cb54ce66146e50..345a0000554cbb33313208a79da7d6e763c40bac 100644 (file)
@@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                ptr = &remcomInBuffer[1];
                if (kgdb_hex2long(&ptr, &addr))
                        regs->ret = addr;
+               fallthrough;
 
        case 'D':
        case 'k':
index d838d0d576964c7d1a387488e8dd252bdb460133..3793876f42d9b25bc198dfa899b584e15d6061b3 100644 (file)
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
        int ret;
 
        /*
-        * This is only for old cores lacking LLOCK/SCOND, which by defintion
+        * This is only for old cores lacking LLOCK/SCOND, which by definition
         * can't possibly be SMP. Thus doesn't need to be SMP safe.
         * And this also helps reduce the overhead for serializing in
         * the UP case
         */
        WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
 
-       /* Z indicates to userspace if operation succeded */
+       /* Z indicates to userspace if operation succeeded */
        regs->status32 &= ~STATUS_Z_MASK;
 
        ret = access_ok(uaddr, sizeof(*uaddr));
@@ -107,7 +107,7 @@ fail:
 
 void arch_cpu_idle(void)
 {
-       /* Re-enable interrupts <= default irq priority before commiting SLEEP */
+       /* Re-enable interrupts <= default irq priority before committing SLEEP */
        const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
 
        __asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)
 
 void arch_cpu_idle(void)
 {
-       /* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
+       /* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
        __asm__ __volatile__("sleep 0x3 \n");
 }
 
index fdbe06c98895ea34d306e780021f06b6ffa14214..b3ccb9e5ffe42578d40d458b93b5c63f16cf2b1f 100644 (file)
@@ -259,7 +259,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
                regs->r2 = (unsigned long)&sf->uc;
 
                /*
-                * small optim to avoid unconditonally calling do_sigaltstack
+                * small optim to avoid unconditionally calling do_sigaltstack
                 * in sigreturn path, now that we only have rt_sigreturn
                 */
                magic = MAGIC_SIGALTSTK;
@@ -391,7 +391,7 @@ void do_signal(struct pt_regs *regs)
 void do_notify_resume(struct pt_regs *regs)
 {
        /*
-        * ASM glue gaurantees that this is only called when returning to
+        * ASM glue guarantees that this is only called when returning to
         * user mode
         */
        if (test_thread_flag(TIF_NOTIFY_RESUME))
index 33832e36bdb7d52f5cec6d12c2f2e2c790e0c569..e2ed355438c96ca18c7bbe37f50763898320e4c2 100644 (file)
@@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
        min_high_pfn = PFN_DOWN(high_mem_start);
        max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
 
-       max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
+       /*
+        * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+        * For HIGHMEM without PAE max_high_pfn should be less than
+        * min_low_pfn to guarantee that these two regions don't overlap.
+        * For PAE case highmem is greater than lowmem, so it is natural
+        * to use max_high_pfn.
+        *
+        * In both cases, holes should be handled by pfn_valid().
+        */
+       max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
 
        high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
 
index fac4adc902044d6c794c5ded462038aa679bcec6..95c649fbc95aff99f99a2f481b2f72d898d2f639 100644 (file)
@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                           unsigned long flags)
 {
+       unsigned int off;
        unsigned long vaddr;
        struct vm_struct *area;
-       phys_addr_t off, end;
+       phys_addr_t end;
        pgprot_t prot = __pgprot(flags);
 
        /* Don't allow wraparound, zero size */
@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 
        /* Mappings have to be page-aligned */
        off = paddr & ~PAGE_MASK;
-       paddr &= PAGE_MASK;
+       paddr &= PAGE_MASK_PHYS;
        size = PAGE_ALIGN(end + 1) - paddr;
 
        /*
index 9bb3c24f3677098a6e8b7d410fab51ed723f69d7..9c7c6824728963bb5ecee80d2c24da89de691787 100644 (file)
@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
 {
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
        struct page *page = pfn_to_page(pte_pfn(*ptep));
 
        create_tlb(vma, vaddr, ptep);
index 7d2c72562c73552a43b912cdb08cd931d966ec8b..9148a01ed6d9f085c76923d0d3f1532299d0c448 100644 (file)
        phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
        phy-reset-duration = <20>;
        phy-supply = <&sw2_reg>;
-       phy-handle = <&ethphy0>;
        status = "okay";
 
+       fixed-link {
+               speed = <1000>;
+               full-duplex;
+       };
+
        mdio {
                #address-cells = <1>;
                #size-cells = <0>;
index 236fc205c38903cd1b3f4ba7e2fffb13972c18fb..d0768ae429faaf7d897715e8bfad042bf5630880 100644 (file)
        vin-supply = <&sw1_reg>;
 };
 
+&reg_pu {
+       vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+       vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+       vin-supply = <&sw2_reg>;
+};
+
 &uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart1>;
index 828cf3e39784afd600090615daa3d0f7d7672919..c4e146f3341bb85d93a0f9204cd999e8023abaeb 100644 (file)
                compatible = "nxp,pca8574";
                reg = <0x3a>;
                gpio-controller;
-               #gpio-cells = <1>;
+               #gpio-cells = <2>;
        };
 };
 
index 5339210b63d0f43ed4ea17e949e5a873074d5cb6..dd8003bd1fc09c83d6aa83e79c46f14e07674599 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
        keep-power-in-suspend;
-       tuning-step = <2>;
+       fsl,tuning-step = <2>;
        vmmc-supply = <&reg_3p3v>;
        no-1-8-v;
        broken-cd;
index e57da0d32b98d383b8f353a25195cf635bc2d265..e519897fae082671ada0dfb31a7a40c859b94d2f 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
        cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
        bus-width = <4>;
-       tuning-step = <2>;
+       fsl,tuning-step = <2>;
        vmmc-supply = <&reg_3p3v>;
        wakeup-source;
        no-1-8-v;
index 0d67ed682e0772e7bd975ac90612e0499455e0fe..bc4ffa7ca04c7aa713e247580152ebbfc7d66ca8 100644 (file)
@@ -7,9 +7,11 @@
 #ifdef CONFIG_CPU_IDLE
 extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
 #else
 static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
 #endif
 
 /* Common ARM WFI state */
@@ -42,8 +44,7 @@ struct of_cpuidle_method {
 
 #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)                 \
        static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
-       __used __section("__cpuidle_method_of_table")                   \
-       = { .method = _method, .ops = _ops }
+       __cpuidle_method_section = { .method = _method, .ops = _ops }
 
 extern int arm_cpuidle_suspend(int index);
 
index 020e6deb67c8c6e9de192905d839b84ec994837d..237e8aa9fe8378e584590a96bb294300574352aa 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/suspend.h>
 #include <linux/io.h>
 
+#include "common.h"
 #include "hardware.h"
 
 static int mx27_suspend_enter(suspend_state_t state)
index 658c8efb4ca1445891c370aad48ff924658537d5..a71cf1d189ae5b740246be8477d7e2a8b18e82e9 100644 (file)
@@ -10,6 +10,7 @@ config ARCH_WPCM450
        bool "Support for WPCM450 BMC (Hermon)"
        depends on ARCH_MULTI_V5
        select CPU_ARM926T
+       select WPCM450_AIC
        select NPCM7XX_TIMER
        help
          General support for WPCM450 BMC (Hermon).
index 2ee527c00284085d28921f30906777e6c98f130b..1026a816dcc0208642dcfbe5b20ad019c1c795af 100644 (file)
@@ -458,20 +458,6 @@ static struct gpiod_lookup_table leds_gpio_table = {
 
 #ifdef CONFIG_LEDS_TRIGGERS
 DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
-
-static int ams_delta_camera_power(struct device *dev, int power)
-{
-       /*
-        * turn on camera LED
-        */
-       if (power)
-               led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
-       else
-               led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
-       return 0;
-}
-#else
-#define ams_delta_camera_power NULL
 #endif
 
 static struct platform_device ams_delta_audio_device = {
index c40cf5ef8607926b372d8fab52ac0ce61a852161..977b0b744c22aefb413ea53ecc3c64ca8011d5ad 100644 (file)
@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
 {
        if (!IS_BUILTIN(CONFIG_TPS65010))
                return -ENOSYS;
-       
+
        tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
                                TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
 
@@ -394,6 +394,8 @@ static void __init h2_init(void)
        BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
        gpio_direction_input(H2_NAND_RB_GPIO_PIN);
 
+       gpiod_add_lookup_table(&isp1301_gpiod_table);
+
        omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
        omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
 
index 2c1e2b32b9b36f26507ecbbae13ea5c1de978143..a745d64d46995a53d0e88d786bb80e49fa38bbb9 100644 (file)
@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
                irq = INT_7XX_WAKE_UP_REQ;
        else if (cpu_is_omap16xx())
                irq = INT_1610_WAKE_UP_REQ;
-       if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
-                       NULL))
-               pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+       else
+               irq = -1;
+
+       if (irq >= 0) {
+               if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
+                       pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+       }
 
        /* Program new power ramp-up time
         * (0 for most boards since we don't lower voltage when in deep sleep)
index 418a61ecb827512e5ba91aaf64f23fc9330a542f..5e86145db0e2ac9e8b8ac769ea3ccc122e293935 100644 (file)
@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
 
 static void n8x0_mmc_callback(void *data, u8 card_mask)
 {
+#ifdef CONFIG_MMC_OMAP
        int bit, *openp, index;
 
        if (board_is_n800()) {
@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
        else
                *openp = 0;
 
-#ifdef CONFIG_MMC_OMAP
        omap_mmc_notify_cover_event(mmc_device, index, *openp);
 #else
        pr_warn("MMC: notify cover event not available\n");
index ec0d9b094744db67ae99bb80faababa817c3845e..bddfc7cd5d40f5e921ca8ef0fd6db332ddd632e2 100644 (file)
@@ -121,8 +121,13 @@ static int cplds_probe(struct platform_device *pdev)
                return fpga->irq;
 
        base_irq = platform_get_irq(pdev, 1);
-       if (base_irq < 0)
+       if (base_irq < 0) {
                base_irq = 0;
+       } else {
+               ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq, CPLDS_NB_IRQ, 0);
+               if (ret < 0)
+                       return ret;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fpga->base = devm_ioremap_resource(&pdev->dev, res);
index c7679d7db98b0cf689b9d549569effb71ca72128..28e03b5fec0044da777eaf5fa8efc5f3a6e87df4 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index f8f07469d259171338c4c9dd66df765496677434..a7e54a087b80240a60af284755f45667e0969c54 100644 (file)
@@ -135,24 +135,18 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
        return;
 }
 
-int xen_swiotlb_detect(void)
-{
-       if (!xen_domain())
-               return 0;
-       if (xen_feature(XENFEAT_direct_mapped))
-               return 1;
-       /* legacy case */
-       if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
-               return 1;
-       return 0;
-}
-
 static int __init xen_mm_init(void)
 {
        struct gnttab_cache_flush cflush;
+       int rc;
+
        if (!xen_swiotlb_detect())
                return 0;
-       xen_swiotlb_init();
+
+       rc = xen_swiotlb_init();
+       /* we can work with the default swiotlb */
+       if (rc < 0 && rc != -EEXIST)
+               return rc;
 
        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
index d6465823b28102eb6510bb2367ebc2da9975722b..7b393cfec071617ceadc2bcf5d6ea17049605f6e 100644 (file)
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-y                  += kernel/ mm/
-obj-$(CONFIG_NET)      += net/
+obj-y                  += kernel/ mm/ net/
 obj-$(CONFIG_KVM)      += kvm/
 obj-$(CONFIG_XEN)      += xen/
 obj-$(CONFIG_CRYPTO)   += crypto/
index 6409b47b73e4a245b60069f47df2f1d0702e94b4..7336c1fd0ddace598cce75b03762c9156292732d 100644 (file)
@@ -165,6 +165,7 @@ config ARCH_MEDIATEK
 
 config ARCH_MESON
        bool "Amlogic Platforms"
+       select COMMON_CLK
        select MESON_IRQ_GPIO
        help
          This enables support for the arm64 based Amlogic SoCs
index 7ef44478560dff8281cfaec32a25e81a0f0eb00f..b52481f0605d8e43010d41de5810b53101c47a0b 100644 (file)
@@ -175,6 +175,9 @@ vdso_install:
        $(if $(CONFIG_COMPAT_VDSO), \
                $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
 
+archprepare:
+       $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
index 6c309b97587df1fdf872fac3aaf90d6be2550341..e8d31279b7a34567d7d2765605fdab2ac9d2b860 100644 (file)
@@ -46,7 +46,8 @@
                        eee-broken-100tx;
                        qca,clk-out-frequency = <125000000>;
                        qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-                       vddio-supply = <&vddh>;
+                       qca,keep-pll-enabled;
+                       vddio-supply = <&vddio>;
 
                        vddio: vddio-regulator {
                                regulator-name = "VDDIO";
index df212ed5bb9422bd17618b8efae0cb2d536c1271..e65d1c477e2ceb38509e121f4931114e31718c90 100644 (file)
                        reg = <0x4>;
                        eee-broken-1000t;
                        eee-broken-100tx;
-
                        qca,clk-out-frequency = <125000000>;
                        qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-
-                       vddio-supply = <&vddh>;
+                       qca,keep-pll-enabled;
+                       vddio-supply = <&vddio>;
 
                        vddio: vddio-regulator {
                                regulator-name = "VDDIO";
index eca06a0c3cf876601f45905548cdc9d74dbd1e08..a30249ebffa8c0878617d5cc5672b45c90a29cd8 100644 (file)
                ddr: memory-controller@1080000 {
                        compatible = "fsl,qoriq-memory-controller";
                        reg = <0x0 0x1080000 0x0 0x1000>;
-                       interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-                       big-endian;
+                       interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+                       little-endian;
                };
 
                dcfg: syscon@1e00000 {
index 631e01c1b9fd4d8b8dc8adf41adcdc7cc4aad986..be1e7d6f0ecb5d7b48692efdf3f62613ed6ac953 100644 (file)
                pinctrl-0 = <&pinctrl_codec2>;
                reg = <0x18>;
                #sound-dai-cells = <0>;
-               HPVDD-supply = <&reg_3p3v>;
-               SPRVDD-supply = <&reg_3p3v>;
-               SPLVDD-supply = <&reg_3p3v>;
-               AVDD-supply = <&reg_3p3v>;
-               IOVDD-supply = <&reg_3p3v>;
+               HPVDD-supply = <&reg_gen_3p3>;
+               SPRVDD-supply = <&reg_gen_3p3>;
+               SPLVDD-supply = <&reg_gen_3p3>;
+               AVDD-supply = <&reg_gen_3p3>;
+               IOVDD-supply = <&reg_gen_3p3>;
                DVDD-supply = <&vgen4_reg>;
                reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
        };
index 4dc8383478ee270c9ae4c3282b289d5513b36eed..a08a568c31d921e6ddb57e065e588d38dc2de998 100644 (file)
@@ -45,8 +45,8 @@
        reg_12p0_main: regulator-12p0-main {
                compatible = "regulator-fixed";
                regulator-name = "12V_MAIN";
-               regulator-min-microvolt = <5000000>;
-               regulator-max-microvolt = <5000000>;
+               regulator-min-microvolt = <12000000>;
+               regulator-max-microvolt = <12000000>;
                regulator-always-on;
        };
 
                regulator-always-on;
        };
 
-       reg_3p3v: regulator-3p3v {
-               compatible = "regulator-fixed";
-               vin-supply = <&reg_3p3_main>;
-               regulator-name = "GEN_3V3";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-               regulator-always-on;
-       };
-
        reg_usdhc2_vmmc: regulator-vsd-3v3 {
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_usdhc2>;
                pinctrl-0 = <&pinctrl_codec1>;
                reg = <0x18>;
                #sound-dai-cells = <0>;
-               HPVDD-supply = <&reg_3p3v>;
-               SPRVDD-supply = <&reg_3p3v>;
-               SPLVDD-supply = <&reg_3p3v>;
-               AVDD-supply = <&reg_3p3v>;
-               IOVDD-supply = <&reg_3p3v>;
+               HPVDD-supply = <&reg_gen_3p3>;
+               SPRVDD-supply = <&reg_gen_3p3>;
+               SPLVDD-supply = <&reg_gen_3p3>;
+               AVDD-supply = <&reg_gen_3p3>;
+               IOVDD-supply = <&reg_gen_3p3>;
                DVDD-supply = <&vgen4_reg>;
                reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
        };
index c62ddb9b2ba565e9270d2acd72417ae73b7a81ba..3771144a2ce49a452a69c96bb8abb6b1135d8bc0 100644 (file)
@@ -14,7 +14,6 @@
 
        ports {
                port@0 {
-                       reg = <0>;
                        csi20_in: endpoint {
                                clock-lanes = <0>;
                                data-lanes = <1 2>;
@@ -29,7 +28,6 @@
 
        ports {
                port@0 {
-                       reg = <0>;
                        csi40_in: endpoint {
                                clock-lanes = <0>;
                                data-lanes = <1 2>;
index d64fb8b1b86c37318a8b63abdd4630b0b954f9fc..46f8dbf6890482d93fe8f279f0198d8b0ad93f72 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 5b05474dc272788414fba848d01bb0f559260dc5..d16a4be5ef77a21bd64284ada40132d3cf105874 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index e7b4a929bb17484002b98d349254bc7c9ccdae24..2e3d1981cac4814478c1d3e7d573e172d378a995 100644 (file)
@@ -33,7 +33,7 @@
        status = "okay";
 
        ports {
-               port {
+               port@0 {
                        csi40_in: endpoint {
                                clock-lanes = <0>;
                                data-lanes = <1 2>;
index 20fa3caa050e5d5ec0eaae8904e3b1c632fdeebc..1aef34447abd1b3d30d623593dcb558b85ad3f02 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 8eb006cbd9af4c98f4dc28b5b12c049da83f20dc..1f51237ab0a648fa9c9d5a188d6013fb39d7e27a 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 25b87da32eebb6dc2f9abf0f671cfa7410722274..b643d3079db1e7aa0a72761f535a76da86e75316 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <0>;
 
+                       port@0 {
+                               reg = <0>;
+                       };
+
                        port@1 {
                                #address-cells = <1>;
                                #size-cells = <0>;
index 5c39152e45707441d3c5f31bb50fcfe11c883b4d..85d66d15465ab2b6ca66c39634f429120ca70d20 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 25d947a81b294a8138ed8a928cd4c787187866d7..12476e354d746d0eb867c7e2d97a355ce1a76660 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index ab081f14af9aa0fbf92e7323b0b8cdc5f702f4fd..d9804768425a76037d716ea818be100cab045d35 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 657b20d3533bd398c6d323ccd61372c167a3a53e..dcb9df861d749692798bfd8afc0ad07815da47e7 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 5a5d5649332a8b1dec168ebe2d81d0ee2442edf3..e8f6352c3665f44c6503a53e0ee832a17d328407 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 1ffa4a995a7ab8ae8fe8fa25d347ef04fd7a5fa4..7b51d464de0eac8bdeea65f63fb79c331056463e 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index 295d34f1d216d963e22e38e60b6f72613e21ba7f..4715e4a4abe064cdf70b577c27c84428c06d0886 100644 (file)
 
        ports {
                port@0 {
-                       reg = <0>;
-
                        csi40_in: endpoint {
                                clock-lanes = <0>;
                                data-lanes = <1 2>;
index 5010f23fafcc7f98e1b1d7f7c9de7741f636471e..0eaea58f4210d89a9f3417b74e09d0a7e270f276 100644 (file)
                                #address-cells = <1>;
                                #size-cells = <0>;
 
+                               port@0 {
+                                       reg = <0>;
+                               };
+
                                port@1 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
index e18747df219f8118ae2f75bbbe3290207d44647e..453ffcef24fae1be1dec0923258322182461c98a 100644 (file)
 
        ports {
                port@0 {
-                       reg = <0>;
                        csi20_in: endpoint {
                                clock-lanes = <0>;
                                data-lanes = <1>;
 
        ports {
                port@0 {
-                       reg = <0>;
-
                        csi40_in: endpoint {
                                clock-lanes = <0>;
                                data-lanes = <1 2 3 4>;
index b2bcbf23eefda88e59ab0968cc8359c0cae526f1..ca59d1f711f8aee5b71e1ed7267d4f2e85563176 100644 (file)
                };
        };
 
-       dmss: dmss {
+       dmss: bus@48000000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
                dma-ranges;
-               ranges;
+               ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
 
                ti,sci-dev-id = <25>;
 
                };
        };
 
-       dmsc: dmsc@44043000 {
+       dmsc: system-controller@44043000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
                mbox-names = "rx", "tx";
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
                clocks = <&k3_clks 145 0>;
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index 99e94dee1bd45812608f80cfbed7c87c8d725a01..deb19ae5e168ae0a8ea98c75f80dfeadb2900246 100644 (file)
@@ -74,8 +74,9 @@
                clocks = <&k3_clks 148 0>;
        };
 
-       mcu_gpio_intr: interrupt-controller1 {
+       mcu_gpio_intr: interrupt-controller@4210000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x04210000 0x00 0x200>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index cb340d1b401f3c7938eefc72a646a0b162c49396..6cd3131eb9ff94002662ed461681f28292906cf5 100644 (file)
                #phy-cells = <0>;
        };
 
-       intr_main_gpio: interrupt-controller0 {
+       intr_main_gpio: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x0 0x00a00000 0x0 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                ti,interrupt-ranges = <0 392 32>;
        };
 
-       main-navss {
+       main_navss: bus@30800000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0xbc00000>;
                dma-coherent;
                dma-ranges;
 
                ti,sci-dev-id = <118>;
 
-               intr_main_navss: interrupt-controller1 {
+               intr_main_navss: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x0 0x310e0000 0x0 0x2000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index 0388c02c22037317e764e6a180a8bc9b64617d52..f5b8ef2f5f773ac9e7c0b2e4a6626d790a4639e2 100644 (file)
                };
        };
 
-       mcu-navss {
+       mcu_navss: bus@28380000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
                dma-coherent;
                dma-ranges;
 
index ed42f13e7663479bbeefeb87d5d3817fc40782c8..7cb864b4d74a83956dfbec008cf2642f052cb690 100644 (file)
@@ -6,24 +6,24 @@
  */
 
 &cbass_wakeup {
-       dmsc: dmsc {
+       dmsc: system-controller@44083000 {
                compatible = "ti,am654-sci";
                ti,host-id = <12>;
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
 
                mbox-names = "rx", "tx";
 
                mboxes= <&secure_proxy_main 11>,
                        <&secure_proxy_main 13>;
 
+               reg-names = "debug_messages";
+               reg = <0x44083000 0x1000>;
+
                k3_pds: power-controller {
                        compatible = "ti,sci-pm-domain";
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -69,8 +69,9 @@
                power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
        };
 
-       intr_wkup_gpio: interrupt-controller2 {
+       intr_wkup_gpio: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x42200000 0x200>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index 9e87fb313a541c5d25b40539494de6f6dec02b13..eddb2ffb93ca69795e4ee407550d5f6bc6d0947b 100644 (file)
                        gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
                };
        };
-
-       clk_ov5640_fixed: clock {
-               compatible = "fixed-clock";
-               #clock-cells = <0>;
-               clock-frequency = <24000000>;
-       };
 };
 
 &wkup_pmx0 {
        pinctrl-names = "default";
        pinctrl-0 = <&main_i2c1_pins_default>;
        clock-frequency = <400000>;
-
-       ov5640: camera@3c {
-               compatible = "ovti,ov5640";
-               reg = <0x3c>;
-
-               clocks = <&clk_ov5640_fixed>;
-               clock-names = "xclk";
-
-               port {
-                       csi2_cam0: endpoint {
-                               remote-endpoint = <&csi2_phy0>;
-                               clock-lanes = <0>;
-                               data-lanes = <1 2>;
-                       };
-               };
-       };
-
 };
 
 &main_i2c2 {
        };
 };
 
-&csi2_0 {
-       csi2_phy0: endpoint {
-               remote-endpoint = <&csi2_cam0>;
-               clock-lanes = <0>;
-               data-lanes = <1 2>;
-       };
-};
-
 &mcu_cpsw {
        pinctrl-names = "default";
        pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
index f86c493a44f1cfa9f3f0b2e211b169943b818020..19fea8adbcff4254d8e207f49a5c07441bce976a 100644 (file)
@@ -68,8 +68,9 @@
                };
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                #size-cells = <2>;
                ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
                ti,sci-dev-id = <199>;
+               dma-coherent;
+               dma-ranges;
 
-               main_navss_intr: interrupt-controller1 {
+               main_navss_intr: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x00 0x310e0000 0x00 0x4000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index 5e74e43822c3f6ad250a2a2c960b81445ad7d04d..5663fe3ea46602d109423761622a36a8248ae14a 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 &cbass_mcu_wakeup {
-       dmsc: dmsc@44083000 {
+       dmsc: system-controller@44083000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
 
@@ -23,7 +23,7 @@
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -96,8 +96,9 @@
                clock-names = "fclk";
        };
 
-       wkup_gpio_intr: interrupt-controller2 {
+       wkup_gpio_intr: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x42200000 0x00 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index c2aa45a3ac795b57d842f0641cdc771fb81e94fc..3bcafe4c1742e82273420abc103bd0cdcd7c646f 100644 (file)
@@ -76,8 +76,9 @@
                };
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                ti,interrupt-ranges = <8 392 56>;
        };
 
-       main-navss {
+       main_navss: bus@30000000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
                dma-coherent;
                dma-ranges;
 
                ti,sci-dev-id = <199>;
 
-               main_navss_intr: interrupt-controller1 {
+               main_navss_intr: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x0 0x310e0000 0x0 0x4000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index d56e3475aee79e59396f1ed767470de43e9cbbcc..5e825e4d0306d168b09d7292e196f3608f55a353 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 &cbass_mcu_wakeup {
-       dmsc: dmsc@44083000 {
+       dmsc: system-controller@44083000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
 
@@ -23,7 +23,7 @@
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -96,8 +96,9 @@
                clock-names = "fclk";
        };
 
-       wkup_gpio_intr: interrupt-controller2 {
+       wkup_gpio_intr: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x42200000 0x00 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                };
        };
 
-       mcu-navss {
+       mcu_navss: bus@28380000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
                dma-coherent;
                dma-ranges;
 
index 07ac208edc89441b06401c960e29a4c43a0b9cf7..26889dbfe904da887482ad7fbc9cd2d883f15d8d 100644 (file)
@@ -5,3 +5,5 @@ generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += set_memory.h
 generic-y += user.h
+
+generated-y += cpucaps.h
index 2175ec0004edb38fbb2de0a8c9e691e3536c7f62..451e11e5fd23b768af35a4587fe568db7cdbee7f 100644 (file)
@@ -74,7 +74,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
  * This insanity brought to you by speculative system register reads,
  * out-of-order memory accesses, sequence locks and Thomas Gleixner.
  *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
  */
 #define arch_counter_enforce_ordering(val) do {                                \
        u64 tmp, _val = (val);                                          \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
deleted file mode 100644 (file)
index b0c5eda..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm64/include/asm/cpucaps.h
- *
- * Copyright (C) 2016 ARM Ltd.
- */
-#ifndef __ASM_CPUCAPS_H
-#define __ASM_CPUCAPS_H
-
-#define ARM64_WORKAROUND_CLEAN_CACHE           0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE   1
-#define ARM64_WORKAROUND_845719                        2
-#define ARM64_HAS_SYSREG_GIC_CPUIF             3
-#define ARM64_HAS_PAN                          4
-#define ARM64_HAS_LSE_ATOMICS                  5
-#define ARM64_WORKAROUND_CAVIUM_23154          6
-#define ARM64_WORKAROUND_834220                        7
-#define ARM64_HAS_NO_HW_PREFETCH               8
-#define ARM64_HAS_VIRT_HOST_EXTN               11
-#define ARM64_WORKAROUND_CAVIUM_27456          12
-#define ARM64_HAS_32BIT_EL0                    13
-#define ARM64_SPECTRE_V3A                      14
-#define ARM64_HAS_CNP                          15
-#define ARM64_HAS_NO_FPSIMD                    16
-#define ARM64_WORKAROUND_REPEAT_TLBI           17
-#define ARM64_WORKAROUND_QCOM_FALKOR_E1003     18
-#define ARM64_WORKAROUND_858921                        19
-#define ARM64_WORKAROUND_CAVIUM_30115          20
-#define ARM64_HAS_DCPOP                                21
-#define ARM64_SVE                              22
-#define ARM64_UNMAP_KERNEL_AT_EL0              23
-#define ARM64_SPECTRE_V2                       24
-#define ARM64_HAS_RAS_EXTN                     25
-#define ARM64_WORKAROUND_843419                        26
-#define ARM64_HAS_CACHE_IDC                    27
-#define ARM64_HAS_CACHE_DIC                    28
-#define ARM64_HW_DBM                           29
-#define ARM64_SPECTRE_V4                       30
-#define ARM64_MISMATCHED_CACHE_TYPE            31
-#define ARM64_HAS_STAGE2_FWB                   32
-#define ARM64_HAS_CRC32                                33
-#define ARM64_SSBS                             34
-#define ARM64_WORKAROUND_1418040               35
-#define ARM64_HAS_SB                           36
-#define ARM64_WORKAROUND_SPECULATIVE_AT                37
-#define ARM64_HAS_ADDRESS_AUTH_ARCH            38
-#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF         39
-#define ARM64_HAS_GENERIC_AUTH_ARCH            40
-#define ARM64_HAS_GENERIC_AUTH_IMP_DEF         41
-#define ARM64_HAS_IRQ_PRIO_MASKING             42
-#define ARM64_HAS_DCPODP                       43
-#define ARM64_WORKAROUND_1463225               44
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM    45
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46
-#define ARM64_WORKAROUND_1542419               47
-#define ARM64_HAS_E0PD                         48
-#define ARM64_HAS_RNG                          49
-#define ARM64_HAS_AMU_EXTN                     50
-#define ARM64_HAS_ADDRESS_AUTH                 51
-#define ARM64_HAS_GENERIC_AUTH                 52
-#define ARM64_HAS_32BIT_EL1                    53
-#define ARM64_BTI                              54
-#define ARM64_HAS_ARMv8_4_TTL                  55
-#define ARM64_HAS_TLB_RANGE                    56
-#define ARM64_MTE                              57
-#define ARM64_WORKAROUND_1508412               58
-#define ARM64_HAS_LDAPR                                59
-#define ARM64_KVM_PROTECTED_MODE               60
-#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP     61
-#define ARM64_HAS_EPAN                         62
-
-#define ARM64_NCAPS                            63
-
-#endif /* __ASM_CPUCAPS_H */
index cf8df032b9c30e4285df23ec4c8078c8f465857e..5e9b33cbac513094f5ab77cca3b924be34850280 100644 (file)
@@ -63,6 +63,7 @@
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector            18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize             19
 #define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp                  20
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc                  21
 
 #ifndef __ASSEMBLY__
 
@@ -201,6 +202,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
+extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
+
 extern u64 __vgic_v3_get_gic_config(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
index f612c090f2e41880a4d354eacce11aed685f3db5..01b9857757f2a0c4992bfa99f75f74f0321beda5 100644 (file)
@@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
        vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
 }
 
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+       return test_bit(feature, vcpu->arch.features);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
index 7859749d6628a5d0bf4ed758d03b25e5d3c0ac19..5dab69d2c22bf2074acee8be2df2e25c9521ab39 100644 (file)
@@ -893,8 +893,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
 __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
 #define __NR_mount_setattr 442
 __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
 #define __NR_landlock_create_ruleset 444
 __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
 #define __NR_landlock_add_rule 445
index 1cb39c0803a447872559c30f60437b67f21bbd71..e720148232a06c887c3035f3a4f790d235ec6744 100644 (file)
@@ -720,11 +720,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        return ret;
        }
 
-       if (run->immediate_exit)
-               return -EINTR;
-
        vcpu_load(vcpu);
 
+       if (run->immediate_exit) {
+               ret = -EINTR;
+               goto out;
+       }
+
        kvm_sigset_activate(vcpu);
 
        ret = 1;
@@ -897,6 +899,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
        kvm_sigset_deactivate(vcpu);
 
+out:
+       /*
+        * In the unlikely event that we are returning to userspace
+        * with pending exceptions or PC adjustment, commit these
+        * adjustments in order to give userspace a consistent view of
+        * the vcpu state. Note that this relies on __kvm_adjust_pc()
+        * being preempt-safe on VHE.
+        */
+       if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
+                                        KVM_ARM64_INCREMENT_PC)))
+               kvm_call_hyp(__kvm_adjust_pc, vcpu);
+
        vcpu_put(vcpu);
        return ret;
 }
index 73629094f90303e57fe3d4cf950547dbc85cd729..11541b94b328f8e1d110fa0be3fd37ec07f1ae59 100644 (file)
@@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
        *vcpu_pc(vcpu) = vect_offset;
 }
 
-void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
        if (vcpu_el1_is_32bit(vcpu)) {
                switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
                }
        }
 }
+
+/*
+ * Adjust the guest PC (and potentially exception state) depending on
+ * flags provided by the emulation code.
+ */
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+               kvm_inject_exception(vcpu);
+               vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+                                     KVM_ARM64_EXCEPT_MASK);
+       } else  if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+               kvm_skip_instr(vcpu);
+               vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+       }
+}
index 61716359035d6fa2a600a4def8d731681617de09..4fdfeabefeb432b44416ff64cf380f88ec8f0fa0 100644 (file)
@@ -13,8 +13,6 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
 
-void kvm_inject_exception(struct kvm_vcpu *vcpu);
-
 static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
        if (vcpu_mode_is_32bit(vcpu)) {
@@ -43,22 +41,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
        write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
 
-/*
- * Adjust the guest PC on entry, depending on flags provided by EL1
- * for the purpose of emulation (MMIO, sysreg) or exception injection.
- */
-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
-               kvm_inject_exception(vcpu);
-               vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
-                                     KVM_ARM64_EXCEPT_MASK);
-       } else  if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
-               kvm_skip_instr(vcpu);
-               vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
-       }
-}
-
 /*
  * Skip an instruction while host sysregs are live.
  * Assumes host is always 64-bit.
index f36420a80474524f064c9f5bdf7a0dbfa59b700e..1632f001f4ed6bf2d47eaba895a5082fe9c5520a 100644 (file)
@@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
        cpu_reg(host_ctxt, 1) =  __kvm_vcpu_run(kern_hyp_va(vcpu));
 }
 
+static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+{
+       DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+       __kvm_adjust_pc(kern_hyp_va(vcpu));
+}
+
 static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
 {
        __kvm_flush_vm_context();
@@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
 
 static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__kvm_vcpu_run),
+       HANDLE_FUNC(__kvm_adjust_pc),
        HANDLE_FUNC(__kvm_flush_vm_context),
        HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
        HANDLE_FUNC(__kvm_tlb_flush_vmid),
index e342f7f4f4fb71aca6c4ebbf14d9929436af07a5..4b60c0056c041521ffba9b9f0505ca605404aed2 100644 (file)
@@ -23,8 +23,8 @@
 extern unsigned long hyp_nr_cpus;
 struct host_kvm host_kvm;
 
-struct hyp_pool host_s2_mem;
-struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_mem;
+static struct hyp_pool host_s2_dev;
 
 /*
  * Copies of the host's CPU features registers holding sanitized values.
index 7488f53b0aa2fe05e4c11bd32060488a690ff386..a3d3a275344e906f8b539abf910c0410845eafba 100644 (file)
@@ -17,7 +17,6 @@
 #include <nvhe/trap_handler.h>
 
 struct hyp_pool hpool;
-struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 unsigned long hyp_nr_cpus;
 
 #define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -27,6 +26,7 @@ static void *vmemmap_base;
 static void *hyp_pgt_base;
 static void *host_s2_mem_pgt_base;
 static void *host_s2_dev_pgt_base;
+static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 
 static int divide_memory_pool(void *virt, unsigned long size)
 {
index e9f6ea704d07d02b6ea986886e29ca68f1d89ddf..f7af9688c1f72839280dc9c391b00c919860e300 100644 (file)
@@ -4,7 +4,6 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */
 
-#include <hyp/adjust_pc.h>
 #include <hyp/switch.h>
 #include <hyp/sysreg-sr.h>
 
@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
         */
        __debug_save_host_buffers_nvhe(vcpu);
 
-       __adjust_pc(vcpu);
+       __kvm_adjust_pc(vcpu);
 
        /*
         * We must restore the 32-bit state before the sysregs, thanks
index 7b8f7db5c1ed610080ab204668edb9359e1dbc01..b3229924d2431a164f32e746c4c8b4274cf219e3 100644 (file)
@@ -4,7 +4,6 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */
 
-#include <hyp/adjust_pc.h>
 #include <hyp/switch.h>
 
 #include <linux/arm-smccc.h>
@@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
        __load_guest_stage2(vcpu->arch.hw_mmu);
        __activate_traps(vcpu);
 
-       __adjust_pc(vcpu);
+       __kvm_adjust_pc(vcpu);
 
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);
index c5d1f3c87dbdb61d9f8dc83e944a26d44dd93b6d..c10207fed2f36fe6a46970912e302e7528d4b196 100644 (file)
@@ -1156,13 +1156,13 @@ out_unlock:
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
                             (range->end - range->start) << PAGE_SHIFT,
                             range->may_block);
 
-       return 0;
+       return false;
 }
 
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1170,7 +1170,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        kvm_pfn_t pfn = pte_pfn(range->pte);
 
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        WARN_ON(range->end - range->start != 1);
 
@@ -1190,7 +1190,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
                               PAGE_SIZE, __pfn_to_phys(pfn),
                               KVM_PGTABLE_PROT_R, NULL);
 
-       return 0;
+       return false;
 }
 
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1200,7 +1200,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        pte_t pte;
 
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
 
@@ -1213,7 +1213,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
                                           range->start << PAGE_SHIFT);
index 956cdc240148bf37024994700573997c3734678b..d37ebee085cfeb23671e7d58efcf6c753908e5b5 100644 (file)
@@ -166,6 +166,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu *tmp;
+       bool is32bit;
+       int i;
+
+       is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+               return false;
+
+       /* Check that the vcpus are either all 32bit or all 64bit */
+       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+               if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+                       return false;
+       }
+
+       return true;
+}
+
 /**
  * kvm_reset_vcpu - sets core registers and sys_regs to reset value
  * @vcpu: The VCPU pointer
@@ -217,13 +236,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                }
        }
 
+       if (!vcpu_allowed_register_width(vcpu)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
-                               ret = -EINVAL;
-                               goto out;
-                       }
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
index 76ea2800c33e8240450341b54f207c5129f6a693..1a7968ad078c60a00f0d9e63577674c6b8fa13d9 100644 (file)
@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+       trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
        return true;
 }
@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
 }
 
 static bool trap_bcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+       trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
        return true;
 }
@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
 }
 
 static bool trap_wvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write,
-               vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+       trace_trap_reg(__func__, rd->CRm, p->is_write,
+               vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
 
        return true;
 }
@@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_wvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
 }
 
 static bool trap_wcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+       trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
        return true;
 }
@@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_wcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
 }
 
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
index ac485163a4a7669f0e6d1b1eca683c65ad8c59ec..6d44c028d1c9e647394d08e67fde7b99efb4768a 100644 (file)
@@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
 {
        struct page *page = pte_page(pte);
 
-       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+       if (!test_bit(PG_dcache_clean, &page->flags)) {
                sync_icache_aliases(page_address(page), page_size(page));
+               set_bit(PG_dcache_clean, &page->flags);
+       }
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
index 16a2b2b1c54d4adda1b416772eded43d0b49946d..e55409caaee340a30011d8ca3e3f684102031fc3 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/sizes.h>
 #include <asm/tlb.h>
 #include <asm/alternative.h>
+#include <asm/xen/swiotlb-xen.h>
 
 /*
  * We need to be able to catch inadvertent references to memstart_addr
@@ -482,7 +483,7 @@ void __init mem_init(void)
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > PFN_DOWN(arm64_dma_phys_limit))
                swiotlb_init(1);
-       else
+       else if (!xen_swiotlb_detect())
                swiotlb_force = SWIOTLB_NO_FORCE;
 
        set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
index 6dd9369e3ea0e12db81a1d667343ad2b172182e6..89b66ef43a0ff9baca0c534875564e696fafbbae 100644 (file)
@@ -515,7 +515,8 @@ static void __init map_mem(pgd_t *pgdp)
         */
        BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-       if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
+       if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
+           IS_ENABLED(CONFIG_KFENCE))
                flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
        /*
index 0a48191534ff6360787b17c428b017a79190ed7e..97d7bcd8d4f26c5001c14bc346c5ce88059f2a0a 100644 (file)
@@ -447,6 +447,18 @@ SYM_FUNC_START(__cpu_setup)
        mov     x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
        msr_s   SYS_GCR_EL1, x10
 
+       /*
+        * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+        * RGSR_EL1.SEED must be non-zero for IRG to produce
+        * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+        * must initialize it.
+        */
+       mrs     x10, CNTVCT_EL0
+       ands    x10, x10, #SYS_RGSR_EL1_SEED_MASK
+       csinc   x10, x10, xzr, ne
+       lsl     x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
+       msr_s   SYS_RGSR_EL1, x10
+
        /* clear any pending tag check faults in TFSR*_EL1 */
        msr_s   SYS_TFSR_EL1, xzr
        msr_s   SYS_TFSRE0_EL1, xzr
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile
new file mode 100644 (file)
index 0000000..932b4fe
--- /dev/null
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+gen := arch/$(ARCH)/include/generated
+kapi := $(gen)/asm
+
+kapi-hdrs-y := $(kapi)/cpucaps.h
+
+targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y))
+
+PHONY += kapi
+
+kapi:   $(kapi-hdrs-y) $(gen-y)
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+quiet_cmd_gen_cpucaps = GEN     $@
+      cmd_gen_cpucaps = mkdir -p $(dir $@) && \
+                     $(AWK) -f $(filter-out $(PHONY),$^) > $@
+
+$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
+       $(call if_changed,gen_cpucaps)
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
new file mode 100644 (file)
index 0000000..21fbdda
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Internal CPU capabilities constants, keep this list sorted
+
+BTI
+HAS_32BIT_EL0
+HAS_32BIT_EL1
+HAS_ADDRESS_AUTH
+HAS_ADDRESS_AUTH_ARCH
+HAS_ADDRESS_AUTH_IMP_DEF
+HAS_AMU_EXTN
+HAS_ARMv8_4_TTL
+HAS_CACHE_DIC
+HAS_CACHE_IDC
+HAS_CNP
+HAS_CRC32
+HAS_DCPODP
+HAS_DCPOP
+HAS_E0PD
+HAS_EPAN
+HAS_GENERIC_AUTH
+HAS_GENERIC_AUTH_ARCH
+HAS_GENERIC_AUTH_IMP_DEF
+HAS_IRQ_PRIO_MASKING
+HAS_LDAPR
+HAS_LSE_ATOMICS
+HAS_NO_FPSIMD
+HAS_NO_HW_PREFETCH
+HAS_PAN
+HAS_RAS_EXTN
+HAS_RNG
+HAS_SB
+HAS_STAGE2_FWB
+HAS_SYSREG_GIC_CPUIF
+HAS_TLB_RANGE
+HAS_VIRT_HOST_EXTN
+HW_DBM
+KVM_PROTECTED_MODE
+MISMATCHED_CACHE_TYPE
+MTE
+SPECTRE_V2
+SPECTRE_V3A
+SPECTRE_V4
+SSBS
+SVE
+UNMAP_KERNEL_AT_EL0
+WORKAROUND_834220
+WORKAROUND_843419
+WORKAROUND_845719
+WORKAROUND_858921
+WORKAROUND_1418040
+WORKAROUND_1463225
+WORKAROUND_1508412
+WORKAROUND_1542419
+WORKAROUND_CAVIUM_23154
+WORKAROUND_CAVIUM_27456
+WORKAROUND_CAVIUM_30115
+WORKAROUND_CAVIUM_TX2_219_PRFM
+WORKAROUND_CAVIUM_TX2_219_TVM
+WORKAROUND_CLEAN_CACHE
+WORKAROUND_DEVICE_LOAD_ACQUIRE
+WORKAROUND_NVIDIA_CARMEL_CNP
+WORKAROUND_QCOM_FALKOR_E1003
+WORKAROUND_REPEAT_TLBI
+WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/tools/gen-cpucaps.awk b/arch/arm64/tools/gen-cpucaps.awk
new file mode 100755 (executable)
index 0000000..00c9e72
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+# gen-cpucaps.awk: arm64 cpucaps header generator
+#
+# Usage: awk -f gen-cpucaps.awk cpucaps.txt
+
+# Log an error and terminate
+function fatal(msg) {
+       print "Error at line " NR ": " msg > "/dev/stderr"
+       exit 1
+}
+
+# skip blank lines and comment lines
+/^$/ { next }
+/^#/ { next }
+
+BEGIN {
+       print "#ifndef __ASM_CPUCAPS_H"
+       print "#define __ASM_CPUCAPS_H"
+       print ""
+       print "/* Generated file - do not edit */"
+       cap_num = 0
+       print ""
+}
+
+/^[vA-Z0-9_]+$/ {
+       printf("#define ARM64_%-30s\t%d\n", $0, cap_num++)
+       next
+}
+
+END {
+       printf("#define ARM64_NCAPS\t\t\t\t%d\n", cap_num)
+       print ""
+       print "#endif /* __ASM_CPUCAPS_H */"
+}
+
+# Any lines not handled by previous rules are unexpected
+{
+       fatal("unhandled statement")
+}
index 1ee8e736a48e37d73c5137e17154741777292a61..bb11fe4c875af9c4b03362bc8e15310bab2da3d5 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index a4b7ee1df211936ca569b673b9efe693f2ebd0af..8f215e79e70e61c1a69b949aac19a6ff8b5a43d9 100644 (file)
@@ -623,7 +623,8 @@ static inline void siginfo_build_tests(void)
        BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
 
        /* _sigfault._perf */
-       BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x10);
+       BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
+       BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
 
        /* _sigpoll */
        BUILD_BUG_ON(offsetof(siginfo_t, si_band)   != 0x0c);
index 0dd019dc21362d18360269261f4f69977aa1cf3b..79c2d24c89ddade58fc6161c1d917edc0df11768 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index 2ac716984ca2869112ad4847c56eda8803dc83f7..b11395a20c203769a55bc32b97e84d5c389206d1 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index b184baa4e56a65b3da2edd8b225a2c2d62095459..f175bce2987faebdf91bf9789f4217976580a712 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/reboot.h>
 #include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
 
 const char *get_system_type(void)
index f93aa5ee2e2e36b864b7722f1be0bad022276382..3481ed4c117bd000a9e8b44afc5a25ba87c330e8 100644 (file)
@@ -3,6 +3,9 @@
  *
  */
 
+#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
+#define _ASM_MIPS_BOARDS_LAUNCH_H
+
 #ifndef _ASSEMBLER_
 
 struct cpulaunch {
@@ -34,3 +37,5 @@ struct cpulaunch {
 
 /* Polling period in count cycles for secondary CPU's */
 #define LAUNCHPERIOD   10000
+
+#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
index 5e009665725104d960b1c0a1d066a4470f324abc..9220909526f9b2246951e69f447abc6f2c4df04d 100644 (file)
 440    n32     process_madvise                 sys_process_madvise
 441    n32     epoll_pwait2                    compat_sys_epoll_pwait2
 442    n32     mount_setattr                   sys_mount_setattr
-443    n32     quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    n32     landlock_create_ruleset         sys_landlock_create_ruleset
 445    n32     landlock_add_rule               sys_landlock_add_rule
 446    n32     landlock_restrict_self          sys_landlock_restrict_self
index 9974f5f8e49bce05819434ff65479b98caf85b91..9cd1c34f31b5040434060a7b242be5946bd5c988 100644 (file)
 440    n64     process_madvise                 sys_process_madvise
 441    n64     epoll_pwait2                    sys_epoll_pwait2
 442    n64     mount_setattr                   sys_mount_setattr
-443    n64     quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    n64     landlock_create_ruleset         sys_landlock_create_ruleset
 445    n64     landlock_add_rule               sys_landlock_add_rule
 446    n64     landlock_restrict_self          sys_landlock_restrict_self
index 39d6e71e57b608866fda81de210974076d0ca8cb..d560c467a8c697b8e01bc4d66910d2a15bb27d37 100644 (file)
 440    o32     process_madvise                 sys_process_madvise
 441    o32     epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
 442    o32     mount_setattr                   sys_mount_setattr
-443    o32     quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    o32     landlock_create_ruleset         sys_landlock_create_ruleset
 445    o32     landlock_add_rule               sys_landlock_add_rule
 446    o32     landlock_restrict_self          sys_landlock_restrict_self
index de03838b343b8bc33d869156ecefaa6326f3ed48..a9b72eacfc0b36e7fec5652dc3c2445fb449b5b4 100644 (file)
@@ -37,7 +37,7 @@
  */
 notrace void arch_local_irq_disable(void)
 {
-       preempt_disable();
+       preempt_disable_notrace();
 
        __asm__ __volatile__(
        "       .set    push                                            \n"
@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
        : /* no inputs */
        : "memory");
 
-       preempt_enable();
+       preempt_enable_notrace();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
 {
        unsigned long flags;
 
-       preempt_disable();
+       preempt_disable_notrace();
 
        __asm__ __volatile__(
        "       .set    push                                            \n"
@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
        : /* no inputs */
        : "memory");
 
-       preempt_enable();
+       preempt_enable_notrace();
 
        return flags;
 }
@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
 {
        unsigned long __tmp1;
 
-       preempt_disable();
+       preempt_disable_notrace();
 
        __asm__ __volatile__(
        "       .set    push                                            \n"
@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
        : "0" (flags)
        : "memory");
 
-       preempt_enable();
+       preempt_enable_notrace();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
 
index a7bf0c80371cd02063add7e8300738649a9ee28c..830ab91e574f4851c56e0242d528054c94f86517 100644 (file)
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p)  __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
        protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[4]  = PVA(_PAGE_PRESENT);
-       protection_map[5]  = PVA(_PAGE_PRESENT);
-       protection_map[6]  = PVA(_PAGE_PRESENT);
-       protection_map[7]  = PVA(_PAGE_PRESENT);
+       protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+       protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[4]  = PM(_PAGE_PRESENT);
+       protection_map[5]  = PM(_PAGE_PRESENT);
+       protection_map[6]  = PM(_PAGE_PRESENT);
+       protection_map[7]  = PM(_PAGE_PRESENT);
 
        protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+       protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
                                _PAGE_NO_READ);
-       protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-       protection_map[12] = PVA(_PAGE_PRESENT);
-       protection_map[13] = PVA(_PAGE_PRESENT);
-       protection_map[14] = PVA(_PAGE_PRESENT);
-       protection_map[15] = PVA(_PAGE_PRESENT);
+       protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+       protection_map[12] = PM(_PAGE_PRESENT);
+       protection_map[13] = PM(_PAGE_PRESENT);
+       protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+       protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 }
 
-#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
index 0c5de07da097ad5e9733aaffa100debb6699d0d3..0135376c5de5d0046579947bec45cc4a9af9d3c2 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/sizes.h>
 #include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
 
 __iomem void *rt_sysc_membase;
 __iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
 
 __iomem void *plat_of_remap_node(const char *node)
 {
diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..7538294
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define mb() asm volatile ("l.msync" ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
index 2416a9f9153306c37a2800f9396798e02510e6fa..c6f9e7b9f7cb23f165e127b153a45595ca582baf 100644 (file)
@@ -278,6 +278,8 @@ void calibrate_delay(void)
        pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
                loops_per_jiffy / (500000 / HZ),
                (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+
+       of_node_put(cpu);
 }
 
 void __init setup_arch(char **cmdline_p)
index d5641198b90ce7b10c8fa1c7bc8d30e90a6b930c..cfef61a7b6c2a83c150eae8e775011c3305a5514 100644 (file)
@@ -75,7 +75,6 @@ static void __init map_ram(void)
        /* These mark extents of read-only kernel pages...
         * ...from vmlinux.lds.S
         */
-       struct memblock_region *region;
 
        v = PAGE_OFFSET;
 
@@ -121,7 +120,7 @@ static void __init map_ram(void)
                }
 
                printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
-                      region->base, region->base + region->size);
+                      start, end);
        }
 }
 
@@ -129,7 +128,6 @@ void __init paging_init(void)
 {
        extern void tlb_init(void);
 
-       unsigned long end;
        int i;
 
        printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -145,8 +143,6 @@ void __init paging_init(void)
         */
        current_pgd[smp_processor_id()] = init_mm.pgd;
 
-       end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
-
        map_ram();
 
        zone_sizes_init();
index 5ac80b83d745e19020d37af6175b6da495a9a1ca..aabc37f8cae3a4248433cbbbb2af139badf75aa2 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index c2717f31925a20cce778340c6c10d8bd216ea633..ccda0a91abf003425bcff4f10e2092096856ac87 100644 (file)
        };
 
 /include/ "pq3-i2c-0.dtsi"
+       i2c@3000 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "pq3-i2c-1.dtsi"
+       i2c@3100 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "pq3-duart-0.dtsi"
 /include/ "pq3-espi-0.dtsi"
        spi0: spi@7000 {
index 872e4485dc3f033876ee7d69f59fa0add91a1783..ddc018d42252f9c49eb994705d0dde3774bbaabb 100644 (file)
        };
 
 /include/ "qoriq-i2c-0.dtsi"
+       i2c@118000 {
+               fsl,i2c-erratum-a004447;
+       };
+
+       i2c@118100 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "qoriq-i2c-1.dtsi"
+       i2c@119000 {
+               fsl,i2c-erratum-a004447;
+       };
+
+       i2c@119100 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "qoriq-duart-0.dtsi"
 /include/ "qoriq-duart-1.dtsi"
 /include/ "qoriq-gpio-0.dtsi"
index 4430509060185f4757a4c603f33b3b1db8b0d3e9..e3b29eda8074c448b62f8e75c058911b0eb55c85 100644 (file)
  */
 long plpar_hcall_norets(unsigned long opcode, ...);
 
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
 /**
  * plpar_hcall: - Make a pseries hypervisor call
  * @opcode: The hypervisor call to make.
index 44cde2e129b8833a6fe7dcd2272f3fc50f570756..59f704408d65d37ab67cd94f2cd5c3ffcc4e1199 100644 (file)
@@ -153,8 +153,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
  */
 static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
 {
-       if (user_mode(regs))
-               kuep_unlock();
 }
 
 static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
@@ -222,6 +220,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
        local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
+       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
+                               regs->nip < (unsigned long)__end_interrupts) {
+               // Kernel code running below __end_interrupts is
+               // implicitly soft-masked.
+               regs->softe = IRQS_ALL_DISABLED;
+       }
+
        /* Don't do any per-CPU operations until interrupt state is fixed */
 
        if (nmi_disables_ftrace(regs)) {
index 1e83359f286b9f7fb03e2d681e091e5045e1cd20..7f2e90db2050b0a7ecab7615c9a4fd0b910ca548 100644 (file)
@@ -51,6 +51,7 @@
 /* PPC-specific vcpu->requests bit members */
 #define KVM_REQ_WATCHDOG       KVM_ARCH_REQ(0)
 #define KVM_REQ_EPR_EXIT       KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER  KVM_ARCH_REQ(2)
 
 #include <linux/mmu_notifier.h>
 
index 5d1726bb28e79ee95988a0ecd921cd39eecefc9d..bcb7b5f917be6338a9c9eb7b094a48149c5d6215 100644 (file)
@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
        return be32_to_cpu(yield_count);
 }
 
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it can not acquire the lock, and unlock slow path might prod if a waiter has
+ * yielded). So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
 static inline void yield_to_preempted(int cpu, u32 yield_count)
 {
-       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+       plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
 }
 
 static inline void prod_cpu(int cpu)
 {
-       plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+       plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
 }
 
 static inline void yield_to_any(void)
 {
-       plpar_hcall_norets(H_CONFER, -1, 0);
+       plpar_hcall_norets_notrace(H_CONFER, -1, 0);
 }
 #else
 static inline bool is_shared_processor(void)
index ece84a430701fcc20ac17e709bcff0f02732036d..83e0f701ebc67d09749396173a637b225642bf60 100644 (file)
@@ -28,7 +28,11 @@ static inline void set_cede_latency_hint(u8 latency_hint)
 
 static inline long cede_processor(void)
 {
-       return plpar_hcall_norets(H_CEDE);
+       /*
+        * We cannot call tracepoints inside RCU idle regions which
+        * means we must not trace H_CEDE.
+        */
+       return plpar_hcall_norets_notrace(H_CEDE);
 }
 
 static inline long extended_cede_processor(unsigned long latency_hint)
index 33fa5dd8ee6a7d7e90f5ae0c087037c4098ce904..714a35f0d425b960ae5e15a667fcf615a4ef3525 100644 (file)
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
        pgd_t *pgdir = init_mm.pgd;
        return __find_linux_pte(pgdir, ea, NULL, hshift);
 }
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+       pte_t *ptep;
+       phys_addr_t pa;
+       int hugepage_shift;
+
+       /*
+        * init_mm does not free page tables, and does not do THP. It may
+        * have huge pages from huge vmalloc / ioremap etc.
+        */
+       ptep = find_init_mm_pte(addr, &hugepage_shift);
+       if (WARN_ON(!ptep))
+               return 0;
+
+       pa = PFN_PHYS(pte_pfn(*ptep));
+
+       if (!hugepage_shift)
+               hugepage_shift = PAGE_SHIFT;
+
+       pa |= addr & ((1ul << hugepage_shift) - 1);
+
+       return pa;
+}
+
 /*
  * This is what we should always use. Any other lockless page table lookup needs
  * careful audit against THP split.
index 9c9ab27461683bbfa904fbabc06d775a56078df0..b476a685f066ec77f080f923a1b25fdfcb8ca658 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef _ASM_POWERPC_PTRACE_H
 #define _ASM_POWERPC_PTRACE_H
 
+#include <linux/err.h>
 #include <uapi/asm/ptrace.h>
 #include <asm/asm-const.h>
 
@@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 long do_syscall_trace_enter(struct pt_regs *regs);
 void do_syscall_trace_leave(struct pt_regs *regs);
 
-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-static inline int is_syscall_success(struct pt_regs *regs)
-{
-       return !(regs->ccr & 0x10000000);
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
-       if (is_syscall_success(regs))
-               return regs->gpr[3];
-       else
-               return -regs->gpr[3];
-}
-
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
-{
-       regs->gpr[3] = rc;
-}
-
 #ifdef __powerpc64__
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
 #else
@@ -235,6 +217,31 @@ static __always_inline void set_trap_norestart(struct pt_regs *regs)
        regs->trap |= 0x1;
 }
 
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       if (trap_is_scv(regs))
+               return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+       else
+               return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       if (trap_is_scv(regs))
+               return regs->gpr[3];
+
+       if (is_syscall_success(regs))
+               return regs->gpr[3];
+       else
+               return -regs->gpr[3];
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+       regs->gpr[3] = rc;
+}
+
 #define arch_has_single_step() (1)
 #define arch_has_block_step()  (true)
 #define ARCH_HAS_USER_SINGLE_STEP_REPORT
index fd1b518eed17c2865010f7ed6ebb7a980261c271..ba0f88f3a30daea20e9c5e9efb322a627678a00c 100644 (file)
@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
                                     struct pt_regs *regs)
 {
-       /*
-        * If the system call failed,
-        * regs->gpr[3] contains a positive ERRORCODE.
-        */
-       return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+       if (trap_is_scv(regs)) {
+               unsigned long error = regs->gpr[3];
+
+               return IS_ERR_VALUE(error) ? error : 0;
+       } else {
+               /*
+                * If the system call failed,
+                * regs->gpr[3] contains a positive ERRORCODE.
+                */
+               return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+       }
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
                                            struct pt_regs *regs,
                                            int error, long val)
 {
-       /*
-        * In the general case it's not obvious that we must deal with CCR
-        * here, as the syscall exit path will also do that for us. However
-        * there are some places, eg. the signal code, which check ccr to
-        * decide if the value in r3 is actually an error.
-        */
-       if (error) {
-               regs->ccr |= 0x10000000L;
-               regs->gpr[3] = error;
+       if (trap_is_scv(regs)) {
+               regs->gpr[3] = (long) error ?: val;
        } else {
-               regs->ccr &= ~0x10000000L;
-               regs->gpr[3] = val;
+               /*
+                * In the general case it's not obvious that we must deal with
+                * CCR here, as the syscall exit path will also do that for us.
+                * However there are some places, eg. the signal code, which
+                * check ccr to decide if the value in r3 is actually an error.
+                */
+               if (error) {
+                       regs->ccr |= 0x10000000L;
+                       regs->gpr[3] = error;
+               } else {
+                       regs->ccr &= ~0x10000000L;
+                       regs->gpr[3] = val;
+               }
        }
 }
 
index a09e4240c5b1670ee184d85c93d82d738251ebe5..22c79ab400060fb4315aba198be7925a6ac987f1 100644 (file)
@@ -157,7 +157,7 @@ do {                                                                \
                "2:     lwz%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
                EX_TABLE(2b, %l2)                               \
-               : "=r" (x)                                      \
+               : "=&r" (x)                                     \
                : "m" (*addr)                                   \
                :                                               \
                : label)
index f24cd53ff26e2e5ff4806e7fb70e45cc161c5c3b..3bbdcc86d01ba85663bd7c62d67ca788f5db6b80 100644 (file)
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
  */
 static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
-       pte_t *ptep;
-       unsigned long pa;
-       int hugepage_shift;
-
-       /*
-        * We won't find hugepages here(this is iomem). Hence we are not
-        * worried about _PAGE_SPLITTING/collapse. Also we will not hit
-        * page table free, because of init_mm.
-        */
-       ptep = find_init_mm_pte(token, &hugepage_shift);
-       if (!ptep)
-               return token;
-
-       pa = pte_pfn(*ptep);
-
-       /* On radix we can do hugepage mappings for io, so handle that */
-       if (!hugepage_shift)
-               hugepage_shift = PAGE_SHIFT;
-
-       pa <<= PAGE_SHIFT;
-       pa |= token & ((1ul << hugepage_shift) - 1);
-       return pa;
+       return ppc_find_vmap_phys(token);
 }
 
 /*
index 7c3654b0d0f47b3f8a42436e59f3d3de0ca0232f..f1ae710274bc9142d5fd7fe6b4bd4f065044f734 100644 (file)
@@ -340,6 +340,12 @@ ret_from_mc_except:
        andi.   r10,r10,IRQS_DISABLED;  /* yes -> go out of line */ \
        bne     masked_interrupt_book3e_##n
 
+/*
+ * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
+ * called, because that does SAVE_NVGPRS which must see the original register
+ * values, otherwise the scratch values might be restored when exiting the
+ * interrupt.
+ */
 #define PROLOG_ADDITION_2REGS_GEN(n)                                       \
        std     r14,PACA_EXGEN+EX_R14(r13);                                 \
        std     r15,PACA_EXGEN+EX_R15(r13)
@@ -535,6 +541,10 @@ __end_interrupts:
                                PROLOG_ADDITION_2REGS)
        mfspr   r14,SPRN_DEAR
        mfspr   r15,SPRN_ESR
+       std     r14,_DAR(r1)
+       std     r15,_DSISR(r1)
+       ld      r14,PACA_EXGEN+EX_R14(r13)
+       ld      r15,PACA_EXGEN+EX_R15(r13)
        EXCEPTION_COMMON(0x300)
        b       storage_fault_common
 
@@ -544,6 +554,10 @@ __end_interrupts:
                                PROLOG_ADDITION_2REGS)
        li      r15,0
        mr      r14,r10
+       std     r14,_DAR(r1)
+       std     r15,_DSISR(r1)
+       ld      r14,PACA_EXGEN+EX_R14(r13)
+       ld      r15,PACA_EXGEN+EX_R15(r13)
        EXCEPTION_COMMON(0x400)
        b       storage_fault_common
 
@@ -557,6 +571,10 @@ __end_interrupts:
                                PROLOG_ADDITION_2REGS)
        mfspr   r14,SPRN_DEAR
        mfspr   r15,SPRN_ESR
+       std     r14,_DAR(r1)
+       std     r15,_DSISR(r1)
+       ld      r14,PACA_EXGEN+EX_R14(r13)
+       ld      r15,PACA_EXGEN+EX_R15(r13)
        EXCEPTION_COMMON(0x600)
        b       alignment_more  /* no room, go out of line */
 
@@ -565,10 +583,10 @@ __end_interrupts:
        NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
                                PROLOG_ADDITION_1REG)
        mfspr   r14,SPRN_ESR
-       EXCEPTION_COMMON(0x700)
        std     r14,_DSISR(r1)
-       addi    r3,r1,STACK_FRAME_OVERHEAD
        ld      r14,PACA_EXGEN+EX_R14(r13)
+       EXCEPTION_COMMON(0x700)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      program_check_exception
        REST_NVGPRS(r1)
        b       interrupt_return
@@ -725,11 +743,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         * normal exception
         */
        mfspr   r14,SPRN_DBSR
-       EXCEPTION_COMMON_CRIT(0xd00)
        std     r14,_DSISR(r1)
-       addi    r3,r1,STACK_FRAME_OVERHEAD
        ld      r14,PACA_EXCRIT+EX_R14(r13)
        ld      r15,PACA_EXCRIT+EX_R15(r13)
+       EXCEPTION_COMMON_CRIT(0xd00)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      DebugException
        REST_NVGPRS(r1)
        b       interrupt_return
@@ -796,11 +814,11 @@ kernel_dbg_exc:
         * normal exception
         */
        mfspr   r14,SPRN_DBSR
-       EXCEPTION_COMMON_DBG(0xd08)
        std     r14,_DSISR(r1)
-       addi    r3,r1,STACK_FRAME_OVERHEAD
        ld      r14,PACA_EXDBG+EX_R14(r13)
        ld      r15,PACA_EXDBG+EX_R15(r13)
+       EXCEPTION_COMMON_DBG(0xd08)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      DebugException
        REST_NVGPRS(r1)
        b       interrupt_return
@@ -931,11 +949,7 @@ masked_interrupt_book3e_0x2c0:
  * original values stashed away in the PACA
  */
 storage_fault_common:
-       std     r14,_DAR(r1)
-       std     r15,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       ld      r14,PACA_EXGEN+EX_R14(r13)
-       ld      r15,PACA_EXGEN+EX_R15(r13)
        bl      do_page_fault
        b       interrupt_return
 
@@ -944,11 +958,7 @@ storage_fault_common:
  * continues here.
  */
 alignment_more:
-       std     r14,_DAR(r1)
-       std     r15,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       ld      r14,PACA_EXGEN+EX_R14(r13)
-       ld      r15,PACA_EXGEN+EX_R15(r13)
        bl      alignment_exception
        REST_NVGPRS(r1)
        b       interrupt_return
index e4559f8914eb78920bc279430101351c0cdb1047..e0938ba298f2a2f928844706ca75ed057707d1f6 100644 (file)
@@ -34,9 +34,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
        syscall_fn f;
 
        kuep_lock();
-#ifdef CONFIG_PPC32
-       kuap_save_and_lock(regs);
-#endif
 
        regs->orig_gpr3 = r3;
 
@@ -427,6 +424,7 @@ again:
 
        /* Restore user access locks last */
        kuap_user_restore(regs);
+       kuep_unlock();
 
        return ret;
 }
index 51bbaae94cccf097baed41af922f232f1ee5806e..c877f074d1749b2ce757e213efe80d48832003aa 100644 (file)
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 #ifdef CONFIG_PPC_INDIRECT_MMIO
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
-       unsigned hugepage_shift;
        struct iowa_bus *bus;
        int token;
 
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
                bus = &iowa_busses[token - 1];
        else {
                unsigned long vaddr, paddr;
-               pte_t *ptep;
 
                vaddr = (unsigned long)PCI_FIX_ADDR(addr);
                if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
                        return NULL;
-               /*
-                * We won't find huge pages here (iomem). Also can't hit
-                * a page table free due to init_mm
-                */
-               ptep = find_init_mm_pte(vaddr, &hugepage_shift);
-               if (ptep == NULL)
-                       paddr = 0;
-               else {
-                       WARN_ON(hugepage_shift);
-                       paddr = pte_pfn(*ptep) << PAGE_SHIFT;
-               }
+
+               paddr = ppc_find_vmap_phys(vaddr);
+
                bus = iowa_pci_find(vaddr, paddr);
 
                if (bus == NULL)
index 57d6b85e9b964f1339f9656a893e8c3ba87ee545..2af89a5e379f2bd2e3d3e624a52615728fc27ba0 100644 (file)
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;
-       size_t size_io = size;
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        memset(ret, 0, size);
 
        /* Set up tces to cover the allocated range */
-       size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
-       nio_pages = size_io >> tbl->it_page_shift;
-       io_order = get_iommu_order(size_io, tbl);
+       nio_pages = size >> tbl->it_page_shift;
+       io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
 {
        if (tbl) {
-               size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
-               unsigned int nio_pages = size_io >> tbl->it_page_shift;
+               unsigned int nio_pages;
 
+               size = PAGE_ALIGN(size);
+               nio_pages = size >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
index 01ab2163659e4bb7763c0f22ed13d36349e1c9b4..e8c2a6373157dacacaf0d61ada1eab9acca62eab 100644 (file)
@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
        int ret = 0;
        struct kprobe *prev;
        struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
-       struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
 
        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
        } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
                printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
                ret = -EINVAL;
-       } else if (ppc_inst_prefixed(prefix)) {
+       } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+                  ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
                printk("Cannot register a kprobe on the second word of prefixed instruction\n");
                ret = -EINVAL;
        }
index 8b2c1a8553a0ed1d1429ebf56e863d2e1cda8ae4..cfc03e016ff2d5f4f605f9cfae425b2fef34478c 100644 (file)
@@ -356,13 +356,16 @@ static void __init setup_legacy_serial_console(int console)
 
 static int __init ioremap_legacy_serial_console(void)
 {
-       struct legacy_serial_info *info = &legacy_serial_infos[legacy_serial_console];
-       struct plat_serial8250_port *port = &legacy_serial_ports[legacy_serial_console];
+       struct plat_serial8250_port *port;
+       struct legacy_serial_info *info;
        void __iomem *vaddr;
 
        if (legacy_serial_console < 0)
                return 0;
 
+       info = &legacy_serial_infos[legacy_serial_console];
+       port = &legacy_serial_ports[legacy_serial_console];
+
        if (!info->early_addr)
                return 0;
 
index b779d25761cf98f8b33ef22400f3708d665eb035..e42b85e4f1aa51a37dc2ef24fb6e1323f2c8d2c9 100644 (file)
@@ -369,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
        apply_feature_fixups();
        setup_feature_keys();
 
-       early_ioremap_setup();
-
        /* Initialize the hash table or TLB handling */
        early_init_mmu();
 
+       early_ioremap_setup();
+
        /*
         * After firmware and early platform setup code has set things up,
         * we note the SPR values for configurable control/performance
index f4aafa337c2edbcc26dbd82df8059f0555cd2257..1f07317964e499c73ced538b8477e2f45183d3b1 100644 (file)
@@ -166,9 +166,9 @@ copy_ckfpr_from_user(struct task_struct *task, void __user *from)
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 #else
-#define unsafe_copy_fpr_to_user(to, task, label) do { } while (0)
+#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label;} while (0)
 
-#define unsafe_copy_fpr_from_user(task, from, label) do { } while (0)
+#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label;} while (0)
 
 static inline unsigned long
 copy_fpr_to_user(void __user *to, struct task_struct *task)
index 2e68fbb57cc66de2e451c9e9721d425d9d76d9c1..8f052ff4058ce5534327ea64550af52bff5f5de2 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index 2d9193cd73be491da0dd3115c3b737979fafebe1..c63e263312a4fa0d3833e7dd133ced40f2be97f1 100644 (file)
@@ -840,7 +840,7 @@ bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
                        kvm_unmap_radix(kvm, range->slot, gfn);
        } else {
                for (gfn = range->start; gfn < range->end; gfn++)
-                       kvm_unmap_rmapp(kvm, range->slot, range->start);
+                       kvm_unmap_rmapp(kvm, range->slot, gfn);
        }
 
        return false;
index 28a80d240b7643b974c9a55b8bfb24632710b560..bc08136446660b7493c6abc215f6a6e841c7fac9 100644 (file)
@@ -3936,7 +3936,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                                break;
                        }
                        cur = ktime_get();
-               } while (single_task_running() && ktime_before(cur, stop));
+               } while (kvm_vcpu_can_poll(cur, stop));
 
                spin_lock(&vc->lock);
                vc->vcore_state = VCORE_INACTIVE;
@@ -4455,7 +4455,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                mtspr(SPRN_EBBRR, ebb_regs[1]);
                mtspr(SPRN_BESCR, ebb_regs[2]);
                mtspr(SPRN_TAR, user_tar);
-               mtspr(SPRN_FSCR, current->thread.fscr);
        }
        mtspr(SPRN_VRSAVE, user_vrsave);
 
index 7af7c70f14680e2e3530c1d174b26454519dd38c..7a0f12404e0eed3ec0da1298cbd79a6e2171cd72 100644 (file)
 #include <asm/pte-walk.h>
 
 /* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
 {
-       unsigned long addr = (unsigned long) x;
-       pte_t *p;
-       /*
-        * assume we don't have huge pages in vmalloc space...
-        * So don't worry about THP collapse/split. Called
-        * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
-        */
-       p = find_init_mm_pte(addr, NULL);
-       if (!p || !pte_present(*p))
-               return NULL;
-       addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
-       return __va(addr);
+       return __va(ppc_find_vmap_phys((unsigned long)addr));
 }
 
 /* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
index 5e634db4809bfd2354e977840736eebe62b2119c..004f0d4e665f87f8ed695554ae55ef7cb701da07 100644 (file)
@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_UAMOR       (SFS-88)
 #define STACK_SLOT_DAWR1       (SFS-96)
 #define STACK_SLOT_DAWRX1      (SFS-104)
+#define STACK_SLOT_FSCR                (SFS-112)
 /* the following is used by the P9 short path */
 #define STACK_SLOT_NVGPRS      (SFS-152)       /* 18 gprs */
 
@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION
        std     r6, STACK_SLOT_DAWR0(r1)
        std     r7, STACK_SLOT_DAWRX0(r1)
        std     r8, STACK_SLOT_IAMR(r1)
+       mfspr   r5, SPRN_FSCR
+       std     r5, STACK_SLOT_FSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
        mfspr   r6, SPRN_DAWR1
@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE
        ld      r7, STACK_SLOT_HFSCR(r1)
        mtspr   SPRN_HFSCR, r7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+       ld      r5, STACK_SLOT_FSCR(r1)
+       mtspr   SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        /*
         * Restore various registers to 0, where non-zero values
         * set by the guest could disrupt the host.
index 1fd31b4b0e139e36715f49fbd2928f5384d6ed3b..fe26f2fa0f3f8b81fcefc87e092b24725ea50a67 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
 #include <asm/cputable.h>
 #include <asm/code-patching.h>
 #include <asm/page.h>
@@ -149,17 +150,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
 
                pr_devel("patching dest %lx\n", (unsigned long)dest);
 
-               patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
-               if (types & STF_BARRIER_FALLBACK)
+               // See comment in do_entry_flush_fixups() RE order of patching
+               if (types & STF_BARRIER_FALLBACK) {
+                       patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+                       patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
                        patch_branch((struct ppc_inst *)(dest + 1),
-                                    (unsigned long)&stf_barrier_fallback,
-                                    BRANCH_SET_LINK);
-               else
-                       patch_instruction((struct ppc_inst *)(dest + 1),
-                                         ppc_inst(instrs[1]));
-
-               patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                                    (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
+               } else {
+                       patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+                       patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                       patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+               }
        }
 
        printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
@@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
                                                           : "unknown");
 }
 
+static int __do_stf_barrier_fixups(void *data)
+{
+       enum stf_barrier_type *types = data;
+
+       do_stf_entry_barrier_fixups(*types);
+       do_stf_exit_barrier_fixups(*types);
+
+       return 0;
+}
 
 void do_stf_barrier_fixups(enum stf_barrier_type types)
 {
-       do_stf_entry_barrier_fixups(types);
-       do_stf_exit_barrier_fixups(types);
+       /*
+        * The call to the fallback entry flush, and the fallback/sync-ori exit
+        * flush can not be safely patched in/out while other CPUs are executing
+        * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
+        * spin in the stop machine core with interrupts hard disabled.
+        */
+       stop_machine(__do_stf_barrier_fixups, &types, NULL);
 }
 
 void do_uaccess_flush_fixups(enum l1d_flush_type types)
@@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
                                                : "unknown");
 }
 
-void do_entry_flush_fixups(enum l1d_flush_type types)
+static int __do_entry_flush_fixups(void *data)
 {
+       enum l1d_flush_type types = *(enum l1d_flush_type *)data;
        unsigned int instrs[3], *dest;
        long *start, *end;
        int i;
@@ -309,6 +325,31 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        if (types & L1D_FLUSH_MTTRIG)
                instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
 
+       /*
+        * If we're patching in or out the fallback flush we need to be careful about the
+        * order in which we patch instructions. That's because it's possible we could
+        * take a page fault after patching one instruction, so the sequence of
+        * instructions must be safe even in a half patched state.
+        *
+        * To make that work, when patching in the fallback flush we patch in this order:
+        *  - the mflr          (dest)
+        *  - the mtlr          (dest + 2)
+        *  - the branch        (dest + 1)
+        *
+        * That ensures the sequence is safe to execute at any point. In contrast if we
+        * patch the mtlr last, it's possible we could return from the branch and not
+        * restore LR, leading to a crash later.
+        *
+        * When patching out the fallback flush (either with nops or another flush type),
+        * we patch in this order:
+        *  - the branch        (dest + 1)
+        *  - the mtlr          (dest + 2)
+        *  - the mflr          (dest)
+        *
+        * Note we are protected by stop_machine() from other CPUs executing the code in a
+        * semi-patched state.
+        */
+
        start = PTRRELOC(&__start___entry_flush_fixup);
        end = PTRRELOC(&__stop___entry_flush_fixup);
        for (i = 0; start < end; start++, i++) {
@@ -316,15 +357,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 
                pr_devel("patching dest %lx\n", (unsigned long)dest);
 
-               patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
-               if (types == L1D_FLUSH_FALLBACK)
-                       patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
-                                    BRANCH_SET_LINK);
-               else
+               if (types == L1D_FLUSH_FALLBACK) {
+                       patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+                       patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                       patch_branch((struct ppc_inst *)(dest + 1),
+                                    (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
+               } else {
                        patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
-               patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                       patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                       patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+               }
        }
 
        start = PTRRELOC(&__start___scv_entry_flush_fixup);
@@ -334,15 +376,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 
                pr_devel("patching dest %lx\n", (unsigned long)dest);
 
-               patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
-               if (types == L1D_FLUSH_FALLBACK)
-                       patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
-                                    BRANCH_SET_LINK);
-               else
+               if (types == L1D_FLUSH_FALLBACK) {
+                       patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+                       patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                       patch_branch((struct ppc_inst *)(dest + 1),
+                                    (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
+               } else {
                        patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
-               patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                       patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+                       patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+               }
        }
 
 
@@ -354,6 +397,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
                                                        : "ori type" :
                (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
                                                : "unknown");
+
+       return 0;
+}
+
+void do_entry_flush_fixups(enum l1d_flush_type types)
+{
+       /*
+        * The call to the fallback flush can not be safely patched in/out while
+        * other CPUs are executing it. So call __do_entry_flush_fixups() on one
+        * CPU while all other CPUs spin in the stop machine core with interrupts
+        * hard disabled.
+        */
+       stop_machine(__do_entry_flush_fixups, &types, NULL);
 }
 
 void do_rfi_flush_fixups(enum l1d_flush_type types)
index 2136e42833af3fa1fcb9a408baa93fa2782e9f09..8a2b8d64265bc2c252b8c526f8932bcccf6d03e7 100644 (file)
@@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1);                                             \
 #define HCALL_BRANCH(LABEL)
 #endif
 
+_GLOBAL_TOC(plpar_hcall_norets_notrace)
+       HMT_MEDIUM
+
+       mfcr    r0
+       stw     r0,8(r1)
+       HVSC                            /* invoke the hypervisor */
+       lwz     r0,8(r1)
+       mtcrf   0xff,r0
+       blr                             /* return r3 = status */
+
 _GLOBAL_TOC(plpar_hcall_norets)
        HMT_MEDIUM
 
index 1f3152ad7213262987c51ec10d9b46cdcbc032c0..dab356e3ff87c42a822ac2886a2c2ff885bb540f 100644 (file)
@@ -1829,30 +1829,28 @@ void hcall_tracepoint_unregfunc(void)
 #endif
 
 /*
- * Since the tracing code might execute hcalls we need to guard against
- * recursion. One example of this are spinlocks calling H_YIELD on
- * shared processor partitions.
+ * Keep track of hcall tracing depth and prevent recursion. Warn if any is
+ * detected because it may indicate a problem. This will not catch all
+ * problems with tracing code making hcalls, because the tracing might have
+ * been invoked from a non-hcall, so the first hcall could recurse into it
+ * without warning here, but this better than nothing.
+ *
+ * Hcalls with specific problems being traced should use the _notrace
+ * plpar_hcall variants.
  */
 static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
 
 
-void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
        unsigned long flags;
        unsigned int *depth;
 
-       /*
-        * We cannot call tracepoints inside RCU idle regions which
-        * means we must not trace H_CEDE.
-        */
-       if (opcode == H_CEDE)
-               return;
-
        local_irq_save(flags);
 
        depth = this_cpu_ptr(&hcall_trace_depth);
 
-       if (*depth)
+       if (WARN_ON_ONCE(*depth))
                goto out;
 
        (*depth)++;
@@ -1864,19 +1862,16 @@ out:
        local_irq_restore(flags);
 }
 
-void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
 {
        unsigned long flags;
        unsigned int *depth;
 
-       if (opcode == H_CEDE)
-               return;
-
        local_irq_save(flags);
 
        depth = this_cpu_ptr(&hcall_trace_depth);
 
-       if (*depth)
+       if (*depth) /* Don't warn again on the way out */
                goto out;
 
        (*depth)++;
index a8ad8eb761206ab51d29f0351f6606048a867bb6..18ec0f9bb8d5c445b26345fca66154a0559be097 100644 (file)
@@ -34,6 +34,7 @@ config RISCV
        select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
        select ARCH_SUPPORTS_HUGETLBFS if MMU
+       select ARCH_USE_MEMTEST
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
@@ -60,11 +61,11 @@ config RISCV
        select GENERIC_TIME_VSYSCALL if MMU && 64BIT
        select HANDLE_DOMAIN_IRQ
        select HAVE_ARCH_AUDITSYSCALL
-       select HAVE_ARCH_JUMP_LABEL
-       select HAVE_ARCH_JUMP_LABEL_RELATIVE
+       select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+       select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && 64BIT
        select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
-       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_KGDB if !XIP_KERNEL
        select HAVE_ARCH_KGDB_QXFER_PKT
        select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_SECCOMP_FILTER
@@ -79,9 +80,9 @@ config RISCV
        select HAVE_GCC_PLUGINS
        select HAVE_GENERIC_VDSO if MMU && 64BIT
        select HAVE_IRQ_TIME_ACCOUNTING
-       select HAVE_KPROBES
-       select HAVE_KPROBES_ON_FTRACE
-       select HAVE_KRETPROBES
+       select HAVE_KPROBES if !XIP_KERNEL
+       select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+       select HAVE_KRETPROBES if !XIP_KERNEL
        select HAVE_PCI
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
@@ -230,11 +231,11 @@ config ARCH_RV64I
        bool "RV64I"
        select 64BIT
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
-       select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+       select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
-       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
        select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select SWIOTLB if MMU
 
 endchoice
index 3eb9590a077591af541e79eebe83785e170955bc..4be02069542894aa3d299bb374b66531338ecaed 100644 (file)
@@ -38,6 +38,15 @@ else
        KBUILD_LDFLAGS += -melf32lriscv
 endif
 
+ifeq ($(CONFIG_LD_IS_LLD),y)
+       KBUILD_CFLAGS += -mno-relax
+       KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+       KBUILD_CFLAGS += -Wa,-mno-relax
+       KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
 # ISA string setting
 riscv-march-$(CONFIG_ARCH_RV32I)       := rv32ima
 riscv-march-$(CONFIG_ARCH_RV64I)       := rv64ima
index 622b12771fd3fd31a2c96be14769d17baae3b20d..855c1502d912bac524dc8e53b612193c73d65b08 100644 (file)
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
index 74c47fe9fc22e037c6eb441855fa030fa89a7f4f..d90e4eb0ade84a4fdcba0a1f65fc00584866fd30 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
                            hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
index bdd5fc843b8ee20abd07e3cfcdce784c8acb1d75..2fde48db0619aa43270a870dea192ddd1485af06 100644 (file)
@@ -1,2 +1,2 @@
-obj-y += errata_cip_453.o
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
 obj-y += errata.o
index 88c08705f64aad4d380cdbe298118b675b0a6045..67406c37638903a24ef420e9bd679678050bb1e0 100644 (file)
@@ -51,7 +51,7 @@
        REG_ASM " " newlen "\n" \
        ".word " errata_id "\n"
 
-#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
        ".if " __stringify(enable) " == 1\n"                            \
        ".pushsection .alternative, \"a\"\n"                            \
        ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
@@ -69,7 +69,7 @@
        "886 :\n"       \
        old_c "\n"      \
        "887 :\n"       \
-       ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c)
+       ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
 
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
        __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
index 1e954101906ac7a24cdb81634be86720cfea45ee..e4e291d40759f003143c491e2baf3ee742e37bb2 100644 (file)
@@ -42,8 +42,8 @@ struct kimage_arch {
        unsigned long fdt_addr;
 };
 
-const extern unsigned char riscv_kexec_relocate[];
-const extern unsigned int riscv_kexec_relocate_size;
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
 
 typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
                                   unsigned long jump_addr,
index cc048143fba55fe80d73e15a3f6e871edea172d4..9e99e1db156bc8ebd61b30f4ba006907b1b0e514 100644 (file)
@@ -14,8 +14,9 @@
 #include <asm/set_memory.h>    /* For set_memory_x() */
 #include <linux/compiler.h>    /* For unreachable() */
 #include <linux/cpu.h>         /* For cpu_down() */
+#include <linux/reboot.h>
 
-/**
+/*
  * kexec_image_info - Print received image details
  */
 static void
@@ -39,7 +40,7 @@ kexec_image_info(const struct kimage *image)
        }
 }
 
-/**
+/*
  * machine_kexec_prepare - Initialize kexec
  *
  * This function is called from do_kexec_load, when the user has
@@ -100,7 +101,7 @@ machine_kexec_prepare(struct kimage *image)
 }
 
 
-/**
+/*
  * machine_kexec_cleanup - Cleanup any leftovers from
  *                        machine_kexec_prepare
  *
@@ -135,7 +136,7 @@ void machine_shutdown(void)
 #endif
 }
 
-/**
+/*
  * machine_crash_shutdown - Prepare to kexec after a kernel crash
  *
  * This function is called by crash_kexec just before machine_kexec
@@ -151,7 +152,7 @@ machine_crash_shutdown(struct pt_regs *regs)
        pr_info("Starting crashdump kernel...\n");
 }
 
-/**
+/*
  * machine_kexec - Jump to the loaded kimage
  *
  * This function is called by kernel_kexec which is called by the
index 10b965c345366a4341ba9089033a2ee1ec3f8058..15cc65ac7ca6510668df49900af15d78aae86256 100644 (file)
@@ -84,6 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        return 0;
 }
 
+#ifdef CONFIG_MMU
 void *alloc_insn_page(void)
 {
        return  __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
@@ -91,6 +92,7 @@ void *alloc_insn_page(void)
                                     VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                     __builtin_return_address(0));
 }
+#endif
 
 /* install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
index 03901d3a8b02734300d6c719ec04573a1a4e2674..9a1b7a0603b28fbce08bbfa35938792da17def54 100644 (file)
@@ -231,13 +231,13 @@ static void __init init_resources(void)
 
        /* Clean-up any unused pre-allocated resources */
        mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
        return;
 
  error:
        /* Better an empty resource tree than an inconsistent one */
        release_child_resources(&iomem_resource);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
 }
 
 
index 2b3e0cb90d789bec4bd0b84acadc0d91b4cb4cd2..bde85fc53357f8a0369fe20dc4f55b9926ac1175 100644 (file)
@@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
-       } else if (task == NULL || task == current) {
-               fp = (unsigned long)__builtin_frame_address(0);
-               sp = sp_in_global;
-               pc = (unsigned long)walk_stackframe;
+       } else if (task == current) {
+               fp = (unsigned long)__builtin_frame_address(1);
+               sp = (unsigned long)__builtin_frame_address(0);
+               pc = (unsigned long)__builtin_return_address(0);
        } else {
                /* task blocked in __switch_to */
                fp = task->thread.s[0];
@@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
        return true;
 }
 
-void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
                    const char *loglvl)
 {
-       pr_cont("%sCall Trace:\n", loglvl);
        walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
 }
 
 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 {
+       pr_cont("%sCall Trace:\n", loglvl);
        dump_backtrace(NULL, task, loglvl);
 }
 
@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *task)
 
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs)
 {
        walk_stackframe(task, regs, consume_entry, cookie);
index 0721b979859515738875e4e4ea6b9308fa915125..7bc88d8aab97fbbbb9db62b4ece7b7964a4b26a0 100644 (file)
@@ -86,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
        }
 }
 
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section         __section(".xip.traps")
+#else
+#define __trap_section
+#endif
 #define DO_ERROR_INFO(name, signo, code, str)                          \
-asmlinkage __visible void name(struct pt_regs *regs)                   \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs)    \
 {                                                                      \
        do_trap_error(regs, signo, code, regs->epc, "Oops - " str);     \
 }
@@ -111,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
 int handle_misaligned_load(struct pt_regs *regs);
 int handle_misaligned_store(struct pt_regs *regs);
 
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
 {
        if (!handle_misaligned_load(regs))
                return;
@@ -119,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
                      "Oops - load address misaligned");
 }
 
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
 {
        if (!handle_misaligned_store(regs))
                return;
@@ -146,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
        return GET_INSN_LENGTH(insn);
 }
 
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
        if (kprobe_single_step_handler(regs))
index 4b29b9917f99df8664897efce56f9859fa2238c7..a3ff09c4c3f919051d859bbf2887512a74ba2b51 100644 (file)
@@ -99,9 +99,22 @@ SECTIONS
        }
        PERCPU_SECTION(L1_CACHE_BYTES)
 
-       . = ALIGN(PAGE_SIZE);
+       . = ALIGN(8);
+       .alternative : {
+               __alt_start = .;
+               *(.alternative)
+               __alt_end = .;
+       }
        __init_end = .;
 
+       . = ALIGN(16);
+       .xip.traps : {
+               __xip_traps_start = .;
+               *(.xip.traps)
+               __xip_traps_end = .;
+       }
+
+       . = ALIGN(PAGE_SIZE);
        .sdata : {
                __global_pointer$ = . + 0x800;
                *(.sdata*)
index 4faf8bd157eaa986fb6b70238d9c6773e69a164d..4c4c92ce0bb81fcaf58c4a389c519816abf6ad5b 100644 (file)
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
        unsigned long init_data_start = (unsigned long)__init_data_begin;
        unsigned long rodata_start = (unsigned long)__start_rodata;
        unsigned long data_start = (unsigned long)_data;
-       unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+       unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+       unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
 
        set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
        set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
        set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
        /* rodata section is marked readonly in mark_rodata_ro */
        set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-       set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+       set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
 }
 
 void mark_rodata_ro(void)
index 7e4a2aba366df088e153856f93f94cb0be0a2b4e..0690263df1dd0b58268941074441cb4e820744bc 100644 (file)
 440  common    process_madvise         sys_process_madvise             sys_process_madvise
 441  common    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
 442  common    mount_setattr           sys_mount_setattr               sys_mount_setattr
-443  common    quotactl_path           sys_quotactl_path               sys_quotactl_path
+# 443 reserved for quotactl_path
 444  common    landlock_create_ruleset sys_landlock_create_ruleset     sys_landlock_create_ruleset
 445  common    landlock_add_rule       sys_landlock_add_rule           sys_landlock_add_rule
 446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
index f47a0dc5544551961efff6287c860ab347f90f05..0b91499ebdcfc2f67dfd597a66186cfea4d64280 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index f5beecdac69382f2d719fa33d50b9d58e22f6ff8..e76b221570999776e3bc9276d6b2fd60b9132e94 100644 (file)
@@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
 
 BUILD_TRAP_HANDLER(nmi)
 {
-       unsigned int cpu = smp_processor_id();
        TRAP_HANDLER_DECL;
 
        arch_ftrace_nmi_enter();
index b9e1c0e735b72b6ea50a862e401dd3afa39eb7f7..e34cc30ef22ced28c550699ab9973ac6b955a90e 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index c77c5d8a7b3eb181084daad2a2620eec1ffe16c9..cb5e8d39cac156421b32fbd37e8a7c437e0c8e26 100644 (file)
@@ -178,11 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
        KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
 endif
 
-ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-                  -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
-endif
-
 # Workaround for a gcc prelease that unfortunately was shipped in a suse release
 KBUILD_CFLAGS += -Wno-sign-compare
 #
@@ -202,7 +197,13 @@ ifdef CONFIG_RETPOLINE
   endif
 endif
 
-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
+
+ifdef CONFIG_LTO_CLANG
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
+endif
 
 ifdef CONFIG_X86_NEED_RELOCS
 LDFLAGS_vmlinux := --emit-relocs --discard-none
index 6e5522aebbbd467e17df99a91513caff232056ad..431bf7f846c3caeb643a5e0b8ed200f4a55a34f5 100644 (file)
@@ -30,6 +30,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 
 KBUILD_CFLAGS := -m$(BITS) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+KBUILD_CFLAGS += -Wundef
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone
@@ -48,10 +49,10 @@ KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
 KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
 KBUILD_CFLAGS += $(CLANG_FLAGS)
 
-# sev-es.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly inludes inat-table.h which is generated during
 # compilation and stored in $(objtree). Add the directory to the includes so
 # that the compiler finds it even with out-of-tree builds (make O=/some/path).
-CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/
+CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
 
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
@@ -93,7 +94,7 @@ ifdef CONFIG_X86_64
        vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
        vmlinux-objs-y += $(obj)/mem_encrypt.o
        vmlinux-objs-y += $(obj)/pgtable_64.o
-       vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
+       vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
index dde042f64ccaa4b41fe983d4d6ad20416392a731..743f13ea25c12898ac8c9d7a121262cd756bb9e2 100644 (file)
@@ -172,7 +172,7 @@ void __puthex(unsigned long value)
        }
 }
 
-#if CONFIG_X86_NEED_RELOCS
+#ifdef CONFIG_X86_NEED_RELOCS
 static void handle_relocations(void *output, unsigned long output_len,
                               unsigned long virt_addr)
 {
index e5612f035498c15d2762911807ad4c518638e390..31139256859fc0a37c6d846a337d1d5ea6a5d66a 100644 (file)
@@ -79,7 +79,7 @@ struct mem_vector {
        u64 size;
 };
 
-#if CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_RANDOMIZE_BASE
 /* kaslr.c */
 void choose_random_location(unsigned long input,
                            unsigned long input_size,
similarity index 98%
rename from arch/x86/boot/compressed/sev-es.c
rename to arch/x86/boot/compressed/sev.c
index 82041bd380e561171e105d12a953cd0c9a6ebd91..670e998fe93062e5dd2aaf97e97bf0554c7a8fa9 100644 (file)
@@ -13,7 +13,7 @@
 #include "misc.h"
 
 #include <asm/pgtable_types.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 #include <asm/trapnr.h>
 #include <asm/trap_pf.h>
 #include <asm/msr-index.h>
@@ -117,7 +117,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 #include "../../lib/insn.c"
 
 /* Include code for early handlers */
-#include "../../kernel/sev-es-shared.c"
+#include "../../kernel/sev-shared.c"
 
 static bool early_setup_sev_es(void)
 {
index 28a1423ce32ee54e4e9b193d8f104b828acb5c96..4bbc267fb36bb9c8d07df4d4bd0b773f6bb1dc34 100644 (file)
 440    i386    process_madvise         sys_process_madvise
 441    i386    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
 442    i386    mount_setattr           sys_mount_setattr
-443    i386    quotactl_path           sys_quotactl_path
+# 443 reserved for quotactl_path
 444    i386    landlock_create_ruleset sys_landlock_create_ruleset
 445    i386    landlock_add_rule       sys_landlock_add_rule
 446    i386    landlock_restrict_self  sys_landlock_restrict_self
index ecd551b08d05256af7235764c72d711460b2d58a..ce18119ea0d0f0c6deaf54959a9a7de1ed2e89ef 100644 (file)
 440    common  process_madvise         sys_process_madvise
 441    common  epoll_pwait2            sys_epoll_pwait2
 442    common  mount_setattr           sys_mount_setattr
-443    common  quotactl_path           sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset sys_landlock_create_ruleset
 445    common  landlock_add_rule       sys_landlock_add_rule
 446    common  landlock_restrict_self  sys_landlock_restrict_self
index 8e509325c2c3d792792e76378898850c15111892..8f71dd72ef95fd6a583f1ee7d346fc15e3b5772b 100644 (file)
@@ -396,10 +396,12 @@ int x86_reserve_hardware(void)
        if (!atomic_inc_not_zero(&pmc_refcount)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&pmc_refcount) == 0) {
-                       if (!reserve_pmc_hardware())
+                       if (!reserve_pmc_hardware()) {
                                err = -EBUSY;
-                       else
+                       } else {
                                reserve_ds_buffers();
+                               reserve_lbr_buffers();
+                       }
                }
                if (!err)
                        atomic_inc(&pmc_refcount);
index 2521d03de5e02e9f347e09edda033cbd0a6e9ede..e28892270c5803d421571aa207798e6740276ac8 100644 (file)
@@ -6253,7 +6253,7 @@ __init int intel_pmu_init(void)
         * Check all LBT MSR here.
         * Disable LBR access if any LBR MSRs can not be accessed.
         */
-       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+       if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
                x86_pmu.lbr_nr = 0;
        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
index 76dbab6ac9fbe3a82a1c6520eeea04354b2582ed..4409d2cccfda574fce38c8bef90ea97156313394 100644 (file)
@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
 
 void intel_pmu_lbr_add(struct perf_event *event)
 {
-       struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (!x86_pmu.lbr_nr)
@@ -696,11 +695,6 @@ void intel_pmu_lbr_add(struct perf_event *event)
        perf_sched_cb_inc(event->ctx->pmu);
        if (!cpuc->lbr_users++ && !event->total_time_running)
                intel_pmu_lbr_reset();
-
-       if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
-           kmem_cache && !cpuc->lbr_xsave &&
-           (cpuc->lbr_users != cpuc->lbr_pebs_users))
-               cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
 }
 
 void release_lbr_buffers(void)
@@ -722,6 +716,26 @@ void release_lbr_buffers(void)
        }
 }
 
+void reserve_lbr_buffers(void)
+{
+       struct kmem_cache *kmem_cache;
+       struct cpu_hw_events *cpuc;
+       int cpu;
+
+       if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+               return;
+
+       for_each_possible_cpu(cpu) {
+               cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+               kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+               if (!kmem_cache || cpuc->lbr_xsave)
+                       continue;
+
+               cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL,
+                                                       cpu_to_node(cpu));
+       }
+}
+
 void intel_pmu_lbr_del(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
index 63f097289a84cce8e63e80d4a851bce41d82c22d..3a75a2c601c2af8b4ab18836a661ce531fba4725 100644 (file)
@@ -1406,6 +1406,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
                                                die_id = i;
                                        else
                                                die_id = topology_phys_to_logical_pkg(i);
+                                       if (die_id < 0)
+                                               die_id = -ENODEV;
                                        map->pbus_to_dieid[bus] = die_id;
                                        break;
                                }
@@ -1452,14 +1454,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
                        i = -1;
                        if (reverse) {
                                for (bus = 255; bus >= 0; bus--) {
-                                       if (map->pbus_to_dieid[bus] >= 0)
+                                       if (map->pbus_to_dieid[bus] != -1)
                                                i = map->pbus_to_dieid[bus];
                                        else
                                                map->pbus_to_dieid[bus] = i;
                                }
                        } else {
                                for (bus = 0; bus <= 255; bus++) {
-                                       if (map->pbus_to_dieid[bus] >= 0)
+                                       if (map->pbus_to_dieid[bus] != -1)
                                                i = map->pbus_to_dieid[bus];
                                        else
                                                map->pbus_to_dieid[bus] = i;
@@ -5097,9 +5099,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
        .perf_ctr       = SNR_M2M_PCI_PMON_CTR0,
        .event_ctl      = SNR_M2M_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
+       .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
        .box_ctl        = SNR_M2M_PCI_PMON_BOX_CTL,
        .ops            = &snr_m2m_uncore_pci_ops,
-       .format_group   = &skx_uncore_format_group,
+       .format_group   = &snr_m2m_uncore_format_group,
 };
 
 static struct attribute *icx_upi_uncore_formats_attr[] = {
index 27fa85e7d4fda0a56ca34323f9f9c29bacf68ae4..ad87cb36f7c81c1ee67f82b108d91738cc17c806 100644 (file)
@@ -1244,6 +1244,8 @@ void reserve_ds_buffers(void);
 
 void release_lbr_buffers(void);
 
+void reserve_lbr_buffers(void);
+
 extern struct event_constraint bts_constraint;
 extern struct event_constraint vlbr_constraint;
 
@@ -1393,6 +1395,10 @@ static inline void release_lbr_buffers(void)
 {
 }
 
+static inline void reserve_lbr_buffers(void)
+{
+}
+
 static inline int intel_pmu_init(void)
 {
        return 0;
index 412b51e059c80674977968d0f3d1cece99075a2b..48067af946785b1569a802e3e2811eb67fbd7386 100644 (file)
@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
 extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 extern void lapic_assign_system_vectors(void);
 extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
 extern void lapic_online(void);
 extern void lapic_offline(void);
 extern bool apic_needs_pit(void);
index b7dd944dc8673f908b3227246622629318ce59e9..8f28fafa98b32e6f9b775f4c8b2d16713f9b7686 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD        0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD         (1 << (X86_FEATURE_ENQCMD & 31))
 
 #ifdef CONFIG_X86_SGX
 # define DISABLE_SGX   0
index ed33a14188f6642bea84771f65792f8dd457905f..23bef08a8388035e0c8723a6c54048bed8ddd206 100644 (file)
@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
  */
 #define PASID_DISABLED 0
 
-#ifdef CONFIG_IOMMU_SUPPORT
-/* Update current's PASID MSR/state by mm's PASID. */
-void update_pasid(void);
-#else
 static inline void update_pasid(void) { }
-#endif
+
 #endif /* _ASM_X86_FPU_API_H */
index 8d33ad80704f264ba4db632dde22a7e5f6caa816..ceeba9f6317222ba114bcd213fc6f55e7fb4e504 100644 (file)
@@ -584,13 +584,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
                        pkru_val = pk->pkru;
        }
        __write_pkru(pkru_val);
-
-       /*
-        * Expensive PASID MSR write will be avoided in update_pasid() because
-        * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
-        * unless it's different from mm->pasid to reduce overhead.
-        */
-       update_pasid();
 }
 
 #endif /* _ASM_X86_FPU_INTERNAL_H */
index 323641097f63aa697bcc6aefb621252cce61849c..e7bef91cee04ab6e1d23345561f94bcc76428727 100644 (file)
@@ -99,6 +99,7 @@ KVM_X86_OP_NULL(post_block)
 KVM_X86_OP_NULL(vcpu_blocking)
 KVM_X86_OP_NULL(vcpu_unblocking)
 KVM_X86_OP_NULL(update_pi_irte)
+KVM_X86_OP_NULL(start_assignment)
 KVM_X86_OP_NULL(apicv_post_state_restore)
 KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
 KVM_X86_OP_NULL(set_hv_timer)
index cbbcee0a84f922349fb02451309e7134e0b4969b..9c7ced0e31718976c5744ea6c8d366a7a9c7882a 100644 (file)
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
 
 #define UNMAPPED_GVA (~(gpa_t)0)
+#define INVALID_GPA (~(gpa_t)0)
 
 /* KVM Hugepage definitions for x86 */
 #define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
@@ -199,6 +200,7 @@ enum x86_intercept_stage;
 
 #define KVM_NR_DB_REGS 4
 
+#define DR6_BUS_LOCK   (1 << 11)
 #define DR6_BD         (1 << 13)
 #define DR6_BS         (1 << 14)
 #define DR6_BT         (1 << 15)
@@ -212,7 +214,7 @@ enum x86_intercept_stage;
  * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
  */
 #define DR6_ACTIVE_LOW 0xffff0ff0
-#define DR6_VOLATILE   0x0001e00f
+#define DR6_VOLATILE   0x0001e80f
 #define DR6_FIXED_1    (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
 
 #define DR7_BP_EN_MASK 0x000000ff
@@ -407,7 +409,7 @@ struct kvm_mmu {
        u32 pkru_mask;
 
        u64 *pae_root;
-       u64 *lm_root;
+       u64 *pml4_root;
 
        /*
         * check zero bits on shadow page table entries, these
@@ -1350,6 +1352,7 @@ struct kvm_x86_ops {
 
        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
+       void (*start_assignment)(struct kvm *kvm);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
        bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
@@ -1417,6 +1420,7 @@ struct kvm_arch_async_pf {
        bool direct_map;
 };
 
+extern u32 __read_mostly kvm_nr_uret_msrs;
 extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
 extern struct kvm_x86_ops kvm_x86_ops;
@@ -1775,9 +1779,15 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit);
 
-void kvm_define_user_return_msr(unsigned index, u32 msr);
+int kvm_add_user_return_msr(u32 msr);
+int kvm_find_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
 
+static inline bool kvm_is_supported_user_return_msr(u32 msr)
+{
+       return kvm_find_user_return_msr(msr) >= 0;
+}
+
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
 
index 338119852512654b51d6c01befa7af58e6621782..69299878b200a2306e6654b60e6a9bb762c846cf 100644 (file)
@@ -7,8 +7,6 @@
 #include <linux/interrupt.h>
 #include <uapi/asm/kvm_para.h>
 
-extern void kvmclock_init(void);
-
 #ifdef CONFIG_KVM_GUEST
 bool kvm_check_and_clear_guest_paused(void);
 #else
@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 }
 
 #ifdef CONFIG_KVM_GUEST
+void kvmclock_init(void);
+void kvmclock_disable(void);
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
 unsigned int kvm_arch_para_hints(void);
 void kvm_async_pf_task_wait_schedule(u32 token);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_apf_flags(void);
-void kvm_disable_steal_time(void);
 bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
 
 DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
        return 0;
 }
 
-static inline void kvm_disable_steal_time(void)
-{
-       return;
-}
-
 static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
        return false;
index 742d89a00721dce1a0bd8ea27e70a6bf90397c51..211ba3375ee9602b4671df659ee599b830be13fa 100644 (file)
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1                        0xc001001a
 #define MSR_K8_TOP_MEM2                        0xc001001d
-#define MSR_K8_SYSCFG                  0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT  23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT      BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG               0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT       23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT   BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
 #define MSR_K8_INT_PENDING_MSG         0xc0010055
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK                0x18000000
index 154321d29050f605721c51fab97237374a8d4ee5..556b2b17c3e2f7ed3580de676917a184d20d2124 100644 (file)
@@ -787,8 +787,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
 #ifdef CONFIG_CPU_SUP_AMD
 extern u32 amd_get_nodes_per_socket(void);
+extern u32 amd_get_highest_perf(void);
 #else
 static inline u32 amd_get_nodes_per_socket(void)       { return 0; }
+static inline u32 amd_get_highest_perf(void)           { return 0; }
 #endif
 
 static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
new file mode 100644 (file)
index 0000000..629c3df
--- /dev/null
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD SEV header common between the guest and the hypervisor.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __ASM_X86_SEV_COMMON_H
+#define __ASM_X86_SEV_COMMON_H
+
+#define GHCB_MSR_INFO_POS              0
+#define GHCB_MSR_INFO_MASK             (BIT_ULL(12) - 1)
+
+#define GHCB_MSR_SEV_INFO_RESP         0x001
+#define GHCB_MSR_SEV_INFO_REQ          0x002
+#define GHCB_MSR_VER_MAX_POS           48
+#define GHCB_MSR_VER_MAX_MASK          0xffff
+#define GHCB_MSR_VER_MIN_POS           32
+#define GHCB_MSR_VER_MIN_MASK          0xffff
+#define GHCB_MSR_CBIT_POS              24
+#define GHCB_MSR_CBIT_MASK             0xff
+#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)                           \
+       ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |   \
+        (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |   \
+        (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |        \
+        GHCB_MSR_SEV_INFO_RESP)
+#define GHCB_MSR_INFO(v)               ((v) & 0xfffUL)
+#define GHCB_MSR_PROTO_MAX(v)          (((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
+#define GHCB_MSR_PROTO_MIN(v)          (((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
+
+#define GHCB_MSR_CPUID_REQ             0x004
+#define GHCB_MSR_CPUID_RESP            0x005
+#define GHCB_MSR_CPUID_FUNC_POS                32
+#define GHCB_MSR_CPUID_FUNC_MASK       0xffffffff
+#define GHCB_MSR_CPUID_VALUE_POS       32
+#define GHCB_MSR_CPUID_VALUE_MASK      0xffffffff
+#define GHCB_MSR_CPUID_REG_POS         30
+#define GHCB_MSR_CPUID_REG_MASK                0x3
+#define GHCB_CPUID_REQ_EAX             0
+#define GHCB_CPUID_REQ_EBX             1
+#define GHCB_CPUID_REQ_ECX             2
+#define GHCB_CPUID_REQ_EDX             3
+#define GHCB_CPUID_REQ(fn, reg)                \
+               (GHCB_MSR_CPUID_REQ | \
+               (((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
+               (((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
+
+#define GHCB_MSR_TERM_REQ              0x100
+#define GHCB_MSR_TERM_REASON_SET_POS   12
+#define GHCB_MSR_TERM_REASON_SET_MASK  0xf
+#define GHCB_MSR_TERM_REASON_POS       16
+#define GHCB_MSR_TERM_REASON_MASK      0xff
+#define GHCB_SEV_TERM_REASON(reason_set, reason_val)                                             \
+       (((((u64)reason_set) &  GHCB_MSR_TERM_REASON_SET_MASK) << GHCB_MSR_TERM_REASON_SET_POS) | \
+       ((((u64)reason_val) & GHCB_MSR_TERM_REASON_MASK) << GHCB_MSR_TERM_REASON_POS))
+
+#define GHCB_SEV_ES_REASON_GENERAL_REQUEST     0
+#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED        1
+
+#define GHCB_RESP_CODE(v)              ((v) & GHCB_MSR_INFO_MASK)
+
+#endif
similarity index 70%
rename from arch/x86/include/asm/sev-es.h
rename to arch/x86/include/asm/sev.h
index cf1d957c7091949adde1237fdb656e3a5e927ff5..fa5cd05d3b5beeaefcc175c6f97b46085a77c5a6 100644 (file)
 
 #include <linux/types.h>
 #include <asm/insn.h>
+#include <asm/sev-common.h>
 
-#define GHCB_SEV_INFO          0x001UL
-#define GHCB_SEV_INFO_REQ      0x002UL
-#define                GHCB_INFO(v)            ((v) & 0xfffUL)
-#define                GHCB_PROTO_MAX(v)       (((v) >> 48) & 0xffffUL)
-#define                GHCB_PROTO_MIN(v)       (((v) >> 32) & 0xffffUL)
-#define                GHCB_PROTO_OUR          0x0001UL
-#define GHCB_SEV_CPUID_REQ     0x004UL
-#define                GHCB_CPUID_REQ_EAX      0
-#define                GHCB_CPUID_REQ_EBX      1
-#define                GHCB_CPUID_REQ_ECX      2
-#define                GHCB_CPUID_REQ_EDX      3
-#define                GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
-                                       (((unsigned long)reg & 3) << 30) | \
-                                       (((unsigned long)fn) << 32))
+#define GHCB_PROTO_OUR         0x0001UL
+#define GHCB_PROTOCOL_MAX      1ULL
+#define GHCB_DEFAULT_USAGE     0ULL
 
-#define        GHCB_PROTOCOL_MAX       0x0001UL
-#define GHCB_DEFAULT_USAGE     0x0000UL
-
-#define GHCB_SEV_CPUID_RESP    0x005UL
-#define GHCB_SEV_TERMINATE     0x100UL
-#define                GHCB_SEV_TERMINATE_REASON(reason_set, reason_val)       \
-                       (((((u64)reason_set) &  0x7) << 12) |           \
-                        ((((u64)reason_val) & 0xff) << 16))
-#define                GHCB_SEV_ES_REASON_GENERAL_REQUEST      0
-#define                GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
-
-#define        GHCB_SEV_GHCB_RESP_CODE(v)      ((v) & 0xfff)
 #define        VMGEXIT()                       { asm volatile("rep; vmmcall\n\r"); }
 
 enum es_result {
index ddbdefd5b94f1024dab964540b00b787a81031af..91a7b6687c3b9e380ea41493372f2b75850c9425 100644 (file)
@@ -3,11 +3,13 @@
 #define _ASM_X86_THERMAL_H
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
+void therm_lvt_init(void);
 void intel_init_thermal(struct cpuinfo_x86 *c);
 bool x86_thermal_enabled(void);
 void intel_thermal_interrupt(void);
 #else
-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+static inline void therm_lvt_init(void)                                { }
+static inline void intel_init_thermal(struct cpuinfo_x86 *c)   { }
 #endif
 
 #endif /* _ASM_X86_THERMAL_H */
index 119ac8612d893164634f76e46f674f435bccbd87..136e5e57cfe112ddd486a71d0def80413fe15f07 100644 (file)
@@ -7,4 +7,6 @@
        VDSO_CLOCKMODE_PVCLOCK, \
        VDSO_CLOCKMODE_HVCLOCK
 
+#define HAVE_VDSO_CLOCKMODE_HVCLOCK
+
 #endif /* __ASM_VDSO_CLOCKSOURCE_H */
index 5a3022c8af82b8af1b88e5ed9d8b9af8afc5c9dd..0662f644aad9da71e25b9f7554d20d6981a5e7c7 100644 (file)
@@ -437,6 +437,8 @@ struct kvm_vmx_nested_state_hdr {
                __u16 flags;
        } smm;
 
+       __u16 pad;
+
        __u32 flags;
        __u64 preemption_timer_deadline;
 };
index 0704c2a94272c0b30b052441eb5070cb53b39430..0f66682ac02a62cee39694a84dbfd36dbc7ecedc 100644 (file)
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
-CFLAGS_REMOVE_sev-es.o = -pg
+CFLAGS_REMOVE_sev.o = -pg
 endif
 
 KASAN_SANITIZE_head$(BITS).o                           := n
@@ -28,7 +28,7 @@ KASAN_SANITIZE_dumpstack.o                            := n
 KASAN_SANITIZE_dumpstack_$(BITS).o                     := n
 KASAN_SANITIZE_stacktrace.o                            := n
 KASAN_SANITIZE_paravirt.o                              := n
-KASAN_SANITIZE_sev-es.o                                        := n
+KASAN_SANITIZE_sev.o                                   := n
 
 # With some compiler versions the generated code results in boot hangs, caused
 # by several compilation units. To be safe, disable all instrumentation.
@@ -148,7 +148,7 @@ obj-$(CONFIG_UNWINDER_ORC)          += unwind_orc.o
 obj-$(CONFIG_UNWINDER_FRAME_POINTER)   += unwind_frame.o
 obj-$(CONFIG_UNWINDER_GUESS)           += unwind_guess.o
 
-obj-$(CONFIG_AMD_MEM_ENCRYPT)          += sev-es.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT)          += sev.o
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
index e90310cbe73ac2fac72f21ab469e139130703544..e55e0c1fad8c823479c21ed3606403b3b301541b 100644 (file)
@@ -5,6 +5,7 @@
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
  */
+#define pr_fmt(fmt) "ACPI: " fmt
 
 #include <linux/init.h>
 #include <linux/acpi.h>
@@ -42,8 +43,6 @@ EXPORT_SYMBOL(acpi_disabled);
 # include <asm/proto.h>
 #endif                         /* X86 */
 
-#define PREFIX                 "ACPI: "
-
 int acpi_noirq;                                /* skip ACPI IRQ initialization */
 static int acpi_nobgrt;                        /* skip ACPI BGRT */
 int acpi_pci_disabled;         /* skip ACPI PCI scan and IRQ initialization */
@@ -130,15 +129,14 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 
        madt = (struct acpi_table_madt *)table;
        if (!madt) {
-               printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+               pr_warn("Unable to map MADT\n");
                return -ENODEV;
        }
 
        if (madt->address) {
                acpi_lapic_addr = (u64) madt->address;
 
-               printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
-                      madt->address);
+               pr_debug("Local APIC address 0x%08x\n", madt->address);
        }
 
        default_acpi_madt_oem_check(madt->header.oem_id,
@@ -161,7 +159,7 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
        int cpu;
 
        if (id >= MAX_LOCAL_APIC) {
-               printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
+               pr_info("skipped apicid that is too big\n");
                return -EINVAL;
        }
 
@@ -213,13 +211,13 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
         */
        if (!apic->apic_id_valid(apic_id)) {
                if (enabled)
-                       pr_warn(PREFIX "x2apic entry ignored\n");
+                       pr_warn("x2apic entry ignored\n");
                return 0;
        }
 
        acpi_register_lapic(apic_id, processor->uid, enabled);
 #else
-       printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+       pr_warn("x2apic entry ignored\n");
 #endif
 
        return 0;
@@ -306,7 +304,7 @@ acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
        acpi_table_print_madt_entry(&header->common);
 
        if (x2apic_nmi->lint != 1)
-               printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+               pr_warn("NMI not connected to LINT 1!\n");
 
        return 0;
 }
@@ -324,7 +322,7 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long e
        acpi_table_print_madt_entry(&header->common);
 
        if (lapic_nmi->lint != 1)
-               printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+               pr_warn("NMI not connected to LINT 1!\n");
 
        return 0;
 }
@@ -514,14 +512,14 @@ acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
 
        if (intsrc->source_irq == 0) {
                if (acpi_skip_timer_override) {
-                       printk(PREFIX "BIOS IRQ0 override ignored.\n");
+                       pr_warn("BIOS IRQ0 override ignored.\n");
                        return 0;
                }
 
                if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
                        && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
                        intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
-                       printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+                       pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
                }
        }
 
@@ -597,7 +595,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
        if (old == new)
                return;
 
-       printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
+       pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
        outb(new, 0x4d0);
        outb(new >> 8, 0x4d1);
 }
@@ -754,7 +752,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
 
        cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
        if (cpu < 0) {
-               pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+               pr_info("Unable to map lapic to logical cpu number\n");
                return cpu;
        }
 
@@ -870,8 +868,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
        struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
 
        if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
-               printk(KERN_WARNING PREFIX "HPET timers must be located in "
-                      "memory.\n");
+               pr_warn("HPET timers must be located in memory.\n");
                return -1;
        }
 
@@ -883,9 +880,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
         * want to allocate a resource there.
         */
        if (!hpet_address) {
-               printk(KERN_WARNING PREFIX
-                      "HPET id: %#x base: %#lx is invalid\n",
-                      hpet_tbl->id, hpet_address);
+               pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address);
                return 0;
        }
 #ifdef CONFIG_X86_64
@@ -896,21 +891,17 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
         */
        if (hpet_address == 0xfed0000000000000UL) {
                if (!hpet_force_user) {
-                       printk(KERN_WARNING PREFIX "HPET id: %#x "
-                              "base: 0xfed0000000000000 is bogus\n "
-                              "try hpet=force on the kernel command line to "
-                              "fix it up to 0xfed00000.\n", hpet_tbl->id);
+                       pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n",
+                               hpet_tbl->id);
                        hpet_address = 0;
                        return 0;
                }
-               printk(KERN_WARNING PREFIX
-                      "HPET id: %#x base: 0xfed0000000000000 fixed up "
-                      "to 0xfed00000.\n", hpet_tbl->id);
+               pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
+                       hpet_tbl->id);
                hpet_address >>= 32;
        }
 #endif
-       printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-              hpet_tbl->id, hpet_address);
+       pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address);
 
        /*
         * Allocate and initialize the HPET firmware resource for adding into
@@ -955,24 +946,24 @@ late_initcall(hpet_insert_resource);
 static int __init acpi_parse_fadt(struct acpi_table_header *table)
 {
        if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
-               pr_debug("ACPI: no legacy devices present\n");
+               pr_debug("no legacy devices present\n");
                x86_platform.legacy.devices.pnpbios = 0;
        }
 
        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
            !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
            x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
-               pr_debug("ACPI: i8042 controller is absent\n");
+               pr_debug("i8042 controller is absent\n");
                x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
        }
 
        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
-               pr_debug("ACPI: not registering RTC platform device\n");
+               pr_debug("not registering RTC platform device\n");
                x86_platform.legacy.rtc = 0;
        }
 
        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
-               pr_debug("ACPI: probing for VGA not safe\n");
+               pr_debug("probing for VGA not safe\n");
                x86_platform.legacy.no_vga = 1;
        }
 
@@ -997,8 +988,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
                pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
        }
        if (pmtmr_ioport)
-               printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
-                      pmtmr_ioport);
+               pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport);
 #endif
        return 0;
 }
@@ -1024,8 +1014,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
                                      acpi_parse_lapic_addr_ovr, 0);
        if (count < 0) {
-               printk(KERN_ERR PREFIX
-                      "Error parsing LAPIC address override entry\n");
+               pr_err("Error parsing LAPIC address override entry\n");
                return count;
        }
 
@@ -1057,8 +1046,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
                                sizeof(struct acpi_table_madt),
                                madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
                if (ret < 0) {
-                       printk(KERN_ERR PREFIX
-                                       "Error parsing LAPIC/X2APIC entries\n");
+                       pr_err("Error parsing LAPIC/X2APIC entries\n");
                        return ret;
                }
 
@@ -1066,11 +1054,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
                x2count = madt_proc[1].count;
        }
        if (!count && !x2count) {
-               printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+               pr_err("No LAPIC entries present\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return -ENODEV;
        } else if (count < 0 || x2count < 0) {
-               printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+               pr_err("Error parsing LAPIC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }
@@ -1080,7 +1068,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
                                      acpi_parse_lapic_nmi, 0);
        if (count < 0 || x2count < 0) {
-               printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+               pr_err("Error parsing LAPIC NMI entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }
@@ -1139,7 +1127,7 @@ static void __init mp_config_acpi_legacy_irqs(void)
                }
 
                if (idx != mp_irq_entries) {
-                       printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
+                       pr_debug("ACPI: IRQ%d used by override.\n", i);
                        continue;       /* IRQ already used */
                }
 
@@ -1179,26 +1167,24 @@ static int __init acpi_parse_madt_ioapic_entries(void)
         * if "noapic" boot option, don't look for IO-APICs
         */
        if (skip_ioapic_setup) {
-               printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
-                      "due to 'noapic' option.\n");
+               pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
                return -ENODEV;
        }
 
        count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
                                      MAX_IO_APICS);
        if (!count) {
-               printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+               pr_err("No IOAPIC entries present\n");
                return -ENODEV;
        } else if (count < 0) {
-               printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+               pr_err("Error parsing IOAPIC entry\n");
                return count;
        }
 
        count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
                                      acpi_parse_int_src_ovr, nr_irqs);
        if (count < 0) {
-               printk(KERN_ERR PREFIX
-                      "Error parsing interrupt source overrides entry\n");
+               pr_err("Error parsing interrupt source overrides entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }
@@ -1218,7 +1204,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
        count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
                                      acpi_parse_nmi_src, nr_irqs);
        if (count < 0) {
-               printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+               pr_err("Error parsing NMI SRC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }
@@ -1251,8 +1237,7 @@ static void __init early_acpi_process_madt(void)
                        /*
                         * Dell Precision Workstation 410, 610 come here.
                         */
-                       printk(KERN_ERR PREFIX
-                              "Invalid BIOS MADT, disabling ACPI\n");
+                       pr_err("Invalid BIOS MADT, disabling ACPI\n");
                        disable_acpi();
                }
        }
@@ -1289,8 +1274,7 @@ static void __init acpi_process_madt(void)
                        /*
                         * Dell Precision Workstation 410, 610 come here.
                         */
-                       printk(KERN_ERR PREFIX
-                              "Invalid BIOS MADT, disabling ACPI\n");
+                       pr_err("Invalid BIOS MADT, disabling ACPI\n");
                        disable_acpi();
                }
        } else {
@@ -1300,8 +1284,7 @@ static void __init acpi_process_madt(void)
                 * Boot with "acpi=off" to use MPS on such a system.
                 */
                if (smp_found_config) {
-                       printk(KERN_WARNING PREFIX
-                               "No APIC-table, disabling MPS\n");
+                       pr_warn("No APIC-table, disabling MPS\n");
                        smp_found_config = 0;
                }
        }
@@ -1311,11 +1294,9 @@ static void __init acpi_process_madt(void)
         * processors, where MPS only supports physical.
         */
        if (acpi_lapic && acpi_ioapic)
-               printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
-                      "information\n");
+               pr_info("Using ACPI (MADT) for SMP configuration information\n");
        else if (acpi_lapic)
-               printk(KERN_INFO "Using ACPI for processor (LAPIC) "
-                      "configuration information\n");
+               pr_info("Using ACPI for processor (LAPIC) configuration information\n");
 #endif
        return;
 }
@@ -1323,8 +1304,7 @@ static void __init acpi_process_madt(void)
 static int __init disable_acpi_irq(const struct dmi_system_id *d)
 {
        if (!acpi_force) {
-               printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
-                      d->ident);
+               pr_notice("%s detected: force use of acpi=noirq\n", d->ident);
                acpi_noirq_set();
        }
        return 0;
@@ -1333,8 +1313,7 @@ static int __init disable_acpi_irq(const struct dmi_system_id *d)
 static int __init disable_acpi_pci(const struct dmi_system_id *d)
 {
        if (!acpi_force) {
-               printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
-                      d->ident);
+               pr_notice("%s detected: force use of pci=noacpi\n", d->ident);
                acpi_disable_pci();
        }
        return 0;
@@ -1343,11 +1322,10 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d)
 static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 {
        if (!acpi_force) {
-               printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
+               pr_notice("%s detected: acpi off\n", d->ident);
                disable_acpi();
        } else {
-               printk(KERN_NOTICE
-                      "Warning: DMI blacklist says broken, but acpi forced\n");
+               pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
        }
        return 0;
 }
@@ -1574,9 +1552,9 @@ int __init early_acpi_boot_init(void)
         */
        if (acpi_blacklisted()) {
                if (acpi_force) {
-                       printk(KERN_WARNING PREFIX "acpi=force override\n");
+                       pr_warn("acpi=force override\n");
                } else {
-                       printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+                       pr_warn("Disabling ACPI support\n");
                        disable_acpi();
                        return 1;
                }
@@ -1692,9 +1670,7 @@ int __init acpi_mps_check(void)
 #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
 /* mptable code is not built-in*/
        if (acpi_disabled || acpi_noirq) {
-               printk(KERN_WARNING "MPS support code is not built-in.\n"
-                      "Using acpi=off or acpi=noirq or pci=noacpi "
-                      "may have problem\n");
+               pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may have problem\n");
                return 1;
        }
 #endif
index 6974b517449552c2725972263730ec6162b56936..6fe5b44fcbc9f44b437e8ef523912efe370d8e80 100644 (file)
@@ -182,42 +182,70 @@ done:
                n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 }
 
+/*
+ * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
+ *
+ * @instr: instruction byte stream
+ * @instrlen: length of the above
+ * @off: offset within @instr where the first NOP has been detected
+ *
+ * Return: number of NOPs found (and replaced).
+ */
+static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
+{
+       unsigned long flags;
+       int i = off, nnops;
+
+       while (i < instrlen) {
+               if (instr[i] != 0x90)
+                       break;
+
+               i++;
+       }
+
+       nnops = i - off;
+
+       if (nnops <= 1)
+               return nnops;
+
+       local_irq_save(flags);
+       add_nops(instr + off, nnops);
+       local_irq_restore(flags);
+
+       DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
+
+       return nnops;
+}
+
 /*
  * "noinline" to cause control flow change and thus invalidate I$ and
  * cause refetch after modification.
  */
 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
-       unsigned long flags;
        struct insn insn;
-       int nop, i = 0;
+       int i = 0;
 
        /*
-        * Jump over the non-NOP insns, the remaining bytes must be single-byte
-        * NOPs, optimize them.
+        * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
+        * ones.
         */
        for (;;) {
                if (insn_decode_kernel(&insn, &instr[i]))
                        return;
 
+               /*
+                * See if this and any potentially following NOPs can be
+                * optimized.
+                */
                if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
-                       break;
-
-               if ((i += insn.length) >= a->instrlen)
-                       return;
-       }
+                       i += optimize_nops_range(instr, a->instrlen, i);
+               else
+                       i += insn.length;
 
-       for (nop = i; i < a->instrlen; i++) {
-               if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
+               if (i >= a->instrlen)
                        return;
        }
-
-       local_irq_save(flags);
-       add_nops(instr + nop, i - nop);
-       local_irq_restore(flags);
-
-       DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
-                  instr, nop, a->instrlen);
 }
 
 /*
index 4a39fb429f15b427f796b95f97ce60db56d0f029..d262811ce14b5a08a7f31054b4346d688def9357 100644 (file)
@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
        end_local_APIC_setup();
        irq_remap_enable_fault_handling();
        setup_IO_APIC();
+       lapic_update_legacy_vectors();
 }
 
 #ifdef CONFIG_UP_LATE_INIT
index 6dbdc7c22bb751126cb70f9ea5766e120235f4eb..fb67ed5e7e6a8a616a46c098b814966a14f612d1 100644 (file)
@@ -738,6 +738,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
        irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
 }
 
+void __init lapic_update_legacy_vectors(void)
+{
+       unsigned int i;
+
+       if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+               return;
+
+       /*
+        * If the IO/APIC is disabled via config, kernel command line or
+        * lack of enumeration then all legacy interrupts are routed
+        * through the PIC. Make sure that they are marked as legacy
+        * vectors. PIC_CASCADE_IRQ has already been marked in
+        * lapic_assign_system_vectors().
+        */
+       for (i = 0; i < nr_legacy_irqs(); i++) {
+               if (i != PIC_CASCADE_IR)
+                       lapic_assign_legacy_vector(i, true);
+       }
+}
+
 void __init lapic_assign_system_vectors(void)
 {
        unsigned int i, vector = 0;
index 2d11384dc9ab4bd08f43d16b3fa0202085220c7a..c06ac56eae4db1d81f45645498db7c8d4dc9ad79 100644 (file)
@@ -593,8 +593,8 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
         */
        if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
                /* Check if memory encryption is enabled */
-               rdmsrl(MSR_K8_SYSCFG, msr);
-               if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+               rdmsrl(MSR_AMD64_SYSCFG, msr);
+               if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                        goto clear_all;
 
                /*
@@ -1165,3 +1165,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
                break;
        }
 }
+
+u32 amd_get_highest_perf(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
+                              (c->x86_model >= 0x70 && c->x86_model < 0x80)))
+               return 166;
+
+       if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
+                              (c->x86_model >= 0x40 && c->x86_model < 0x70)))
+               return 166;
+
+       return 255;
+}
+EXPORT_SYMBOL_GPL(amd_get_highest_perf);
index 0c3b372318b70776dc915279c005130a0454e7ff..b5f43049fa5f762a1c005ee66e2801c0d4be5286 100644 (file)
@@ -836,7 +836,7 @@ int __init amd_special_default_mtrr(void)
        if (boot_cpu_data.x86 < 0xf)
                return 0;
        /* In case some hypervisor doesn't pass SYSCFG through: */
-       if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
+       if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
                return 0;
        /*
         * Memory between 4GB and top of mem is forced WB by this magic bit.
index b90f3f437765c40f3f983b56b30dab6a2079a07d..558108296f3cf1e5e7a7c3436a1748db63b99945 100644 (file)
@@ -53,13 +53,13 @@ static inline void k8_check_syscfg_dram_mod_en(void)
              (boot_cpu_data.x86 >= 0x0f)))
                return;
 
-       rdmsr(MSR_K8_SYSCFG, lo, hi);
+       rdmsr(MSR_AMD64_SYSCFG, lo, hi);
        if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
                pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
                       " not cleared by BIOS, clearing this bit\n",
                       smp_processor_id());
                lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
-               mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
+               mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
        }
 }
 
index 3ef5868ac588ad55c8f67d797ba3ffa81db67d07..7aecb2fc3186388c645b3ba1f674223617a8d893 100644 (file)
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
                case 15:
                        return msr - MSR_P4_BPU_PERFCTR0;
                }
-               fallthrough;
+               break;
        case X86_VENDOR_ZHAOXIN:
        case X86_VENDOR_CENTAUR:
                return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
                case 15:
                        return msr - MSR_P4_BSU_ESCR0;
                }
-               fallthrough;
+               break;
        case X86_VENDOR_ZHAOXIN:
        case X86_VENDOR_CENTAUR:
                return msr - MSR_ARCH_PERFMON_EVENTSEL0;
index a85c64000218968bc30854ccb583845b6790175d..d0eef963aad138c9ca1d6fd75aa527da5266d4ad 100644 (file)
@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
        return 0;
 }
 #endif /* CONFIG_PROC_PID_ARCH_STATUS */
-
-#ifdef CONFIG_IOMMU_SUPPORT
-void update_pasid(void)
-{
-       u64 pasid_state;
-       u32 pasid;
-
-       if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
-               return;
-
-       if (!current->mm)
-               return;
-
-       pasid = READ_ONCE(current->mm->pasid);
-       /* Set the valid bit in the PASID MSR/state only for valid pasid. */
-       pasid_state = pasid == PASID_DISABLED ?
-                     pasid : pasid | MSR_IA32_PASID_VALID;
-
-       /*
-        * No need to hold fregs_lock() since the task's fpstate won't
-        * be changed by others (e.g. ptrace) while the task is being
-        * switched to or is in IPI.
-        */
-       if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-               /* The MSR is active and can be directly updated. */
-               wrmsrl(MSR_IA32_PASID, pasid_state);
-       } else {
-               struct fpu *fpu = &current->thread.fpu;
-               struct ia32_pasid_state *ppasid_state;
-               struct xregs_state *xsave;
-
-               /*
-                * The CPU's xstate registers are not currently active. Just
-                * update the PASID state in the memory buffer here. The
-                * PASID MSR will be loaded when returning to user mode.
-                */
-               xsave = &fpu->state.xsave;
-               xsave->header.xfeatures |= XFEATURE_MASK_PASID;
-               ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
-               /*
-                * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
-                * won't be NULL and no need to check its value.
-                *
-                * Only update the task's PASID state when it's different
-                * from the mm's pasid.
-                */
-               if (ppasid_state->pasid != pasid_state) {
-                       /*
-                        * Invalid fpregs so that state restoring will pick up
-                        * the PASID state.
-                        */
-                       __fpu_invalidate_fpregs_state(fpu);
-                       ppasid_state->pasid = pasid_state;
-               }
-       }
-}
-#endif /* CONFIG_IOMMU_SUPPORT */
index 18be44163a50fb4ec9d06f3094fad3b4a0bdb732..de01903c3735540c9641c17eeecea3488138f704 100644 (file)
@@ -39,7 +39,7 @@
 #include <asm/realmode.h>
 #include <asm/extable.h>
 #include <asm/trapnr.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 /*
  * Manage page tables very early on.
index d307c22e5c188ce6d9f9f2dffc1c75b1aaf43a88..a26643dc6bd635a7ad3032e3979b7edc2ab899f0 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kprobes.h>
 #include <linux/nmi.h>
 #include <linux/swait.h>
+#include <linux/syscore_ops.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -37,6 +38,7 @@
 #include <asm/tlb.h>
 #include <asm/cpuidle_haltpoll.h>
 #include <asm/ptrace.h>
+#include <asm/reboot.h>
 #include <asm/svm.h>
 
 DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -345,7 +347,7 @@ static void kvm_guest_cpu_init(void)
 
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
-               pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
+               pr_info("setup async PF for cpu %d\n", smp_processor_id());
        }
 
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
@@ -371,34 +373,17 @@ static void kvm_pv_disable_apf(void)
        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);
 
-       pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+       pr_info("disable async PF for cpu %d\n", smp_processor_id());
 }
 
-static void kvm_pv_guest_cpu_reboot(void *unused)
+static void kvm_disable_steal_time(void)
 {
-       /*
-        * We disable PV EOI before we load a new kernel by kexec,
-        * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
-        * New kernel can re-enable when it boots.
-        */
-       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-               wrmsrl(MSR_KVM_PV_EOI_EN, 0);
-       kvm_pv_disable_apf();
-       kvm_disable_steal_time();
-}
+       if (!has_steal_clock)
+               return;
 
-static int kvm_pv_reboot_notify(struct notifier_block *nb,
-                               unsigned long code, void *unused)
-{
-       if (code == SYS_RESTART)
-               on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
-       return NOTIFY_DONE;
+       wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }
 
-static struct notifier_block kvm_pv_reboot_nb = {
-       .notifier_call = kvm_pv_reboot_notify,
-};
-
 static u64 kvm_steal_clock(int cpu)
 {
        u64 steal;
@@ -416,14 +401,6 @@ static u64 kvm_steal_clock(int cpu)
        return steal;
 }
 
-void kvm_disable_steal_time(void)
-{
-       if (!has_steal_clock)
-               return;
-
-       wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
-}
-
 static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
 {
        early_set_memory_decrypted((unsigned long) ptr, size);
@@ -451,6 +428,27 @@ static void __init sev_map_percpu_data(void)
        }
 }
 
+static void kvm_guest_cpu_offline(bool shutdown)
+{
+       kvm_disable_steal_time();
+       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+               wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+       kvm_pv_disable_apf();
+       if (!shutdown)
+               apf_task_wake_all();
+       kvmclock_disable();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       kvm_guest_cpu_init();
+       local_irq_restore(flags);
+       return 0;
+}
+
 #ifdef CONFIG_SMP
 
 static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
@@ -635,31 +633,64 @@ static void __init kvm_smp_prepare_boot_cpu(void)
        kvm_spinlock_init();
 }
 
-static void kvm_guest_cpu_offline(void)
+static int kvm_cpu_down_prepare(unsigned int cpu)
 {
-       kvm_disable_steal_time();
-       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-               wrmsrl(MSR_KVM_PV_EOI_EN, 0);
-       kvm_pv_disable_apf();
-       apf_task_wake_all();
+       unsigned long flags;
+
+       local_irq_save(flags);
+       kvm_guest_cpu_offline(false);
+       local_irq_restore(flags);
+       return 0;
 }
 
-static int kvm_cpu_online(unsigned int cpu)
+#endif
+
+static int kvm_suspend(void)
 {
-       local_irq_disable();
-       kvm_guest_cpu_init();
-       local_irq_enable();
+       kvm_guest_cpu_offline(false);
+
        return 0;
 }
 
-static int kvm_cpu_down_prepare(unsigned int cpu)
+static void kvm_resume(void)
 {
-       local_irq_disable();
-       kvm_guest_cpu_offline();
-       local_irq_enable();
-       return 0;
+       kvm_cpu_online(raw_smp_processor_id());
+}
+
+static struct syscore_ops kvm_syscore_ops = {
+       .suspend        = kvm_suspend,
+       .resume         = kvm_resume,
+};
+
+static void kvm_pv_guest_cpu_reboot(void *unused)
+{
+       kvm_guest_cpu_offline(true);
+}
+
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+                               unsigned long code, void *unused)
+{
+       if (code == SYS_RESTART)
+               on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
+       return NOTIFY_DONE;
 }
 
+static struct notifier_block kvm_pv_reboot_nb = {
+       .notifier_call = kvm_pv_reboot_notify,
+};
+
+/*
+ * After a PV feature is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shutdown, this memory
+ * won't be valid. In cases like kexec, in which you install a new kernel, this
+ * means a random memory location will be kept being written.
+ */
+#ifdef CONFIG_KEXEC_CORE
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+       kvm_guest_cpu_offline(true);
+       native_machine_crash_shutdown(regs);
+}
 #endif
 
 static void __init kvm_guest_init(void)
@@ -704,6 +735,12 @@ static void __init kvm_guest_init(void)
        kvm_guest_cpu_init();
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
+       machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+
+       register_syscore_ops(&kvm_syscore_ops);
+
        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
index d37ed4e1d0338c8b21ad461f5d2672cead15629a..ad273e5861c1b267cb815fdd54969c2b26ffc90d 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/hypervisor.h>
 #include <asm/mem_encrypt.h>
 #include <asm/x86_init.h>
-#include <asm/reboot.h>
 #include <asm/kvmclock.h>
 
 static int kvmclock __initdata = 1;
@@ -203,28 +202,9 @@ static void kvm_setup_secondary_clock(void)
 }
 #endif
 
-/*
- * After the clock is registered, the host will keep writing to the
- * registered memory location. If the guest happens to shutdown, this memory
- * won't be valid. In cases like kexec, in which you install a new kernel, this
- * means a random memory location will be kept being written. So before any
- * kind of shutdown from our side, we unregister the clock by writing anything
- * that does not have the 'enable' bit set in the msr
- */
-#ifdef CONFIG_KEXEC_CORE
-static void kvm_crash_shutdown(struct pt_regs *regs)
-{
-       native_write_msr(msr_kvm_system_time, 0, 0);
-       kvm_disable_steal_time();
-       native_machine_crash_shutdown(regs);
-}
-#endif
-
-static void kvm_shutdown(void)
+void kvmclock_disable(void)
 {
        native_write_msr(msr_kvm_system_time, 0, 0);
-       kvm_disable_steal_time();
-       native_machine_shutdown();
 }
 
 static void __init kvmclock_init_mem(void)
@@ -351,10 +331,6 @@ void __init kvmclock_init(void)
 #endif
        x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
        x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
-       machine_ops.shutdown  = kvm_shutdown;
-#ifdef CONFIG_KEXEC_CORE
-       machine_ops.crash_shutdown  = kvm_crash_shutdown;
-#endif
        kvm_get_preset_lpj();
 
        /*
index b5cb49e57df850343f63cae4ba197361a09f1f51..c94dec6a18345a3a16574ca9944f402e4a9de410 100644 (file)
@@ -95,7 +95,7 @@ static void get_fam10h_pci_mmconf_base(void)
                return;
 
        /* SYS_CFG */
-       address = MSR_K8_SYSCFG;
+       address = MSR_AMD64_SYSCFG;
        rdmsrl(address, val);
 
        /* TOP_MEM2 is not enabled? */
index 2ef961cf4cfc54b5e77162735fc0cea48dfb57d9..4bce802d25fb18b79cd525883ea87a2b4a75ff24 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/reboot.h>
 #include <asm/cache.h>
 #include <asm/nospec-branch.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/nmi.h>
index 72920af0b3c011aa3c9c4d9a23b8de3212d81cbd..1e720626069a35ef10ca880855ef6e6293808850 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/pci-direct.h>
 #include <asm/prom.h>
 #include <asm/proto.h>
+#include <asm/thermal.h>
 #include <asm/unwind.h>
 #include <asm/vsyscall.h>
 #include <linux/vmalloc.h>
@@ -637,11 +638,11 @@ static void __init trim_snb_memory(void)
         * them from accessing certain memory ranges, namely anything below
         * 1M and in the pages listed in bad_pages[] above.
         *
-        * To avoid these pages being ever accessed by SNB gfx devices
-        * reserve all memory below the 1 MB mark and bad_pages that have
-        * not already been reserved at boot time.
+        * To avoid these pages being ever accessed by SNB gfx devices reserve
+        * bad_pages that have not already been reserved at boot time.
+        * All memory below the 1 MB mark is anyway reserved later during
+        * setup_arch(), so there is no need to reserve it here.
         */
-       memblock_reserve(0, 1<<20);
 
        for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
                if (memblock_reserve(bad_pages[i], PAGE_SIZE))
@@ -733,14 +734,14 @@ static void __init early_reserve_memory(void)
         * The first 4Kb of memory is a BIOS owned area, but generally it is
         * not listed as such in the E820 table.
         *
-        * Reserve the first memory page and typically some additional
-        * memory (64KiB by default) since some BIOSes are known to corrupt
-        * low memory. See the Kconfig help text for X86_RESERVE_LOW.
+        * Reserve the first 64K of memory since some BIOSes are known to
+        * corrupt low memory. After the real mode trampoline is allocated the
+        * rest of the memory below 640k is reserved.
         *
         * In addition, make sure page 0 is always reserved because on
         * systems with L1TF its contents can be leaked to user processes.
         */
-       memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+       memblock_reserve(0, SZ_64K);
 
        early_reserve_initrd();
 
@@ -751,6 +752,7 @@ static void __init early_reserve_memory(void)
 
        reserve_ibft_region();
        reserve_bios_regions();
+       trim_snb_memory();
 }
 
 /*
@@ -1081,14 +1083,20 @@ void __init setup_arch(char **cmdline_p)
                        (max_pfn_mapped<<PAGE_SHIFT) - 1);
 #endif
 
-       reserve_real_mode();
-
        /*
-        * Reserving memory causing GPU hangs on Sandy Bridge integrated
-        * graphics devices should be done after we allocated memory under
-        * 1M for the real mode trampoline.
+        * Find free memory for the real mode trampoline and place it
+        * there.
+        * If there is not enough free memory under 1M, on EFI-enabled
+        * systems there will be additional attempt to reclaim the memory
+        * for the real mode trampoline at efi_free_boot_services().
+        *
+        * Unconditionally reserve the entire first 1M of RAM because
+        * BIOSes are known to corrupt low memory and several
+        * hundred kilobytes are not worth complex detection of what memory
+        * gets clobbered. Moreover, on machines with SandyBridge graphics or in
+        * setups that use crashkernel the entire 1M is reserved anyway.
         */
-       trim_snb_memory();
+       reserve_real_mode();
 
        init_mem_mapping();
 
@@ -1226,6 +1234,14 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.timers.wallclock_init();
 
+       /*
+        * This needs to run before setup_local_APIC() which soft-disables the
+        * local APIC temporarily and that masks the thermal LVT interrupt,
+        * leading to softlockups on machines which have configured SMI
+        * interrupt delivery.
+        */
+       therm_lvt_init();
+
        mcheck_init();
 
        register_refined_jiffies(CLOCK_TICK_RATE);
similarity index 96%
rename from arch/x86/kernel/sev-es-shared.c
rename to arch/x86/kernel/sev-shared.c
index 0aa9f13efd57230b4690059878cd0253ed8c2e01..9f90f460a28cc0f49b161188933dba804249acdf 100644 (file)
@@ -26,13 +26,13 @@ static bool __init sev_es_check_cpu_features(void)
 
 static void __noreturn sev_es_terminate(unsigned int reason)
 {
-       u64 val = GHCB_SEV_TERMINATE;
+       u64 val = GHCB_MSR_TERM_REQ;
 
        /*
         * Tell the hypervisor what went wrong - only reason-set 0 is
         * currently supported.
         */
-       val |= GHCB_SEV_TERMINATE_REASON(0, reason);
+       val |= GHCB_SEV_TERM_REASON(0, reason);
 
        /* Request Guest Termination from Hypervisor */
        sev_es_wr_ghcb_msr(val);
@@ -47,15 +47,15 @@ static bool sev_es_negotiate_protocol(void)
        u64 val;
 
        /* Do the GHCB protocol version negotiation */
-       sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
+       sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
 
-       if (GHCB_INFO(val) != GHCB_SEV_INFO)
+       if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
                return false;
 
-       if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
-           GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
+       if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTO_OUR ||
+           GHCB_MSR_PROTO_MIN(val) > GHCB_PROTO_OUR)
                return false;
 
        return true;
@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
 
 static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
 {
+       ghcb->save.sw_exit_code = 0;
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
@@ -153,28 +154,28 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
-       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+       if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
                goto fail;
        regs->ax = val >> 32;
 
        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
-       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+       if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
                goto fail;
        regs->bx = val >> 32;
 
        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
-       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+       if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
                goto fail;
        regs->cx = val >> 32;
 
        sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
        VMGEXIT();
        val = sev_es_rd_ghcb_msr();
-       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+       if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
                goto fail;
        regs->dx = val >> 32;
 
similarity index 92%
rename from arch/x86/kernel/sev-es.c
rename to arch/x86/kernel/sev.c
index 73873b00783800b22e9b3990cebed2f6f96e2962..651b81cd648e5dc183853d90e2d5f1c33d39ed50 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/internal.h>
 #include <asm/processor.h>
@@ -203,8 +203,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
        if (unlikely(data->ghcb_active)) {
                /* GHCB is already in use - save its contents */
 
-               if (unlikely(data->backup_ghcb_active))
-                       return NULL;
+               if (unlikely(data->backup_ghcb_active)) {
+                       /*
+                        * Backup-GHCB is also already in use. There is no way
+                        * to continue here so just kill the machine. To make
+                        * panic() work, mark GHCBs inactive so that messages
+                        * can be printed out.
+                        */
+                       data->ghcb_active        = false;
+                       data->backup_ghcb_active = false;
+
+                       panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+               }
 
                /* Mark backup_ghcb active before writing to it */
                data->backup_ghcb_active = true;
@@ -221,24 +231,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
        return ghcb;
 }
 
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
-{
-       struct sev_es_runtime_data *data;
-       struct ghcb *ghcb;
-
-       data = this_cpu_read(runtime_data);
-       ghcb = &data->ghcb_page;
-
-       if (state->ghcb) {
-               /* Restore GHCB from Backup */
-               *ghcb = *state->ghcb;
-               data->backup_ghcb_active = false;
-               state->ghcb = NULL;
-       } else {
-               data->ghcb_active = false;
-       }
-}
-
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
 
@@ -323,31 +315,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
-       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
-       if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
-               memcpy(dst, buf, size);
-               return ES_OK;
-       }
-
+       /*
+        * This function uses __put_user() independent of whether kernel or user
+        * memory is accessed. This works fine because __put_user() does no
+        * sanity checks of the pointer being accessed. All that it does is
+        * to report when the access failed.
+        *
+        * Also, this function runs in atomic context, so __put_user() is not
+        * allowed to sleep. The page-fault handler detects that it is running
+        * in atomic context and will not try to take mmap_sem and handle the
+        * fault, so additional pagefault_enable()/disable() calls are not
+        * needed.
+        *
+        * The access can't be done via copy_to_user() here because
+        * vc_write_mem() must not use string instructions to access unsafe
+        * memory. The reason is that MOVS is emulated by the #VC handler by
+        * splitting the move up into a read and a write and taking a nested #VC
+        * exception on whatever of them is the MMIO access. Using string
+        * instructions here would cause infinite nesting.
+        */
        switch (size) {
        case 1:
                memcpy(&d1, buf, 1);
-               if (put_user(d1, target))
+               if (__put_user(d1, target))
                        goto fault;
                break;
        case 2:
                memcpy(&d2, buf, 2);
-               if (put_user(d2, target))
+               if (__put_user(d2, target))
                        goto fault;
                break;
        case 4:
                memcpy(&d4, buf, 4);
-               if (put_user(d4, target))
+               if (__put_user(d4, target))
                        goto fault;
                break;
        case 8:
                memcpy(&d8, buf, 8);
-               if (put_user(d8, target))
+               if (__put_user(d8, target))
                        goto fault;
                break;
        default:
@@ -378,30 +383,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
-       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
-       if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
-               memcpy(buf, src, size);
-               return ES_OK;
-       }
-
+       /*
+        * This function uses __get_user() independent of whether kernel or user
+        * memory is accessed. This works fine because __get_user() does no
+        * sanity checks of the pointer being accessed. All that it does is
+        * to report when the access failed.
+        *
+        * Also, this function runs in atomic context, so __get_user() is not
+        * allowed to sleep. The page-fault handler detects that it is running
+        * in atomic context and will not try to take mmap_sem and handle the
+        * fault, so additional pagefault_enable()/disable() calls are not
+        * needed.
+        *
+        * The access can't be done via copy_from_user() here because
+        * vc_read_mem() must not use string instructions to access unsafe
+        * memory. The reason is that MOVS is emulated by the #VC handler by
+        * splitting the move up into a read and a write and taking a nested #VC
+        * exception on whatever of them is the MMIO access. Using string
+        * instructions here would cause infinite nesting.
+        */
        switch (size) {
        case 1:
-               if (get_user(d1, s))
+               if (__get_user(d1, s))
                        goto fault;
                memcpy(buf, &d1, 1);
                break;
        case 2:
-               if (get_user(d2, s))
+               if (__get_user(d2, s))
                        goto fault;
                memcpy(buf, &d2, 2);
                break;
        case 4:
-               if (get_user(d4, s))
+               if (__get_user(d4, s))
                        goto fault;
                memcpy(buf, &d4, 4);
                break;
        case 8:
-               if (get_user(d8, s))
+               if (__get_user(d8, s))
                        goto fault;
                memcpy(buf, &d8, 8);
                break;
@@ -459,7 +477,30 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
 }
 
 /* Include code shared with pre-decompression boot stage */
-#include "sev-es-shared.c"
+#include "sev-shared.c"
+
+static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+{
+       struct sev_es_runtime_data *data;
+       struct ghcb *ghcb;
+
+       data = this_cpu_read(runtime_data);
+       ghcb = &data->ghcb_page;
+
+       if (state->ghcb) {
+               /* Restore GHCB from Backup */
+               *ghcb = *state->ghcb;
+               data->backup_ghcb_active = false;
+               state->ghcb = NULL;
+       } else {
+               /*
+                * Invalidate the GHCB so a VMGEXIT instruction issued
+                * from userspace won't appear to be valid.
+                */
+               vc_ghcb_invalidate(ghcb);
+               data->ghcb_active = false;
+       }
+}
 
 void noinstr __sev_es_nmi_complete(void)
 {
@@ -1255,6 +1296,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
        case X86_TRAP_UD:
                exc_invalid_op(ctxt->regs);
                break;
+       case X86_TRAP_PF:
+               write_cr2(ctxt->fi.cr2);
+               exc_page_fault(ctxt->regs, error_code);
+               break;
        case X86_TRAP_AC:
                exc_alignment_check(ctxt->regs, error_code);
                break;
@@ -1284,7 +1329,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
  */
 DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 {
-       struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
        irqentry_state_t irq_state;
        struct ghcb_state state;
        struct es_em_ctxt ctxt;
@@ -1310,16 +1354,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
         */
 
        ghcb = sev_es_get_ghcb(&state);
-       if (!ghcb) {
-               /*
-                * Mark GHCBs inactive so that panic() is able to print the
-                * message.
-                */
-               data->ghcb_active        = false;
-               data->backup_ghcb_active = false;
-
-               panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
-       }
 
        vc_ghcb_invalidate(ghcb);
        result = vc_init_em_ctxt(&ctxt, regs, error_code);
index 0e5d0a7e203b36f5b6171cde4b4207ee67cbb32c..06743ec054d2a99026050456094c7917b5fa7c86 100644 (file)
@@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void)
        BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
        BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
 
+       BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18);
+       BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10);
+
        BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
        BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
 
@@ -138,8 +141,10 @@ static inline void signal_compat_build_tests(void)
        BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
        BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
 
-       BUILD_BUG_ON(offsetof(siginfo_t, si_perf) != 0x18);
-       BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf) != 0x10);
+       BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18);
+       BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20);
+       BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10);
+       BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14);
 
        CHECK_CSI_OFFSET(_sigpoll);
        CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
index 0ad5214f598a9cc8ee03136472cddbc9520a5959..7770245cc7fa7e6da36ebcebfce7f1978f6e9523 100644 (file)
@@ -2043,7 +2043,7 @@ static bool amd_set_max_freq_ratio(void)
                return false;
        }
 
-       highest_perf = perf_caps.highest_perf;
+       highest_perf = amd_get_highest_perf();
        nominal_perf = perf_caps.nominal_perf;
 
        if (!highest_perf || !nominal_perf) {
index 19606a3418889ca881a2c112b11e595b7b4d2de6..9a48f138832d42ee79eb153ca1288868d6560f5b 100644 (file)
@@ -458,7 +458,7 @@ void kvm_set_cpu_caps(void)
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
-               F(SGX_LC)
+               F(SGX_LC) | F(BUS_LOCK_DETECT)
        );
        /* Set LA57 based on hardware capability. */
        if (cpuid_ecx(7) & F(LA57))
@@ -567,6 +567,21 @@ void kvm_set_cpu_caps(void)
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN)
        );
+
+       /*
+        * Hide RDTSCP and RDPID if either feature is reported as supported but
+        * probing MSR_TSC_AUX failed.  This is purely a sanity check and
+        * should never happen, but the guest will likely crash if RDTSCP or
+        * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
+        * the past.  For example, the sanity check may fire if this instance of
+        * KVM is running as L1 on top of an older, broken KVM.
+        */
+       if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
+                    kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
+                    !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
+               kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+               kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
 
@@ -637,7 +652,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
        case 7:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                entry->eax = 0;
-               entry->ecx = F(RDPID);
+               if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+                       entry->ecx = F(RDPID);
                ++array->nent;
        default:
                break;
index 77e1c89a95a7f919c3115781118592c81efb507a..5e5de05a8fbfaf91e424c70b9f65c3efec4c3bba 100644 (file)
@@ -4502,7 +4502,7 @@ static const struct opcode group8[] = {
  * from the register case of group9.
  */
 static const struct gprefix pfx_0f_c7_7 = {
-       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
+       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
 };
 
 
@@ -5111,7 +5111,7 @@ done:
        return rc;
 }
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
 {
        int rc = X86EMUL_CONTINUE;
        int mode = ctxt->mode;
@@ -5322,7 +5322,8 @@ done_prefixes:
 
        ctxt->execute = opcode.u.execute;
 
-       if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+       if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
+           likely(!(ctxt->d & EmulateOnUD)))
                return EMULATION_FAILED;
 
        if (unlikely(ctxt->d &
index f98370a399361b1104040a4e9d749a167454ebb1..f00830e5202fec90f68d2206bd3f8609490a4e3a 100644 (file)
@@ -1172,6 +1172,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
 {
        struct kvm_hv *hv = to_kvm_hv(kvm);
        u64 gfn;
+       int idx;
 
        if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
@@ -1190,9 +1191,16 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
 
        hv->tsc_ref.tsc_sequence = 0;
+
+       /*
+        * Take the srcu lock as memslots will be accessed to check the gfn
+        * cache generation against the memslots generation.
+        */
+       idx = srcu_read_lock(&kvm->srcu);
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+       srcu_read_unlock(&kvm->srcu, idx);
 
 out_unlock:
        mutex_unlock(&hv->hv_lock);
index 0d359115429ad2b51c97dc13eeb26a4b99a33569..3e870bf9ca4d59941aead4ace432634f3b56067f 100644 (file)
@@ -314,7 +314,6 @@ struct x86_emulate_ctxt {
        int interruptibility;
 
        bool perm_ok; /* do not check permissions if true */
-       bool ud;        /* inject an #UD if host doesn't support insn */
        bool tf;        /* TF value before instruction (after for syscall/sysret) */
 
        bool have_exception;
@@ -468,6 +467,7 @@ enum x86_intercept {
        x86_intercept_clgi,
        x86_intercept_skinit,
        x86_intercept_rdtscp,
+       x86_intercept_rdpid,
        x86_intercept_icebp,
        x86_intercept_wbinvd,
        x86_intercept_monitor,
@@ -490,7 +490,7 @@ enum x86_intercept {
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
index 152591f9243abd9bf3ad726114c3e41d99dd7b66..6d72d8f433107217b43a69f10b2565925d093587 100644 (file)
@@ -1494,6 +1494,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
 
 static void cancel_hv_timer(struct kvm_lapic *apic);
 
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+       hrtimer_cancel(&apic->lapic_timer.timer);
+       preempt_disable();
+       if (apic->lapic_timer.hv_timer_in_use)
+               cancel_hv_timer(apic);
+       preempt_enable();
+}
+
 static void apic_update_lvtt(struct kvm_lapic *apic)
 {
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1502,11 +1511,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
        if (apic->lapic_timer.timer_mode != timer_mode) {
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
-                       hrtimer_cancel(&apic->lapic_timer.timer);
-                       preempt_disable();
-                       if (apic->lapic_timer.hv_timer_in_use)
-                               cancel_hv_timer(apic);
-                       preempt_enable();
+                       cancel_apic_timer(apic);
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
@@ -1598,11 +1603,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
 
+       if (lapic_timer_advance_dynamic) {
+               adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
+               /*
+                * If the timer fired early, reread the TSC to account for the
+                * overhead of the above adjustment to avoid waiting longer
+                * than is necessary.
+                */
+               if (guest_tsc < tsc_deadline)
+                       guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+       }
+
        if (guest_tsc < tsc_deadline)
                __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
-
-       if (lapic_timer_advance_dynamic)
-               adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
 }
 
 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
@@ -1661,7 +1674,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
        }
 
        atomic_inc(&apic->lapic_timer.pending);
-       kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+       kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
        if (from_timer_fn)
                kvm_vcpu_kick(vcpu);
 }
@@ -1913,8 +1926,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
        if (!apic->lapic_timer.hv_timer_in_use)
                goto out;
        WARN_ON(rcuwait_active(&vcpu->wait));
-       cancel_hv_timer(apic);
        apic_timer_expired(apic, false);
+       cancel_hv_timer(apic);
 
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
@@ -2084,7 +2097,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
                if (apic_lvtt_tscdeadline(apic))
                        break;
 
-               hrtimer_cancel(&apic->lapic_timer.timer);
+               cancel_apic_timer(apic);
                kvm_lapic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                break;
index 4b3ee244ebe0573de8c004d2eb0250494821fc69..0144c40d09c76c0dc765a3a1f09ddb6df8fbd6ad 100644 (file)
@@ -3310,12 +3310,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
                pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
-               if (WARN_ON_ONCE(!mmu->lm_root)) {
+               if (WARN_ON_ONCE(!mmu->pml4_root)) {
                        r = -EIO;
                        goto out_unlock;
                }
 
-               mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
+               mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
        }
 
        for (i = 0; i < 4; ++i) {
@@ -3335,7 +3335,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        }
 
        if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
-               mmu->root_hpa = __pa(mmu->lm_root);
+               mmu->root_hpa = __pa(mmu->pml4_root);
        else
                mmu->root_hpa = __pa(mmu->pae_root);
 
@@ -3350,7 +3350,7 @@ out_unlock:
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       u64 *lm_root, *pae_root;
+       u64 *pml4_root, *pae_root;
 
        /*
         * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
@@ -3369,14 +3369,14 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
        if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
                return -EIO;
 
-       if (mmu->pae_root && mmu->lm_root)
+       if (mmu->pae_root && mmu->pml4_root)
                return 0;
 
        /*
         * The special roots should always be allocated in concert.  Yell and
         * bail if KVM ends up in a state where only one of the roots is valid.
         */
-       if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root))
+       if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
                return -EIO;
 
        /*
@@ -3387,14 +3387,14 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
        if (!pae_root)
                return -ENOMEM;
 
-       lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-       if (!lm_root) {
+       pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+       if (!pml4_root) {
                free_page((unsigned long)pae_root);
                return -ENOMEM;
        }
 
        mmu->pae_root = pae_root;
-       mmu->lm_root = lm_root;
+       mmu->pml4_root = pml4_root;
 
        return 0;
 }
@@ -5261,7 +5261,7 @@ static void free_mmu_pages(struct kvm_mmu *mmu)
        if (!tdp_enabled && mmu->pae_root)
                set_memory_encrypted((unsigned long)mmu->pae_root, 1);
        free_page((unsigned long)mmu->pae_root);
-       free_page((unsigned long)mmu->lm_root);
+       free_page((unsigned long)mmu->pml4_root);
 }
 
 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
index 70b7e44e303526de974b8e9acbb2029c999d1aaf..823a5919f9fa01fb85562bff4ed23d88c0c8285f 100644 (file)
@@ -90,8 +90,8 @@ struct guest_walker {
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
        bool pte_writable[PT_MAX_FULL_LEVELS];
-       unsigned pt_access;
-       unsigned pte_access;
+       unsigned int pt_access[PT_MAX_FULL_LEVELS];
+       unsigned int pte_access;
        gfn_t gfn;
        struct x86_exception fault;
 };
@@ -418,13 +418,15 @@ retry_walk:
                }
 
                walker->ptes[walker->level - 1] = pte;
+
+               /* Convert to ACC_*_MASK flags for struct guest_walker.  */
+               walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        } while (!is_last_gpte(mmu, walker->level, pte));
 
        pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
        accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
 
        /* Convert to ACC_*_MASK flags for struct guest_walker.  */
-       walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
        errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
        if (unlikely(errcode))
@@ -463,7 +465,8 @@ retry_walk:
        }
 
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
-                __func__, (u64)pte, walker->pte_access, walker->pt_access);
+                __func__, (u64)pte, walker->pte_access,
+                walker->pt_access[walker->level - 1]);
        return 1;
 
 error:
@@ -643,7 +646,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
        bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
-       unsigned direct_access, access = gw->pt_access;
+       unsigned int direct_access, access;
        int top_level, level, req_level, ret;
        gfn_t base_gfn = gw->gfn;
 
@@ -675,6 +678,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                sp = NULL;
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
+                       access = gw->pt_access[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access);
                }
index 88f69a6cc49226b725f07223325fab79b2e2f9a8..237317b1eddda3c4f15136f54f6e61e9e0c9a49c 100644 (file)
@@ -388,7 +388,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
 }
 
 /**
- * handle_changed_spte - handle bookkeeping associated with an SPTE change
+ * __handle_changed_spte - handle bookkeeping associated with an SPTE change
  * @kvm: kvm instance
  * @as_id: the address space of the paging structure the SPTE was a part of
  * @gfn: the base GFN that was mapped by the SPTE
@@ -444,6 +444,13 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 
        trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
 
+       if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
+               if (is_large_pte(old_spte))
+                       atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
+               else
+                       atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
+       }
+
        /*
         * The only times a SPTE should be changed from a non-present to
         * non-present state is when an MMIO entry is installed/modified/
@@ -1009,6 +1016,14 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                }
 
                if (!is_shadow_present_pte(iter.old_spte)) {
+                       /*
+                        * If SPTE has been frozen by another thread, just
+                        * give up and retry, avoiding unnecessary page table
+                        * allocation and free.
+                        */
+                       if (is_removed_spte(iter.old_spte))
+                               break;
+
                        sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
                        child_pt = sp->spt;
 
@@ -1177,9 +1192,9 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 }
 
 /*
- * Remove write access from all the SPTEs mapping GFNs [start, end). If
- * skip_4k is set, SPTEs that map 4k pages, will not be write-protected.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
+ * Remove write access from all SPTEs at or above min_level that map GFNs
+ * [start, end). Returns true if an SPTE has been changed and the TLBs need to
+ * be flushed.
  */
 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                             gfn_t start, gfn_t end, int min_level)
index 712b4e0de4818d2040583f98107398cccf364a9c..0e62e6a2438cfd041841e950fb9ee3ff1fb5f64c 100644 (file)
 #include "svm.h"
 
 /* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
+bool avic;
+module_param(avic, bool, S_IRUGO);
 
 #define SVM_AVIC_DOORBELL      0xc001011b
 
index 540d43ba2cf4644fcade75f18fcbcc1f166d9ae2..5e8d8443154e850dc3814c1e3aeb97d9d124776a 100644 (file)
@@ -764,7 +764,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
 
        svm_switch_vmcb(svm, &svm->vmcb01);
-       WARN_ON_ONCE(svm->vmcb->control.exit_code != SVM_EXIT_VMRUN);
 
        /*
         * On vmexit the  GIF is set to false and
@@ -872,6 +871,15 @@ void svm_free_nested(struct vcpu_svm *svm)
        __free_page(virt_to_page(svm->nested.vmcb02.ptr));
        svm->nested.vmcb02.ptr = NULL;
 
+       /*
+        * When last_vmcb12_gpa matches the current vmcb12 gpa,
+        * some vmcb12 fields are not loaded if they are marked clean
+        * in the vmcb12, since in this case they are up to date already.
+        *
+        * When the vmcb02 is freed, this optimization becomes invalid.
+        */
+       svm->nested.last_vmcb12_gpa = INVALID_GPA;
+
        svm->nested.initialized = false;
 }
 
@@ -884,9 +892,11 @@ void svm_leave_nested(struct vcpu_svm *svm)
 
        if (is_guest_mode(vcpu)) {
                svm->nested.nested_run_pending = 0;
+               svm->nested.vmcb12_gpa = INVALID_GPA;
+
                leave_guest_mode(vcpu);
 
-               svm_switch_vmcb(svm, &svm->nested.vmcb02);
+               svm_switch_vmcb(svm, &svm->vmcb01);
 
                nested_svm_uninit_mmu_context(vcpu);
                vmcb_mark_all_dirty(svm->vmcb);
@@ -1298,12 +1308,17 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         * L2 registers if needed are moved from the current VMCB to VMCB02.
         */
 
+       if (is_guest_mode(vcpu))
+               svm_leave_nested(svm);
+       else
+               svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
+
+       svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
+
        svm->nested.nested_run_pending =
                !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
 
        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
-       if (svm->current_vmcb == &svm->vmcb01)
-               svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
 
        svm->vmcb01.ptr->save.es = save->es;
        svm->vmcb01.ptr->save.cs = save->cs;
index 1356ee095cd5532e64cade9bb449bc8fc7302431..e0ce5da97fc2f3e8131fc1be223850ff3bfae061 100644 (file)
@@ -763,7 +763,7 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
 }
 
 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
-                                 unsigned long __user dst_uaddr,
+                                 void __user *dst_uaddr,
                                  unsigned long dst_paddr,
                                  int size, int *err)
 {
@@ -787,8 +787,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
 
        if (tpage) {
                offset = paddr & 15;
-               if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
-                                page_address(tpage) + offset, size))
+               if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
                        ret = -EFAULT;
        }
 
@@ -800,9 +799,9 @@ e_free:
 }
 
 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
-                                 unsigned long __user vaddr,
+                                 void __user *vaddr,
                                  unsigned long dst_paddr,
-                                 unsigned long __user dst_vaddr,
+                                 void __user *dst_vaddr,
                                  int size, int *error)
 {
        struct page *src_tpage = NULL;
@@ -810,13 +809,12 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
        int ret, len = size;
 
        /* If source buffer is not aligned then use an intermediate buffer */
-       if (!IS_ALIGNED(vaddr, 16)) {
+       if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
                src_tpage = alloc_page(GFP_KERNEL);
                if (!src_tpage)
                        return -ENOMEM;
 
-               if (copy_from_user(page_address(src_tpage),
-                               (void __user *)(uintptr_t)vaddr, size)) {
+               if (copy_from_user(page_address(src_tpage), vaddr, size)) {
                        __free_page(src_tpage);
                        return -EFAULT;
                }
@@ -830,7 +828,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
         *   - copy the source buffer in an intermediate buffer
         *   - use the intermediate buffer as source buffer
         */
-       if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+       if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
                int dst_offset;
 
                dst_tpage = alloc_page(GFP_KERNEL);
@@ -855,7 +853,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
                               page_address(src_tpage), size);
                else {
                        if (copy_from_user(page_address(dst_tpage) + dst_offset,
-                                          (void __user *)(uintptr_t)vaddr, size)) {
+                                          vaddr, size)) {
                                ret = -EFAULT;
                                goto e_free;
                        }
@@ -935,15 +933,15 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
                if (dec)
                        ret = __sev_dbg_decrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
-                                                    dst_vaddr,
+                                                    (void __user *)dst_vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     len, &argp->error);
                else
                        ret = __sev_dbg_encrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
-                                                    vaddr,
+                                                    (void __user *)vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
-                                                    dst_vaddr,
+                                                    (void __user *)dst_vaddr,
                                                     len, &argp->error);
 
                sev_unpin_memory(kvm, src_p, n);
@@ -1105,10 +1103,9 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
        struct sev_data_send_start data;
        int ret;
 
+       memset(&data, 0, sizeof(data));
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
-       if (ret < 0)
-               return ret;
 
        params->session_len = data.session_len;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
@@ -1217,10 +1214,9 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
        struct sev_data_send_update_data data;
        int ret;
 
+       memset(&data, 0, sizeof(data));
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
-       if (ret < 0)
-               return ret;
 
        params->hdr_len = data.hdr_len;
        params->trans_len = data.trans_len;
@@ -1764,7 +1760,8 @@ e_mirror_unlock:
 e_source_unlock:
        mutex_unlock(&source_kvm->lock);
 e_source_put:
-       fput(source_kvm_file);
+       if (source_kvm_file)
+               fput(source_kvm_file);
        return ret;
 }
 
@@ -2198,7 +2195,7 @@ vmgexit_err:
        return -EINVAL;
 }
 
-static void pre_sev_es_run(struct vcpu_svm *svm)
+void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 {
        if (!svm->ghcb)
                return;
@@ -2234,9 +2231,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int asid = sev_get_asid(svm->vcpu.kvm);
 
-       /* Perform any SEV-ES pre-run actions */
-       pre_sev_es_run(svm);
-
        /* Assign the asid allocated with this SEV guest */
        svm->asid = asid;
 
index b649f92287a2e53d6e018ba33066ec2b281bf930..e088086f3de6ab073ac8f46094fc4258753b9e5d 100644 (file)
@@ -212,7 +212,7 @@ DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
  * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
  * defer the restoration of TSC_AUX until the CPU returns to userspace.
  */
-#define TSC_AUX_URET_SLOT      0
+static int tsc_aux_uret_slot __read_mostly = -1;
 
 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 
@@ -447,6 +447,11 @@ static int has_svm(void)
                return 0;
        }
 
+       if (pgtable_l5_enabled()) {
+               pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n");
+               return 0;
+       }
+
        return 1;
 }
 
@@ -858,8 +863,8 @@ static __init void svm_adjust_mmio_mask(void)
                return;
 
        /* If memory encryption is not enabled, use existing mask */
-       rdmsrl(MSR_K8_SYSCFG, msr);
-       if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+       rdmsrl(MSR_AMD64_SYSCFG, msr);
+       if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                return;
 
        enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
@@ -959,8 +964,7 @@ static __init int svm_hardware_setup(void)
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }
 
-       if (boot_cpu_has(X86_FEATURE_RDTSCP))
-               kvm_define_user_return_msr(TSC_AUX_URET_SLOT, MSR_TSC_AUX);
+       tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 
        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
@@ -1006,9 +1010,7 @@ static __init int svm_hardware_setup(void)
        }
 
        if (avic) {
-               if (!npt_enabled ||
-                   !boot_cpu_has(X86_FEATURE_AVIC) ||
-                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
+               if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)) {
                        avic = false;
                } else {
                        pr_info("AVIC enabled\n");
@@ -1100,7 +1102,9 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        return svm->vmcb->control.tsc_offset;
 }
 
-static void svm_check_invpcid(struct vcpu_svm *svm)
+/* Evaluate instruction intercepts that depend on guest CPUID features. */
+static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
+                                             struct vcpu_svm *svm)
 {
        /*
         * Intercept INVPCID if shadow paging is enabled to sync/free shadow
@@ -1113,6 +1117,13 @@ static void svm_check_invpcid(struct vcpu_svm *svm)
                else
                        svm_clr_intercept(svm, INTERCEPT_INVPCID);
        }
+
+       if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
+               if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+                       svm_clr_intercept(svm, INTERCEPT_RDTSCP);
+               else
+                       svm_set_intercept(svm, INTERCEPT_RDTSCP);
+       }
 }
 
 static void init_vmcb(struct kvm_vcpu *vcpu)
@@ -1235,8 +1246,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
        svm->current_vmcb->asid_generation = 0;
        svm->asid = 0;
 
-       svm->nested.vmcb12_gpa = 0;
-       svm->nested.last_vmcb12_gpa = 0;
+       svm->nested.vmcb12_gpa = INVALID_GPA;
+       svm->nested.last_vmcb12_gpa = INVALID_GPA;
        vcpu->arch.hflags = 0;
 
        if (!kvm_pause_in_guest(vcpu->kvm)) {
@@ -1248,7 +1259,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
                svm_clr_intercept(svm, INTERCEPT_PAUSE);
        }
 
-       svm_check_invpcid(svm);
+       svm_recalc_instruction_intercepts(vcpu, svm);
 
        /*
         * If the host supports V_SPEC_CTRL then disable the interception
@@ -1424,6 +1435,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
+       if (sev_es_guest(vcpu->kvm))
+               sev_es_unmap_ghcb(svm);
+
        if (svm->guest_state_loaded)
                return;
 
@@ -1445,8 +1459,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (static_cpu_has(X86_FEATURE_RDTSCP))
-               kvm_set_user_return_msr(TSC_AUX_URET_SLOT, svm->tsc_aux, -1ull);
+       if (likely(tsc_aux_uret_slot >= 0))
+               kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
 
        svm->guest_state_loaded = true;
 }
@@ -2655,11 +2669,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
                break;
        case MSR_TSC_AUX:
-               if (!boot_cpu_has(X86_FEATURE_RDTSCP))
-                       return 1;
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
-                       return 1;
                msr_info->data = svm->tsc_aux;
                break;
        /*
@@ -2876,30 +2885,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
                break;
        case MSR_TSC_AUX:
-               if (!boot_cpu_has(X86_FEATURE_RDTSCP))
-                       return 1;
-
-               if (!msr->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
-                       return 1;
-
-               /*
-                * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
-                * incomplete and conflicting architectural behavior.  Current
-                * AMD CPUs completely ignore bits 63:32, i.e. they aren't
-                * reserved and always read as zeros.  Emulate AMD CPU behavior
-                * to avoid explosions if the vCPU is migrated from an AMD host
-                * to an Intel host.
-                */
-               data = (u32)data;
-
                /*
                 * TSC_AUX is usually changed only during boot and never read
                 * directly.  Intercept TSC_AUX instead of exposing it to the
                 * guest via direct_access_msrs, and switch it via user return.
                 */
                preempt_disable();
-               r = kvm_set_user_return_msr(TSC_AUX_URET_SLOT, data, -1ull);
+               r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
                preempt_enable();
                if (r)
                        return 1;
@@ -3084,6 +3076,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [SVM_EXIT_STGI]                         = stgi_interception,
        [SVM_EXIT_CLGI]                         = clgi_interception,
        [SVM_EXIT_SKINIT]                       = skinit_interception,
+       [SVM_EXIT_RDTSCP]                       = kvm_handle_invalid_op,
        [SVM_EXIT_WBINVD]                       = kvm_emulate_wbinvd,
        [SVM_EXIT_MONITOR]                      = kvm_emulate_monitor,
        [SVM_EXIT_MWAIT]                        = kvm_emulate_mwait,
@@ -3972,8 +3965,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
                             guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
 
-       /* Check again if INVPCID interception if required */
-       svm_check_invpcid(svm);
+       svm_recalc_instruction_intercepts(vcpu, svm);
 
        /* For sev guests, the memory encryption bit is not reserved in CR3.  */
        if (sev_guest(vcpu->kvm)) {
index 84b3133c2251d1c2abcb4f99dd8d0dd1e6e0351e..2908c6ab5bb4f39091051780359fe4d3d093c395 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/bits.h>
 
 #include <asm/svm.h>
+#include <asm/sev-common.h>
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
@@ -479,7 +480,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
 
 #define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
 
-extern int avic;
+extern bool avic;
 
 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
 {
@@ -525,40 +526,9 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
 
 /* sev.c */
 
-#define GHCB_VERSION_MAX               1ULL
-#define GHCB_VERSION_MIN               1ULL
-
-#define GHCB_MSR_INFO_POS              0
-#define GHCB_MSR_INFO_MASK             (BIT_ULL(12) - 1)
-
-#define GHCB_MSR_SEV_INFO_RESP         0x001
-#define GHCB_MSR_SEV_INFO_REQ          0x002
-#define GHCB_MSR_VER_MAX_POS           48
-#define GHCB_MSR_VER_MAX_MASK          0xffff
-#define GHCB_MSR_VER_MIN_POS           32
-#define GHCB_MSR_VER_MIN_MASK          0xffff
-#define GHCB_MSR_CBIT_POS              24
-#define GHCB_MSR_CBIT_MASK             0xff
-#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)                           \
-       ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |   \
-        (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |   \
-        (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |        \
-        GHCB_MSR_SEV_INFO_RESP)
-
-#define GHCB_MSR_CPUID_REQ             0x004
-#define GHCB_MSR_CPUID_RESP            0x005
-#define GHCB_MSR_CPUID_FUNC_POS                32
-#define GHCB_MSR_CPUID_FUNC_MASK       0xffffffff
-#define GHCB_MSR_CPUID_VALUE_POS       32
-#define GHCB_MSR_CPUID_VALUE_MASK      0xffffffff
-#define GHCB_MSR_CPUID_REG_POS         30
-#define GHCB_MSR_CPUID_REG_MASK                0x3
-
-#define GHCB_MSR_TERM_REQ              0x100
-#define GHCB_MSR_TERM_REASON_SET_POS   12
-#define GHCB_MSR_TERM_REASON_SET_MASK  0xf
-#define GHCB_MSR_TERM_REASON_POS       16
-#define GHCB_MSR_TERM_REASON_MASK      0xff
+#define GHCB_VERSION_MAX       1ULL
+#define GHCB_VERSION_MIN       1ULL
+
 
 extern unsigned int max_sev_asid;
 
@@ -581,6 +551,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
+void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
 /* vmenter.S */
 
index a61c015870e33570a403c36f54a640b61f153495..4f839148948bc81107cc3eb5830bfd3b5d2ac8be 100644 (file)
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
        TP_ARGS(msg, err),
 
        TP_STRUCT__entry(
-               __field(const char *, msg)
+               __string(msg, msg)
                __field(u32, err)
        ),
 
        TP_fast_assign(
-               __entry->msg = msg;
+               __assign_str(msg, msg);
                __entry->err = err;
        ),
 
-       TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+       TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
                __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
 );
 
index d1d77985e889fbf98cc4f40f3ef90a4ae2e95329..aa0e7872fcc9f27f9efdf00ee87b011adea5282f 100644 (file)
@@ -90,8 +90,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)
 
 static inline bool cpu_has_vmx_posted_intr(void)
 {
-       return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
-               vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+       return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
 }
 
 static inline bool cpu_has_load_ia32_efer(void)
@@ -398,6 +397,9 @@ static inline u64 vmx_supported_debugctl(void)
 {
        u64 debugctl = 0;
 
+       if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+               debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+
        if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
                debugctl |= DEBUGCTLMSR_LBR_MASK;
 
index bced766378232944a527eb57b11ea59940d16a49..6058a65a6ede67883541752e2cff485cc6b43203 100644 (file)
@@ -3098,15 +3098,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
                        nested_vmx_handle_enlightened_vmptrld(vcpu, false);
 
                if (evmptrld_status == EVMPTRLD_VMFAIL ||
-                   evmptrld_status == EVMPTRLD_ERROR) {
-                       pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
-                                            __func__);
-                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       vcpu->run->internal.suberror =
-                               KVM_INTERNAL_ERROR_EMULATION;
-                       vcpu->run->internal.ndata = 0;
+                   evmptrld_status == EVMPTRLD_ERROR)
                        return false;
-               }
        }
 
        return true;
@@ -3194,8 +3187,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
-       if (!nested_get_evmcs_page(vcpu))
+       if (!nested_get_evmcs_page(vcpu)) {
+               pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+                                    __func__);
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror =
+                       KVM_INTERNAL_ERROR_EMULATION;
+               vcpu->run->internal.ndata = 0;
+
                return false;
+       }
 
        if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
                return false;
@@ -4435,7 +4436,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        /* Similarly, triple faults in L2 should never escape. */
        WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
 
-       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+       if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+               /*
+                * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
+                * Enlightened VMCS after migration and we still need to
+                * do that when something is forcing L2->L1 exit prior to
+                * the first L2 run.
+                */
+               (void)nested_get_evmcs_page(vcpu);
+       }
 
        /* Service the TLB flush request for L2 before switching to L1. */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
index 459748680daf2b93e40a67c56e7306a85a65121b..5f81ef092bd436b1a25ded21fbab536d6743ca24 100644 (file)
@@ -237,6 +237,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
 }
 
 
+/*
+ * Bail out of the block loop if the VM has an assigned
+ * device, but the blocking vCPU didn't reconfigure the
+ * PI.NV to the wakeup vector, i.e. the assigned device
+ * came along after the initial check in pi_pre_block().
+ */
+void vmx_pi_start_assignment(struct kvm *kvm)
+{
+       if (!irq_remapping_cap(IRQ_POSTING_CAP))
+               return;
+
+       kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
+}
+
 /*
  * pi_update_irte - set IRTE for Posted-Interrupts
  *
index 0bdc41391c5b8fae1d0f581944a6b21ee9156d26..7f7b2326caf53f3afc3197832310d07d54934135 100644 (file)
@@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu);
 bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
                   bool set);
+void vmx_pi_start_assignment(struct kvm *kvm);
 
 #endif /* __KVM_X86_VMX_POSTED_INTR_H */
index d000cddbd7349dc81aa0bf4731d971dda0e95b92..50b42d7a8a1178731b5282ac6c3a67e302420342 100644 (file)
@@ -455,21 +455,6 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 
 static unsigned long host_idt_base;
 
-/*
- * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
- * will emulate SYSCALL in legacy mode if the vendor string in guest
- * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
- * support this emulation, IA32_STAR must always be included in
- * vmx_uret_msrs_list[], even in i386 builds.
- */
-static const u32 vmx_uret_msrs_list[] = {
-#ifdef CONFIG_X86_64
-       MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
-#endif
-       MSR_EFER, MSR_TSC_AUX, MSR_STAR,
-       MSR_IA32_TSX_CTRL,
-};
-
 #if IS_ENABLED(CONFIG_HYPERV)
 static bool __read_mostly enlightened_vmcs = true;
 module_param(enlightened_vmcs, bool, 0444);
@@ -697,21 +682,11 @@ static bool is_valid_passthrough_msr(u32 msr)
        return r;
 }
 
-static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
-{
-       int i;
-
-       for (i = 0; i < vmx->nr_uret_msrs; ++i)
-               if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)
-                       return i;
-       return -1;
-}
-
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
 {
        int i;
 
-       i = __vmx_find_uret_msr(vmx, msr);
+       i = kvm_find_user_return_msr(msr);
        if (i >= 0)
                return &vmx->guest_uret_msrs[i];
        return NULL;
@@ -720,13 +695,14 @@ struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
                                  struct vmx_uret_msr *msr, u64 data)
 {
+       unsigned int slot = msr - vmx->guest_uret_msrs;
        int ret = 0;
 
        u64 old_msr_data = msr->data;
        msr->data = data;
-       if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
+       if (msr->load_into_hardware) {
                preempt_disable();
-               ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask);
+               ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
                preempt_enable();
                if (ret)
                        msr->data = old_msr_data;
@@ -1078,7 +1054,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
                return false;
        }
 
-       i = __vmx_find_uret_msr(vmx, MSR_EFER);
+       i = kvm_find_user_return_msr(MSR_EFER);
        if (i < 0)
                return false;
 
@@ -1240,11 +1216,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
         */
        if (!vmx->guest_uret_msrs_loaded) {
                vmx->guest_uret_msrs_loaded = true;
-               for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
-                       kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot,
+               for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+                       if (!vmx->guest_uret_msrs[i].load_into_hardware)
+                               continue;
+
+                       kvm_set_user_return_msr(i,
                                                vmx->guest_uret_msrs[i].data,
                                                vmx->guest_uret_msrs[i].mask);
-
+               }
        }
 
        if (vmx->nested.need_vmcs12_to_shadow_sync)
@@ -1751,19 +1730,16 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
        vmx_clear_hlt(vcpu);
 }
 
-static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
+static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
+                              bool load_into_hardware)
 {
-       struct vmx_uret_msr tmp;
-       int from, to;
+       struct vmx_uret_msr *uret_msr;
 
-       from = __vmx_find_uret_msr(vmx, msr);
-       if (from < 0)
+       uret_msr = vmx_find_uret_msr(vmx, msr);
+       if (!uret_msr)
                return;
-       to = vmx->nr_active_uret_msrs++;
 
-       tmp = vmx->guest_uret_msrs[to];
-       vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
-       vmx->guest_uret_msrs[from] = tmp;
+       uret_msr->load_into_hardware = load_into_hardware;
 }
 
 /*
@@ -1773,29 +1749,42 @@ static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-       vmx->guest_uret_msrs_loaded = false;
-       vmx->nr_active_uret_msrs = 0;
 #ifdef CONFIG_X86_64
+       bool load_syscall_msrs;
+
        /*
         * The SYSCALL MSRs are only needed on long mode guests, and only
         * when EFER.SCE is set.
         */
-       if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
-               vmx_setup_uret_msr(vmx, MSR_STAR);
-               vmx_setup_uret_msr(vmx, MSR_LSTAR);
-               vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
-       }
+       load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
+                           (vmx->vcpu.arch.efer & EFER_SCE);
+
+       vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
+       vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
+       vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
 #endif
-       if (update_transition_efer(vmx))
-               vmx_setup_uret_msr(vmx, MSR_EFER);
+       vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
 
-       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
-               vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
+       vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
+                          guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
+                          guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
 
-       vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+       /*
+        * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
+        * kernel and old userspace.  If those guests run on a tsx=off host, do
+        * allow guests to use TSX_CTRL, but don't change the value in hardware
+        * so that TSX remains always disabled.
+        */
+       vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
 
        if (cpu_has_vmx_msr_bitmap())
                vmx_update_msr_bitmap(&vmx->vcpu);
+
+       /*
+        * The set of MSRs to load may have changed, reload MSRs before the
+        * next VM-Enter.
+        */
+       vmx->guest_uret_msrs_loaded = false;
 }
 
 static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
@@ -1993,11 +1982,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                else
                        msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
                break;
-       case MSR_TSC_AUX:
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
-                       return 1;
-               goto find_uret_msr;
        case MSR_IA32_DEBUGCTLMSR:
                msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
                break;
@@ -2031,6 +2015,9 @@ static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
        if (!intel_pmu_lbr_is_enabled(vcpu))
                debugctl &= ~DEBUGCTLMSR_LBR_MASK;
 
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+               debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+
        return debugctl;
 }
 
@@ -2313,14 +2300,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                else
                        vmx->pt_desc.guest.addr_a[index / 2] = data;
                break;
-       case MSR_TSC_AUX:
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
-                       return 1;
-               /* Check reserved bit, higher 32 bits should be zero */
-               if ((data >> 32) != 0)
-                       return 1;
-               goto find_uret_msr;
        case MSR_IA32_PERF_CAPABILITIES:
                if (data && !vcpu_to_pmu(vcpu)->version)
                        return 1;
@@ -4369,7 +4348,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
                                                  xsaves_enabled, false);
        }
 
-       vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
+       /*
+        * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
+        * feature is exposed to the guest.  This creates a virtualization hole
+        * if both are supported in hardware but only one is exposed to the
+        * guest, but letting the guest execute RDTSCP or RDPID when either one
+        * is advertised is preferable to emulating the advertised instruction
+        * in KVM on #UD, and obviously better than incorrectly injecting #UD.
+        */
+       if (cpu_has_vmx_rdtscp()) {
+               bool rdpid_or_rdtscp_enabled =
+                       guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
+                       guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+
+               vmx_adjust_secondary_exec_control(vmx, &exec_control,
+                                                 SECONDARY_EXEC_ENABLE_RDTSCP,
+                                                 rdpid_or_rdtscp_enabled, false);
+       }
        vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
 
        vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
@@ -4848,7 +4843,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_run *kvm_run = vcpu->run;
        u32 intr_info, ex_no, error_code;
-       unsigned long cr2, rip, dr6;
+       unsigned long cr2, dr6;
        u32 vect_info;
 
        vect_info = vmx->idt_vectoring_info;
@@ -4938,8 +4933,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
                vmx->vcpu.arch.event_exit_inst_len =
                        vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
-               rip = kvm_rip_read(vcpu);
-               kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+               kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
                kvm_run->debug.arch.exception = ex_no;
                break;
        case AC_VECTOR:
@@ -6855,6 +6849,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 
 static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
+       struct vmx_uret_msr *tsx_ctrl;
        struct vcpu_vmx *vmx;
        int i, cpu, err;
 
@@ -6877,43 +6872,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
                        goto free_vpid;
        }
 
-       BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
-
-       for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
-               u32 index = vmx_uret_msrs_list[i];
-               u32 data_low, data_high;
-               int j = vmx->nr_uret_msrs;
-
-               if (rdmsr_safe(index, &data_low, &data_high) < 0)
-                       continue;
-               if (wrmsr_safe(index, data_low, data_high) < 0)
-                       continue;
-
-               vmx->guest_uret_msrs[j].slot = i;
-               vmx->guest_uret_msrs[j].data = 0;
-               switch (index) {
-               case MSR_IA32_TSX_CTRL:
-                       /*
-                        * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
-                        * interception.  Keep the host value unchanged to avoid
-                        * changing CPUID bits under the host kernel's feet.
-                        *
-                        * hle=0, rtm=0, tsx_ctrl=1 can be found with some
-                        * combinations of new kernel and old userspace.  If
-                        * those guests run on a tsx=off host, do allow guests
-                        * to use TSX_CTRL, but do not change the value on the
-                        * host so that TSX remains always disabled.
-                        */
-                       if (boot_cpu_has(X86_FEATURE_RTM))
-                               vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
-                       else
-                               vmx->guest_uret_msrs[j].mask = 0;
-                       break;
-               default:
-                       vmx->guest_uret_msrs[j].mask = -1ull;
-                       break;
-               }
-               ++vmx->nr_uret_msrs;
+       for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+               vmx->guest_uret_msrs[i].data = 0;
+               vmx->guest_uret_msrs[i].mask = -1ull;
+       }
+       if (boot_cpu_has(X86_FEATURE_RTM)) {
+               /*
+                * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
+                * Keep the host value unchanged to avoid changing CPUID bits
+                * under the host kernel's feet.
+                */
+               tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+               if (tsx_ctrl)
+                       tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
        }
 
        err = alloc_loaded_vmcs(&vmx->vmcs01);
@@ -7344,9 +7315,11 @@ static __init void vmx_set_cpu_caps(void)
        if (!cpu_has_vmx_xsaves())
                kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
 
-       /* CPUID 0x80000001 */
-       if (!cpu_has_vmx_rdtscp())
+       /* CPUID 0x80000001 and 0x7 (RDPID) */
+       if (!cpu_has_vmx_rdtscp()) {
                kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+               kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+       }
 
        if (cpu_has_vmx_waitpkg())
                kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
@@ -7402,8 +7375,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
        /*
         * RDPID causes #UD if disabled through secondary execution controls.
         * Because it is marked as EmulateOnUD, we need to intercept it here.
+        * Note, RDPID is hidden behind ENABLE_RDTSCP.
         */
-       case x86_intercept_rdtscp:
+       case x86_intercept_rdpid:
                if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
                        exception->vector = UD_VECTOR;
                        exception->error_code_valid = false;
@@ -7746,6 +7720,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .nested_ops = &vmx_nested_ops,
 
        .update_pi_irte = pi_update_irte,
+       .start_assignment = vmx_pi_start_assignment,
 
 #ifdef CONFIG_X86_64
        .set_hv_timer = vmx_set_hv_timer,
@@ -7769,17 +7744,42 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 };
 
+static __init void vmx_setup_user_return_msrs(void)
+{
+
+       /*
+        * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
+        * will emulate SYSCALL in legacy mode if the vendor string in guest
+        * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
+        * support this emulation, MSR_STAR is included in the list for i386,
+        * but is never loaded into hardware.  MSR_CSTAR is also never loaded
+        * into hardware and is here purely for emulation purposes.
+        */
+       const u32 vmx_uret_msrs_list[] = {
+       #ifdef CONFIG_X86_64
+               MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
+       #endif
+               MSR_EFER, MSR_TSC_AUX, MSR_STAR,
+               MSR_IA32_TSX_CTRL,
+       };
+       int i;
+
+       BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
+
+       for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
+               kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
+}
+
 static __init int hardware_setup(void)
 {
        unsigned long host_bndcfgs;
        struct desc_ptr dt;
-       int r, i, ept_lpage_level;
+       int r, ept_lpage_level;
 
        store_idt(&dt);
        host_idt_base = dt.address;
 
-       for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
-               kvm_define_user_return_msr(i, vmx_uret_msrs_list[i]);
+       vmx_setup_user_return_msrs();
 
        if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
                return -EIO;
index 008cb87ff088cb2979b30036137252ce28fb774c..16e4e457ba23c98ef20798e880022f092921ed97 100644 (file)
@@ -36,7 +36,7 @@ struct vmx_msrs {
 };
 
 struct vmx_uret_msr {
-       unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
+       bool load_into_hardware;
        u64 data;
        u64 mask;
 };
@@ -245,8 +245,16 @@ struct vcpu_vmx {
        u32                   idt_vectoring_info;
        ulong                 rflags;
 
+       /*
+        * User return MSRs are always emulated when enabled in the guest, but
+        * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
+        * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
+        * be loaded into hardware if those conditions aren't met.
+        * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
+        * into hardware when running the guest.  guest_uret_msrs[] is resorted
+        * whenever the number of "active" uret MSRs is modified.
+        */
        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
-       int                   nr_uret_msrs;
        int                   nr_active_uret_msrs;
        bool                  guest_uret_msrs_loaded;
 #ifdef CONFIG_X86_64
index 6eda2834fc05ef07bef81d873aa80692169dcce9..6d3955a6a7639436177cff50d526b933340173f6 100644 (file)
@@ -184,11 +184,6 @@ module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
  */
 #define KVM_MAX_NR_USER_RETURN_MSRS 16
 
-struct kvm_user_return_msrs_global {
-       int nr;
-       u32 msrs[KVM_MAX_NR_USER_RETURN_MSRS];
-};
-
 struct kvm_user_return_msrs {
        struct user_return_notifier urn;
        bool registered;
@@ -198,7 +193,9 @@ struct kvm_user_return_msrs {
        } values[KVM_MAX_NR_USER_RETURN_MSRS];
 };
 
-static struct kvm_user_return_msrs_global __read_mostly user_return_msrs_global;
+u32 __read_mostly kvm_nr_uret_msrs;
+EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
+static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
 static struct kvm_user_return_msrs __percpu *user_return_msrs;
 
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
@@ -330,23 +327,53 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
                user_return_notifier_unregister(urn);
        }
        local_irq_restore(flags);
-       for (slot = 0; slot < user_return_msrs_global.nr; ++slot) {
+       for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
                values = &msrs->values[slot];
                if (values->host != values->curr) {
-                       wrmsrl(user_return_msrs_global.msrs[slot], values->host);
+                       wrmsrl(kvm_uret_msrs_list[slot], values->host);
                        values->curr = values->host;
                }
        }
 }
 
-void kvm_define_user_return_msr(unsigned slot, u32 msr)
+static int kvm_probe_user_return_msr(u32 msr)
 {
-       BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
-       user_return_msrs_global.msrs[slot] = msr;
-       if (slot >= user_return_msrs_global.nr)
-               user_return_msrs_global.nr = slot + 1;
+       u64 val;
+       int ret;
+
+       preempt_disable();
+       ret = rdmsrl_safe(msr, &val);
+       if (ret)
+               goto out;
+       ret = wrmsrl_safe(msr, val);
+out:
+       preempt_enable();
+       return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
+
+int kvm_add_user_return_msr(u32 msr)
+{
+       BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
+
+       if (kvm_probe_user_return_msr(msr))
+               return -1;
+
+       kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
+       return kvm_nr_uret_msrs++;
+}
+EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
+
+int kvm_find_user_return_msr(u32 msr)
+{
+       int i;
+
+       for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+               if (kvm_uret_msrs_list[i] == msr)
+                       return i;
+       }
+       return -1;
+}
+EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
 
 static void kvm_user_return_msr_cpu_online(void)
 {
@@ -355,8 +382,8 @@ static void kvm_user_return_msr_cpu_online(void)
        u64 value;
        int i;
 
-       for (i = 0; i < user_return_msrs_global.nr; ++i) {
-               rdmsrl_safe(user_return_msrs_global.msrs[i], &value);
+       for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+               rdmsrl_safe(kvm_uret_msrs_list[i], &value);
                msrs->values[i].host = value;
                msrs->values[i].curr = value;
        }
@@ -371,7 +398,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
        value = (value & mask) | (msrs->values[slot].host & ~mask);
        if (value == msrs->values[slot].curr)
                return 0;
-       err = wrmsrl_safe(user_return_msrs_global.msrs[slot], value);
+       err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
        if (err)
                return 1;
 
@@ -1149,6 +1176,9 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
                fixed |= DR6_RTM;
+
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+               fixed |= DR6_BUS_LOCK;
        return fixed;
 }
 
@@ -1615,6 +1645,30 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
                 * invokes 64-bit SYSENTER.
                 */
                data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+               break;
+       case MSR_TSC_AUX:
+               if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+                       return 1;
+
+               if (!host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+                       return 1;
+
+               /*
+                * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
+                * incomplete and conflicting architectural behavior.  Current
+                * AMD CPUs completely ignore bits 63:32, i.e. they aren't
+                * reserved and always read as zeros.  Enforce Intel's reserved
+                * bits check if and only if the guest CPU is Intel, and clear
+                * the bits in all other cases.  This ensures cross-vendor
+                * migration will provide consistent behavior for the guest.
+                */
+               if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
+                       return 1;
+
+               data = (u32)data;
+               break;
        }
 
        msr.data = data;
@@ -1651,6 +1705,18 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
        if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
                return KVM_MSR_RET_FILTERED;
 
+       switch (index) {
+       case MSR_TSC_AUX:
+               if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+                       return 1;
+
+               if (!host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+                       return 1;
+               break;
+       }
+
        msr.index = index;
        msr.host_initiated = host_initiated;
 
@@ -3006,6 +3072,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.tlb_flush;
+
+       if (!tdp_enabled) {
+               /*
+                * A TLB flush on behalf of the guest is equivalent to
+                * INVPCID(all), toggling CR4.PGE, etc., which requires
+                * a forced sync of the shadow page tables.  Unload the
+                * entire MMU here and the subsequent load will sync the
+                * shadow page tables, and also flush the TLB.
+                */
+               kvm_mmu_unload(vcpu);
+               return;
+       }
+
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
 }
 
@@ -3035,10 +3114,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
         * expensive IPIs.
         */
        if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+               u8 st_preempted = xchg(&st->preempted, 0);
+
                trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-                                      st->preempted & KVM_VCPU_FLUSH_TLB);
-               if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+                                      st_preempted & KVM_VCPU_FLUSH_TLB);
+               if (st_preempted & KVM_VCPU_FLUSH_TLB)
                        kvm_vcpu_flush_tlb_guest(vcpu);
+       } else {
+               st->preempted = 0;
        }
 
        vcpu->arch.st.preempted = 0;
@@ -3402,7 +3485,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
-       case MSR_K8_SYSCFG:
+       case MSR_AMD64_SYSCFG:
        case MSR_K8_TSEG_ADDR:
        case MSR_K8_TSEG_MASK:
        case MSR_VM_HSAVE_PA:
@@ -5468,14 +5551,18 @@ static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
                              struct kvm_msr_filter_range *user_range)
 {
-       struct msr_bitmap_range range;
        unsigned long *bitmap = NULL;
        size_t bitmap_size;
-       int r;
 
        if (!user_range->nmsrs)
                return 0;
 
+       if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE))
+               return -EINVAL;
+
+       if (!user_range->flags)
+               return -EINVAL;
+
        bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
        if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
                return -EINVAL;
@@ -5484,31 +5571,15 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
        if (IS_ERR(bitmap))
                return PTR_ERR(bitmap);
 
-       range = (struct msr_bitmap_range) {
+       msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
                .flags = user_range->flags,
                .base = user_range->base,
                .nmsrs = user_range->nmsrs,
                .bitmap = bitmap,
        };
 
-       if (range.flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) {
-               r = -EINVAL;
-               goto err;
-       }
-
-       if (!range.flags) {
-               r = -EINVAL;
-               goto err;
-       }
-
-       /* Everything ok, add this range identifier. */
-       msr_filter->ranges[msr_filter->count] = range;
        msr_filter->count++;
-
        return 0;
-err:
-       kfree(bitmap);
-       return r;
 }
 
 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
@@ -5937,7 +6008,8 @@ static void kvm_init_msr_list(void)
                                continue;
                        break;
                case MSR_TSC_AUX:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
+                           !kvm_cpu_cap_has(X86_FEATURE_RDPID))
                                continue;
                        break;
                case MSR_IA32_UMWAIT_CONTROL:
@@ -7171,6 +7243,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
        BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
 
+       ctxt->interruptibility = 0;
+       ctxt->have_exception = false;
+       ctxt->exception.vector = -1;
+       ctxt->perm_ok = false;
+
        init_decode_cache(ctxt);
        vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
 }
@@ -7506,14 +7583,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
            kvm_vcpu_check_breakpoint(vcpu, &r))
                return r;
 
-       ctxt->interruptibility = 0;
-       ctxt->have_exception = false;
-       ctxt->exception.vector = -1;
-       ctxt->perm_ok = false;
-
-       ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
-       r = x86_decode_insn(ctxt, insn, insn_len);
+       r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
 
        trace_kvm_emulate_insn_start(vcpu);
        ++vcpu->stat.insn_emulation;
@@ -8039,6 +8109,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
 
+/*
+ * Indirection to move queue_work() out of the tk_core.seq write held
+ * region to prevent possible deadlocks against time accessors which
+ * are invoked with work related locks held.
+ */
+static void pvclock_irq_work_fn(struct irq_work *w)
+{
+       queue_work(system_long_wq, &pvclock_gtod_work);
+}
+
+static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
+
 /*
  * Notification about pvclock gtod data update.
  */
@@ -8050,13 +8132,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
 
        update_pvclock_gtod(tk);
 
-       /* disable master clock if host does not trust, or does not
-        * use, TSC based clocksource.
+       /*
+        * Disable master clock if host does not trust, or does not use,
+        * TSC based clocksource. Delegate queue_work() to irq_work as
+        * this is invoked with tk_core.seq write held.
         */
        if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
            atomic_read(&kvm_guest_has_master_clock) != 0)
-               queue_work(system_long_wq, &pvclock_gtod_work);
-
+               irq_work_queue(&pvclock_irq_work);
        return 0;
 }
 
@@ -8118,6 +8201,7 @@ int kvm_arch_init(void *opaque)
                printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n");
                goto out_free_x86_emulator_cache;
        }
+       kvm_nr_uret_msrs = 0;
 
        r = kvm_mmu_module_init();
        if (r)
@@ -8168,6 +8252,8 @@ void kvm_arch_exit(void)
        cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
 #ifdef CONFIG_X86_64
        pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
+       irq_work_sync(&pvclock_irq_work);
+       cancel_work_sync(&pvclock_gtod_work);
 #endif
        kvm_x86_ops.hardware_enable = NULL;
        kvm_mmu_module_exit();
@@ -8289,6 +8375,9 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 
        vcpu->stat.directed_yield_attempted++;
 
+       if (single_task_running())
+               goto no_yield;
+
        rcu_read_lock();
        map = rcu_dereference(vcpu->kvm->arch.apic_map);
 
@@ -9425,7 +9514,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (r <= 0)
                        break;
 
-               kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
+               kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
                if (kvm_cpu_has_pending_timer(vcpu))
                        kvm_inject_pending_timer_irqs(vcpu);
 
@@ -10044,8 +10133,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
        kvm_update_dr7(vcpu);
 
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
-                       get_segment_base(vcpu, VCPU_SREG_CS);
+               vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
 
        /*
         * Trigger an rflags update that will inject or remove the trace
@@ -11428,7 +11516,8 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 
 void kvm_arch_start_assignment(struct kvm *kvm)
 {
-       atomic_inc(&kvm->arch.assigned_device_count);
+       if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
+               static_call_cond(kvm_x86_start_assignment)(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
 
index b93d6cd08a7ff0ce1bfa14a3001ad6be22dbeed3..121921b2927cb8ba90fbcda748004dec06e301ca 100644 (file)
@@ -5,7 +5,7 @@
 #include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 #include <asm/traps.h>
 #include <asm/kdebug.h>
 
index 1c548ad0075204e3d129913be2767b64d1edb2fc..6bda7f67d737e9bb88102b295ff013d210a4a377 100644 (file)
@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 
        if (si_code == SEGV_PKUERR)
                force_sig_pkuerr((void __user *)address, pkey);
-
-       force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+       else
+               force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 
        local_irq_disable();
 }
index 04aba7e80a3620aa1285e396364e3557282a7d6d..470b202084306e7fac234ef2318d12d1c90dba67 100644 (file)
@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
 #define AMD_SME_BIT    BIT(0)
 #define AMD_SEV_BIT    BIT(1)
 
-       /* Check the SEV MSR whether SEV or SME is enabled */
-       sev_status   = __rdmsr(MSR_AMD64_SEV);
-       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
-
        /*
         * Check for the SME/SEV feature:
         *   CPUID Fn8000_001F[EAX]
@@ -519,17 +515,22 @@ void __init sme_enable(struct boot_params *bp)
        eax = 0x8000001f;
        ecx = 0;
        native_cpuid(&eax, &ebx, &ecx, &edx);
-       if (!(eax & feature_mask))
+       /* Check whether SEV or SME is supported */
+       if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
                return;
 
        me_mask = 1UL << (ebx & 0x3f);
 
+       /* Check the SEV MSR whether SEV or SME is enabled */
+       sev_status   = __rdmsr(MSR_AMD64_SEV);
+       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
        /* Check if memory encryption is enabled */
        if (feature_mask == AMD_SME_BIT) {
                /*
                 * No SME if Hypervisor bit is set. This check is here to
                 * prevent a guest from trying to enable SME. For running as a
-                * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
+                * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
                 * might be other hypervisors which emulate that MSR as non-zero
                 * or even pass it through to the guest.
                 * A malicious hypervisor can still trick a guest into this
@@ -542,8 +543,8 @@ void __init sme_enable(struct boot_params *bp)
                        return;
 
                /* For SME, check the SYSCFG MSR */
-               msr = __rdmsr(MSR_K8_SYSCFG);
-               if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+               msr = __rdmsr(MSR_AMD64_SYSCFG);
+               if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                        return;
        } else {
                /* SEV state cannot be controlled by a command line option */
index ae744b6a0785639185b633b84a5a37a9787c309b..dd40d3fea74e4e43fccb66752c15076fc702cbff 100644 (file)
@@ -284,7 +284,7 @@ static int __init early_root_info_init(void)
 
        /* need to take out [4G, TOM2) for RAM*/
        /* SYS_CFG */
-       address = MSR_K8_SYSCFG;
+       address = MSR_AMD64_SYSCFG;
        rdmsrl(address, val);
        /* TOP_MEM2 is enabled? */
        if (val & (1<<21)) {
index df7b5477fc4f2a0306a8103de7510e9affe4f4e3..7515e78ef89832fad802bc475249a672dbc79e98 100644 (file)
@@ -47,7 +47,7 @@
 #include <asm/realmode.h>
 #include <asm/time.h>
 #include <asm/pgalloc.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 /*
  * We allocate runtime services regions top-down, starting from -4G, i.e.
index 7850111008a8b87e4c5a63ea984fcfec18af8184..b15ebfe40a73ea16660d68c6aa624094bbe8a8ad 100644 (file)
@@ -450,6 +450,18 @@ void __init efi_free_boot_services(void)
                        size -= rm_size;
                }
 
+               /*
+                * Don't free memory under 1M for two reasons:
+                * - BIOS might clobber it
+                * - Crash kernel needs it to be reserved
+                */
+               if (start + size < SZ_1M)
+                       continue;
+               if (start < SZ_1M) {
+                       size -= (SZ_1M - start);
+                       start = SZ_1M;
+               }
+
                memblock_free_late(start, size);
        }
 
index 1be71ef5e4c4ed52ed074a90bf7d6d55efc901fb..6534c92d0f83f0baed036717f465235b011db851 100644 (file)
@@ -9,7 +9,7 @@
 #include <asm/realmode.h>
 #include <asm/tlbflush.h>
 #include <asm/crash.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
 
 struct real_mode_header *real_mode_header;
 u32 *trampoline_cr4_features;
@@ -29,14 +29,16 @@ void __init reserve_real_mode(void)
 
        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-       if (!mem) {
+       if (!mem)
                pr_info("No sub-1M memory is available for the trampoline\n");
-               return;
-       }
+       else
+               set_real_mode_mem(mem);
 
-       memblock_reserve(mem, size);
-       set_real_mode_mem(mem);
-       crash_reserve_low_1M();
+       /*
+        * Unconditionally reserve the entire fisrt 1M, see comment in
+        * setup_arch().
+        */
+       memblock_reserve(0, SZ_1M);
 }
 
 static void sme_sev_setup_real_mode(struct trampoline_header *th)
index 84c5d1b33d100b33d61a316483f5c34c435edba5..cc8391f86cdb61469b2b775a64490e470b2ef534 100644 (file)
@@ -123,9 +123,9 @@ SYM_CODE_START(startup_32)
         */
        btl     $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
        jnc     .Ldone
-       movl    $MSR_K8_SYSCFG, %ecx
+       movl    $MSR_AMD64_SYSCFG, %ecx
        rdmsr
-       bts     $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
+       bts     $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
        jc      .Ldone
 
        /*
index 17503fed20177ba8a258848a0ce3e4e7f8499fde..e87699aa2dc8263d29054bba0adf937f7841e978 100644 (file)
@@ -1273,16 +1273,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
        /* Get mfn list */
        xen_build_dynamic_phys_to_machine();
 
+       /* Work out if we support NX */
+       get_cpu_cap(&boot_cpu_data);
+       x86_configure_nx();
+
        /*
         * Set up kernel GDT and segment registers, mainly so that
         * -fstack-protector code can be executed.
         */
        xen_setup_gdt(0);
 
-       /* Work out if we support NX */
-       get_cpu_cap(&boot_cpu_data);
-       x86_configure_nx();
-
        /* Determine virtual and physical address sizes */
        get_cpu_address_sizes(&boot_cpu_data);
 
index 9d76d433d3d67012b73ae48fc7232e0294a92973..fd2f30227d961d7e356045142c1a8bb33044b269 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
-443    common  quotactl_path                   sys_quotactl_path
+# 443 reserved for quotactl_path
 444    common  landlock_create_ruleset         sys_landlock_create_ruleset
 445    common  landlock_add_rule               sys_landlock_add_rule
 446    common  landlock_restrict_self          sys_landlock_restrict_self
index 0270cd7ca1658fd3e9022aa7cf65099ef7dfe9da..acd1f881273e0dcfbe75933791ebf765a9939b23 100644 (file)
@@ -372,9 +372,38 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
        return bic->bfqq[is_sync];
 }
 
+static void bfq_put_stable_ref(struct bfq_queue *bfqq);
+
 void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
 {
+       /*
+        * If bfqq != NULL, then a non-stable queue merge between
+        * bic->bfqq and bfqq is happening here. This causes troubles
+        * in the following case: bic->bfqq has also been scheduled
+        * for a possible stable merge with bic->stable_merge_bfqq,
+        * and bic->stable_merge_bfqq == bfqq happens to
+        * hold. Troubles occur because bfqq may then undergo a split,
+        * thereby becoming eligible for a stable merge. Yet, if
+        * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
+        * would be stably merged with itself. To avoid this anomaly,
+        * we cancel the stable merge if
+        * bic->stable_merge_bfqq == bfqq.
+        */
        bic->bfqq[is_sync] = bfqq;
+
+       if (bfqq && bic->stable_merge_bfqq == bfqq) {
+               /*
+                * Actually, these same instructions are executed also
+                * in bfq_setup_cooperator, in case of abort or actual
+                * execution of a stable merge. We could avoid
+                * repeating these instructions there too, but if we
+                * did so, we would nest even more complexity in this
+                * function.
+                */
+               bfq_put_stable_ref(bic->stable_merge_bfqq);
+
+               bic->stable_merge_bfqq = NULL;
+       }
 }
 
 struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
@@ -2263,10 +2292,9 @@ static void bfq_remove_request(struct request_queue *q,
 
 }
 
-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
-       struct request_queue *q = hctx->queue;
        struct bfq_data *bfqd = q->elevator->elevator_data;
        struct request *free = NULL;
        /*
@@ -2631,8 +2659,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
 static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
                                             struct bfq_queue *bfqq);
 
-static void bfq_put_stable_ref(struct bfq_queue *bfqq);
-
 /*
  * Attempt to schedule a merge of bfqq with the currently in-service
  * queue or with a close queue among the scheduled queues.  Return
index e0c4baa0185783a586e83faf75b8a62a6b1dace2..c2d6bc88d3f15882ca39569cc370f4bb0b572982 100644 (file)
@@ -1069,7 +1069,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
 
        lockdep_assert_held(&ioc->lock);
 
-       inuse = clamp_t(u32, inuse, 1, active);
+       /*
+        * For an active leaf node, its inuse shouldn't be zero or exceed
+        * @active. An active internal node's inuse is solely determined by the
+        * inuse to active ratio of its children regardless of @inuse.
+        */
+       if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
+               inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
+                                          iocg->child_active_sum);
+       } else {
+               inuse = clamp_t(u32, inuse, 1, active);
+       }
 
        iocg->last_inuse = iocg->inuse;
        if (save)
@@ -1086,7 +1096,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
                /* update the level sums */
                parent->child_active_sum += (s32)(active - child->active);
                parent->child_inuse_sum += (s32)(inuse - child->inuse);
-               /* apply the udpates */
+               /* apply the updates */
                child->active = active;
                child->inuse = inuse;
 
index 42a365b1b9c0e395936371e1c7d9c2adcefb2549..996a4b2f73aa9c6a4422b12c853f05b10e25f5a8 100644 (file)
@@ -358,14 +358,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
        struct elevator_queue *e = q->elevator;
-       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+       struct blk_mq_ctx *ctx;
+       struct blk_mq_hw_ctx *hctx;
        bool ret = false;
        enum hctx_type type;
 
        if (e && e->type->ops.bio_merge)
-               return e->type->ops.bio_merge(hctx, bio, nr_segs);
+               return e->type->ops.bio_merge(q, bio, nr_segs);
 
+       ctx = blk_mq_get_ctx(q);
+       hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        type = hctx->type;
        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
            list_empty_careful(&ctx->rq_lists[type]))
index 466676bc2f0be5aa75bbe1ff83d73d3f92e7d1dd..c86c01bfecdbe51e67128bc952c2780c4a9eecab 100644 (file)
@@ -2232,8 +2232,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
                /* Bypass scheduler for flush requests */
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
-       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
-                               !blk_queue_nonrot(q))) {
+       } else if (plug && (q->nr_hw_queues == 1 ||
+                  blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
+                  q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
                /*
                 * Use plugging if we have a ->commit_rqs() hook as well, as
                 * we know the driver uses bd->last in a smart fashion.
@@ -3285,10 +3286,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 /* tags can _not_ be used after returning from blk_mq_exit_queue */
 void blk_mq_exit_queue(struct request_queue *q)
 {
-       struct blk_mq_tag_set   *set = q->tag_set;
+       struct blk_mq_tag_set *set = q->tag_set;
 
-       blk_mq_del_queue_tag_set(q);
+       /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+       /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
+       blk_mq_del_queue_tag_set(q);
 }
 
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
index 39ca97b0edc612ad2828952e7424d0a232bbf640..9f8cb7beaad1160b249912919e6fcf4e1f9c1a11 100644 (file)
@@ -29,8 +29,6 @@
 
 static struct kobject *block_depr;
 
-DECLARE_RWSEM(bdev_lookup_sem);
-
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT            (1 << MINORBITS)
 static DEFINE_IDA(ext_devt_ida);
@@ -609,13 +607,8 @@ void del_gendisk(struct gendisk *disk)
        blk_integrity_del(disk);
        disk_del_events(disk);
 
-       /*
-        * Block lookups of the disk until all bdevs are unhashed and the
-        * disk is marked as dead (GENHD_FL_UP cleared).
-        */
-       down_write(&bdev_lookup_sem);
-
        mutex_lock(&disk->part0->bd_mutex);
+       disk->flags &= ~GENHD_FL_UP;
        blk_drop_partitions(disk);
        mutex_unlock(&disk->part0->bd_mutex);
 
@@ -629,8 +622,6 @@ void del_gendisk(struct gendisk *disk)
        remove_inode_hash(disk->part0->bd_inode);
 
        set_capacity(disk, 0);
-       disk->flags &= ~GENHD_FL_UP;
-       up_write(&bdev_lookup_sem);
 
        if (!(disk->flags & GENHD_FL_HIDDEN)) {
                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
index 8969e122f08110ed526f93522e0b205a1a587a18..81e3279ecd5747a99f0902b37de4d1f02388d12d 100644 (file)
@@ -561,11 +561,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
        }
 }
 
-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
+       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        struct kyber_hctx_data *khd = hctx->sched_data;
-       struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
        struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
        unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
        struct list_head *rq_list = &kcq->rq_list[sched_domain];
index 04aded71ead2732c7c5c16a8e591c741f84c47c1..8eea2cbf2bf4ad1f509dffac7df6b5efb75f7c6d 100644 (file)
@@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
        return ELEVATOR_NO_MERGE;
 }
 
-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
 {
-       struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;
index b64bfdd4326c9d1d54c033a4c40ec510bc828dac..e2716792ecc13c6d0be975049c0aaa81e7257839 100644 (file)
@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
 }
 
 /**
- * efi_partition(struct parsed_partitions *state)
+ * efi_partition - scan for GPT partitions
  * @state: disk parsed partitions
  *
  * Description: called from check.c, if the disk contains GPT
index 6cd7f7025df47873261988e29532ef4938de0481..d8a91521144e0a58d0e8797d07cd13b09ec7f7a1 100644 (file)
@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
                if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
                        src_cnt--;
                        src_list++;
-                       src_offs++;
+                       if (src_offs)
+                               src_offs++;
                }
 
                /* wait for any prerequisite operations */
index eedec61e3476ebec2d85e16efcd15bc80d3f5a05..3972de7b75653ce15f59ee7ebb5d27f6373239d1 100644 (file)
@@ -543,3 +543,8 @@ config X86_PM_TIMER
 
          You should nearly always say Y here because many modern
          systems require this timer.
+
+config ACPI_PRMT
+       bool "Platform Runtime Mechanism Support"
+       depends on EFI && X86_64
+       default y
index 700b41adf2db6dd82f272b5a0983d89a9ff7b00e..ceb1aed4b1fc9eff7bd8bbb78ec19196f31712c5 100644 (file)
@@ -8,6 +8,11 @@ ccflags-$(CONFIG_ACPI_DEBUG)   += -DACPI_DEBUG_OUTPUT
 #
 # ACPI Boot-Time Table Parsing
 #
+ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
+tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
+
+endif
+
 obj-$(CONFIG_ACPI)             += tables.o
 obj-$(CONFIG_X86)              += blacklist.o
 
@@ -61,6 +66,7 @@ acpi-$(CONFIG_ACPI_FPDT)      += acpi_fpdt.o
 acpi-$(CONFIG_ACPI_LPIT)       += acpi_lpit.o
 acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
 acpi-$(CONFIG_ACPI_WATCHDOG)   += acpi_watchdog.o
+acpi-$(CONFIG_ACPI_PRMT)       += prmt.o
 
 # Address translation
 acpi-$(CONFIG_ACPI_ADXL)       += acpi_adxl.o
index 0ec5b3f69112774f176ee1b12a185d4e5350e755..6e02448d15d951967de0972843770529e6bc3852 100644 (file)
@@ -226,6 +226,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
        { "AMDI0010", APD_ADDR(wt_i2c_desc) },
        { "AMD0020", APD_ADDR(cz_uart_desc) },
        { "AMDI0020", APD_ADDR(cz_uart_desc) },
+       { "AMDI0022", APD_ADDR(cz_uart_desc) },
        { "AMD0030", },
        { "AMD0040", APD_ADDR(fch_misc_desc)},
        { "HYGO0010", APD_ADDR(wt_i2c_desc) },
index 67f1d33d15c4ed09cf319b2b3645d173f929b3a3..4cf4aef7ce0c17f68f2e1bda5363e0c634ca9bff 100644 (file)
@@ -6,6 +6,8 @@
  * Authors: Lan Tianyu <tianyu.lan@intel.com>
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/acpi.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -59,7 +61,7 @@ static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev,
                        &acpi_cmos_rtc_space_handler,
                        NULL, NULL);
        if (ACPI_FAILURE(status)) {
-               pr_err(PREFIX "Error installing CMOS-RTC region handler\n");
+               pr_err("Error installing CMOS-RTC region handler\n");
                return -ENODEV;
        }
 
@@ -70,7 +72,7 @@ static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev)
 {
        if (ACPI_FAILURE(acpi_remove_address_space_handler(adev->handle,
                        ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler)))
-               pr_err(PREFIX "Error removing CMOS-RTC region handler\n");
+               pr_err("Error removing CMOS-RTC region handler\n");
 }
 
 static struct acpi_scan_handler cmos_rtc_handler = {
index a89a806a7a2a943ba4a0cb5a2c5d691270509a2f..4ee2ad234e3d60af9d83782ae5493e6bad624f72 100644 (file)
@@ -240,8 +240,10 @@ static int __init acpi_init_fpdt(void)
                return 0;
 
        fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
-       if (!fpdt_kobj)
+       if (!fpdt_kobj) {
+               acpi_put_table(header);
                return -ENOMEM;
+       }
 
        while (offset < header->length) {
                subtable = (void *)header + offset;
index 0c884020f74b4759ef1a1cfeded4104b7bbaf7ad..ffb4afc5aad9ae2b886970e84c20c3821eff4646 100644 (file)
@@ -1619,8 +1619,6 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
                input_report_key(input, keycode, 0);
                input_sync(input);
        }
-
-       return;
 }
 
 static void brightness_switch_event(struct acpi_video_device *video_device,
@@ -1690,8 +1688,6 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
                input_report_key(input, keycode, 0);
                input_sync(input);
        }
-
-       return;
 }
 
 static int acpi_video_resume(struct notifier_block *nb,
@@ -2308,8 +2304,6 @@ static void __exit acpi_video_exit(void)
 {
        acpi_video_detect_exit();
        acpi_video_unregister();
-
-       return;
 }
 
 module_init(acpi_video_init);
index bccae0d3db752b70513dbef510df5855ad95f779..59d6ded01614f4902ada5a93c9fea6800560c1e2 100644 (file)
@@ -737,6 +737,8 @@ const char *acpi_ah_match_uuid(u8 *data);
  */
 #if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP)
 void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer);
+
+acpi_status acpi_ut_convert_uuid_to_string(char *uuid_buffer, char *out_string);
 #endif
 
 #endif                         /* _ACUTILS_H */
index 32f03ee8178515b686203639ad8359c02bd9869e..06f3c9df1e22d22f17d12b3c4732ad95fab118ae 100644 (file)
@@ -139,7 +139,9 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
                    || obj_desc->field.region_obj->region.space_id ==
                    ACPI_ADR_SPACE_GSBUS
                    || obj_desc->field.region_obj->region.space_id ==
-                   ACPI_ADR_SPACE_IPMI)) {
+                   ACPI_ADR_SPACE_IPMI
+                   || obj_desc->field.region_obj->region.space_id ==
+                   ACPI_ADR_SPACE_PLATFORM_RT)) {
 
                /* SMBus, GSBus, IPMI serial */
 
@@ -301,7 +303,9 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                    || obj_desc->field.region_obj->region.space_id ==
                    ACPI_ADR_SPACE_GSBUS
                    || obj_desc->field.region_obj->region.space_id ==
-                   ACPI_ADR_SPACE_IPMI)) {
+                   ACPI_ADR_SPACE_IPMI
+                   || obj_desc->field.region_obj->region.space_id ==
+                   ACPI_ADR_SPACE_PLATFORM_RT)) {
 
                /* SMBus, GSBus, IPMI serial */
 
index 8e8d95f7947bde919858d8e1fbb3293eb44ba24e..10d68a5f76a32c96e77c714156c98ada47da93e4 100644 (file)
@@ -195,6 +195,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
                function = ACPI_READ | (accessor_type << 16);
                break;
 
+       case ACPI_ADR_SPACE_PLATFORM_RT:
+
+               buffer_length = ACPI_PRM_INPUT_BUFFER_SIZE;
+               function = ACPI_READ;
+               break;
+
        default:
                return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
        }
@@ -311,6 +317,12 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
                function = ACPI_WRITE | (accessor_type << 16);
                break;
 
+       case ACPI_ADR_SPACE_PLATFORM_RT:
+
+               buffer_length = ACPI_PRM_INPUT_BUFFER_SIZE;
+               function = ACPI_WRITE;
+               break;
+
        default:
                return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
        }
index 14b71b41e845370a20613a69a27fbf0da7130919..38e10ab976e674e41efd313bb4b0817a8ee1855a 100644 (file)
@@ -379,6 +379,13 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
 
                        (*element_ptr)->common.reference_count =
                            original_ref_count;
+
+                       /*
+                        * The original_element holds a reference from the package object
+                        * that represents _HID. Since a new element was created by _HID,
+                        * remove the reference from the _CID package.
+                        */
+                       acpi_ut_remove_reference(original_element);
                }
 
                element_ptr++;
index 624a26794d5588206ddd5aaa60664e5821496b8c..e5ba9795ec696e62042222ceb7d3ee5537127aa2 100644 (file)
@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                }
                break;
 
+       case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
+
+               ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+                                 "***** Address handler %p\n", object));
+
+               acpi_os_delete_mutex(object->address_space.context_mutex);
+               break;
+
        default:
 
                break;
index e37d612e8db5956a8b0f908fb31a40555bd2dba7..05426596d1f4aae21fb52530534683b0674b9fcb 100644 (file)
@@ -475,7 +475,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
                case 'X':
 
                        type |= ACPI_FORMAT_UPPER;
-                       /* FALLTHROUGH */
+                       ACPI_FALLTHROUGH;
 
                case 'x':
 
index 090e44b6b6c74f71549ae7af78293ed0f8928e31..dca9061518abed2c5478a805d070a9b7d58cfa52 100644 (file)
@@ -61,4 +61,45 @@ void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer)
                                               1]);
        }
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_convert_uuid_to_string
+ *
+ * PARAMETERS:  uuid_buffer         - 16-byte UUID buffer
+ *              out_string          - 36-byte formatted UUID string
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert 16-byte UUID buffer to 36-byte formatted UUID string
+ *              out_string must be 37 bytes to include null terminator.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_uuid_to_string(char *uuid_buffer, char *out_string)
+{
+       u32 i;
+
+       if (!uuid_buffer || !out_string) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       for (i = 0; i < UUID_BUFFER_LENGTH; i++) {
+               out_string[acpi_gbl_map_to_uuid_offset[i]] =
+                   acpi_ut_hex_to_ascii_char(uuid_buffer[i], 4);
+
+               out_string[acpi_gbl_map_to_uuid_offset[i] + 1] =
+                   acpi_ut_hex_to_ascii_char(uuid_buffer[i], 0);
+       }
+
+       /* Insert required hyphens (dashes) */
+
+       out_string[UUID_HYPHEN1_OFFSET] =
+           out_string[UUID_HYPHEN2_OFFSET] =
+           out_string[UUID_HYPHEN3_OFFSET] =
+           out_string[UUID_HYPHEN4_OFFSET] = '-';
+
+       out_string[UUID_STRING_LENGTH] = 0;     /* Null terminate */
+       return (AE_OK);
+}
 #endif
index 19bb7f870204c09c39995956ad2f44ff86fa7638..02d208732f9a3e0b8cbdb09ea30783bdbc9417e4 100644 (file)
 static void *bgrt_image;
 static struct kobject *bgrt_kobj;
 
-static ssize_t version_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
-}
-static DEVICE_ATTR_RO(version);
-
-static ssize_t status_show(struct device *dev,
-                          struct device_attribute *attr, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
-}
-static DEVICE_ATTR_RO(status);
-
-static ssize_t type_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
-}
-static DEVICE_ATTR_RO(type);
-
-static ssize_t xoffset_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
-}
-static DEVICE_ATTR_RO(xoffset);
-
-static ssize_t yoffset_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
-}
-static DEVICE_ATTR_RO(yoffset);
+#define BGRT_SHOW(_name, _member) \
+       static ssize_t _name##_show(struct kobject *kobj,                       \
+                                   struct kobj_attribute *attr, char *buf)     \
+       {                                                                       \
+               return sysfs_emit(buf, "%d\n", bgrt_tab._member);               \
+       }                                                                       \
+       struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
+
+BGRT_SHOW(version, version);
+BGRT_SHOW(status, status);
+BGRT_SHOW(type, image_type);
+BGRT_SHOW(xoffset, image_offset_x);
+BGRT_SHOW(yoffset, image_offset_y);
 
 static ssize_t image_read(struct file *file, struct kobject *kobj,
               struct bin_attribute *attr, char *buf, loff_t off, size_t count)
@@ -60,11 +39,11 @@ static ssize_t image_read(struct file *file, struct kobject *kobj,
 static BIN_ATTR_RO(image, 0);  /* size gets filled in later */
 
 static struct attribute *bgrt_attributes[] = {
-       &dev_attr_version.attr,
-       &dev_attr_status.attr,
-       &dev_attr_type.attr,
-       &dev_attr_xoffset.attr,
-       &dev_attr_yoffset.attr,
+       &bgrt_attr_version.attr,
+       &bgrt_attr_status.attr,
+       &bgrt_attr_type.attr,
+       &bgrt_attr_xoffset.attr,
+       &bgrt_attr_yoffset.attr,
        NULL,
 };
 
index a86a770c9b798a9412465e7e8a9de4ce0d085831..a558d24fb7884c12b69e39a5070eac3f563a9946 100644 (file)
@@ -10,6 +10,8 @@
  *  Copyright (C) 2002 Andy Grover <andrew.grover@intel.com>
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
@@ -49,12 +51,12 @@ int __init acpi_blacklisted(void)
 
        i = acpi_match_platform_list(acpi_blacklist);
        if (i >= 0) {
-               pr_err(PREFIX "Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
+               pr_err("Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
                       acpi_blacklist[i].oem_id,
                       acpi_blacklist[i].oem_table_id,
                       acpi_blacklist[i].oem_revision);
 
-               pr_err(PREFIX "Reason: %s. This is a %s error\n",
+               pr_err("Reason: %s. This is a %s error\n",
                       acpi_blacklist[i].reason,
                       (acpi_blacklist[i].data ?
                        "non-recoverable" : "recoverable"));
@@ -73,8 +75,7 @@ int __init acpi_blacklisted(void)
 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
 static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
 {
-       printk(KERN_NOTICE PREFIX "DMI detected: %s (force ACPI _REV to 5)\n",
-              d->ident);
+       pr_notice("DMI detected: %s (force ACPI _REV to 5)\n", d->ident);
        acpi_rev_override_setup(NULL);
        return 0;
 }
index be7da23fad76f4a17f69ad12a039015b38fc92e3..14ec87a76e14fb75ec17a3eec9ddd3cbd033f105 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <acpi/apei.h>
 #include <linux/suspend.h>
+#include <linux/prmt.h>
 
 #include "internal.h"
 
@@ -262,8 +263,6 @@ out_success:
 
 out_kfree:
        kfree(output.pointer);
-       if (status != AE_OK)
-               context->ret.pointer = NULL;
        return status;
 }
 EXPORT_SYMBOL(acpi_run_osc);
@@ -304,6 +303,7 @@ static void acpi_bus_osc_negotiate_platform_control(void)
 
        capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
        capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+       capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
 
 #ifdef CONFIG_ARM64
        capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
@@ -330,32 +330,21 @@ static void acpi_bus_osc_negotiate_platform_control(void)
        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;
 
-       capbuf_ret = context.ret.pointer;
-       if (context.ret.length <= OSC_SUPPORT_DWORD) {
-               kfree(context.ret.pointer);
-               return;
-       }
+       kfree(context.ret.pointer);
 
-       /*
-        * Now run _OSC again with query flag clear and with the caps
-        * supported by both the OS and the platform.
-        */
+       /* Now run _OSC again with query flag clear */
        capbuf[OSC_QUERY_DWORD] = 0;
-       capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
-       kfree(context.ret.pointer);
 
        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;
 
        capbuf_ret = context.ret.pointer;
-       if (context.ret.length > OSC_SUPPORT_DWORD) {
-               osc_sb_apei_support_acked =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
-               osc_pc_lpi_support_confirmed =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
-               osc_sb_native_usb4_support_confirmed =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
-       }
+       osc_sb_apei_support_acked =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+       osc_pc_lpi_support_confirmed =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+       osc_sb_native_usb4_support_confirmed =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
 
        kfree(context.ret.pointer);
 }
@@ -370,7 +359,7 @@ EXPORT_SYMBOL_GPL(osc_sb_native_usb4_control);
 
 static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
 {
-       printk(KERN_INFO PREFIX "%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
+       pr_info("%s USB3%c DisplayPort%c PCIe%c XDomain%c\n", msg,
               (bits & OSC_USB_USB3_TUNNELING) ? '+' : '-',
               (bits & OSC_USB_DP_TUNNELING) ? '+' : '-',
               (bits & OSC_USB_PCIE_TUNNELING) ? '+' : '-',
@@ -409,7 +398,7 @@ static void acpi_bus_osc_negotiate_usb_control(void)
                return;
 
        if (context.ret.length != sizeof(capbuf)) {
-               printk(KERN_INFO PREFIX "USB4 _OSC: returned invalid length buffer\n");
+               pr_info("USB4 _OSC: returned invalid length buffer\n");
                goto out_free;
        }
 
@@ -1206,7 +1195,8 @@ void __init acpi_subsystem_init(void)
 
 static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
 {
-       acpi_scan_table_handler(event, table, context);
+       if (event == ACPI_TABLE_EVENT_LOAD)
+               acpi_scan_table_notify();
 
        return acpi_sysfs_table_handler(event, table, context);
 }
@@ -1330,6 +1320,7 @@ static int __init acpi_init(void)
                acpi_kobj = NULL;
        }
 
+       init_prmt();
        result = acpi_bus_init();
        if (result) {
                disable_acpi();
index 16c0fe8a72a7ffa788d733d969141634d8102c3b..675a69de516f853e88883cfddc0e69edad7d20ea 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
 
+#include "fan.h"
 #include "internal.h"
 
 /**
@@ -1133,19 +1134,48 @@ static int acpi_subsys_resume_noirq(struct device *dev)
  *
  * Use ACPI to put the given device into the full-power state and carry out the
  * generic early resume procedure for it during system transition into the
- * working state.
+ * working state, but only do that if the device either defines an early
+ * resume handler or does not define power operations at all. Otherwise,
+ * powering up the device is postponed to the normal resume phase.
  */
 static int acpi_subsys_resume_early(struct device *dev)
 {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int ret;
 
        if (dev_pm_skip_resume(dev))
                return 0;
 
+       if (pm && !pm->resume_early) {
+               dev_dbg(dev, "postponing D0 transition to normal resume stage\n");
+               return 0;
+       }
+
        ret = acpi_dev_resume(dev);
        return ret ? ret : pm_generic_resume_early(dev);
 }
 
+/**
+ * acpi_subsys_resume - Resume device using ACPI.
+ * @dev: Device to resume.
+ *
+ * Use ACPI to put the given device into the full-power state if it has not been
+ * powered up during early resume phase, and carry out the generic resume
+ * procedure for it during system transition into the working state.
+ */
+static int acpi_subsys_resume(struct device *dev)
+{
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int ret = 0;
+
+       if (!dev_pm_skip_resume(dev) && pm && !pm->resume_early) {
+               dev_dbg(dev, "executing postponed D0 transition\n");
+               ret = acpi_dev_resume(dev);
+       }
+
+       return ret ? ret : pm_generic_resume(dev);
+}
+
 /**
  * acpi_subsys_freeze - Run the device driver's freeze callback.
  * @dev: Device to handle.
@@ -1239,6 +1269,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
                .prepare = acpi_subsys_prepare,
                .complete = acpi_subsys_complete,
                .suspend = acpi_subsys_suspend,
+               .resume = acpi_subsys_resume,
                .suspend_late = acpi_subsys_suspend_late,
                .suspend_noirq = acpi_subsys_suspend_noirq,
                .resume_noirq = acpi_subsys_resume_noirq,
@@ -1310,9 +1341,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
         * with the generic ACPI PM domain.
         */
        static const struct acpi_device_id special_pm_ids[] = {
-               {"PNP0C0B", }, /* Generic ACPI fan */
-               {"INT3404", }, /* Fan */
-               {"INTC1044", }, /* Fan for Tiger Lake generation */
+               ACPI_FAN_DEVICE_IDS,
                {}
        };
        struct acpi_device *adev = ACPI_COMPANION(dev);
index fa2c1c93072cf44b292512c53926d49659ad08c3..61271e61c30735253605f20085a97601670ec18d 100644 (file)
@@ -268,6 +268,8 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
 
 /**
  * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
+ * @dev: Struct device to get ACPI device node.
+ * @env: Environment variables of the kobject uevent.
  *
  * Create the uevent modalias field for ACPI-enumerated devices.
  *
@@ -313,6 +315,9 @@ static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
 
 /**
  * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
+ * @dev: Struct device to get ACPI device node.
+ * @buf: The buffer to save pnp_modalias and of_modalias.
+ * @size: Size of buffer.
  *
  * Create the modalias sysfs attribute for ACPI-enumerated devices.
  *
@@ -448,7 +453,7 @@ static ssize_t description_show(struct device *dev,
                (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
                acpi_dev->pnp.str_obj->buffer.length,
                UTF16_LITTLE_ENDIAN, buf,
-               PAGE_SIZE);
+               PAGE_SIZE - 1);
 
        buf[result++] = '\n';
 
index 13565629ce0a8c570b68de07ce21669ce94f6539..3f7680a007a38f6b382e5ef57ae7adcff6fc7c44 100644 (file)
@@ -1627,7 +1627,7 @@ static int acpi_ec_add(struct acpi_device *device)
        WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
        /* Reprobe devices depending on the EC */
-       acpi_walk_dep_device_list(ec->handle);
+       acpi_dev_clear_dependencies(device);
 
        acpi_handle_debug(ec->handle, "enumerated.\n");
        return 0;
index 92e59f45329bc576c97ca3c702cd40462fe1c22f..df38f3c94861f0007e8dae5d29e0d53ba597aa09 100644 (file)
@@ -7,6 +7,8 @@
  *
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/proc_fs.h>
@@ -173,8 +175,8 @@ static int __init acpi_event_init(void)
        /* create genetlink for acpi event */
        error = acpi_event_genetlink_init();
        if (error)
-               printk(KERN_WARNING PREFIX
-                      "Failed to create genetlink family for ACPI event\n");
+               pr_warn("Failed to create genetlink family for ACPI event\n");
+
        return 0;
 }
 
index 66c3983f0ccca7aacb83feb63c34c5b9e4e36cab..5cd0ceb50bc8a4d88aaa452755e4ea49657cc52c 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/platform_device.h>
 #include <linux/sort.h>
 
+#include "fan.h"
+
 MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI Fan Driver");
 MODULE_LICENSE("GPL");
@@ -24,10 +26,7 @@ static int acpi_fan_probe(struct platform_device *pdev);
 static int acpi_fan_remove(struct platform_device *pdev);
 
 static const struct acpi_device_id fan_device_ids[] = {
-       {"PNP0C0B", 0},
-       {"INT3404", 0},
-       {"INTC1044", 0},
-       {"INTC1048", 0},
+       ACPI_FAN_DEVICE_IDS,
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
new file mode 100644 (file)
index 0000000..dc9a6ef
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * ACPI fan device IDs are shared between the fan driver and the device power
+ * management code.
+ *
+ * Add new device IDs before the generic ACPI fan one.
+ */
+#define ACPI_FAN_DEVICE_IDS    \
+       {"INT3404", }, /* Fan */ \
+       {"INTC1044", }, /* Fan for Tiger Lake generation */ \
+       {"INTC1048", }, /* Fan for Alder Lake generation */ \
+       {"PNP0C0B", } /* Generic ACPI fan */
index 0715e3be99a0be9b16a937d4bd98d3ec6cd646f7..fce3f3bba714add50426fbb04b0351c59d228a28 100644 (file)
@@ -6,6 +6,8 @@
  * Copyright (c) 2005 Intel Corp.
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/acpi_iort.h>
 #include <linux/export.h>
 #include <linux/init.h>
 
 #include "internal.h"
 
-#define ACPI_GLUE_DEBUG        0
-#if ACPI_GLUE_DEBUG
-#define DBG(fmt, ...)                                          \
-       printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__)
-#else
-#define DBG(fmt, ...)                                          \
-do {                                                           \
-       if (0)                                                  \
-               printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__);   \
-} while (0)
-#endif
 static LIST_HEAD(bus_type_list);
 static DECLARE_RWSEM(bus_type_sem);
 
@@ -44,7 +35,7 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
                down_write(&bus_type_sem);
                list_add_tail(&type->list, &bus_type_list);
                up_write(&bus_type_sem);
-               printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
+               pr_info("bus type %s registered\n", type->name);
                return 0;
        }
        return -ENODEV;
@@ -59,8 +50,7 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
                down_write(&bus_type_sem);
                list_del_init(&type->list);
                up_write(&bus_type_sem);
-               printk(KERN_INFO PREFIX "bus type %s unregistered\n",
-                      type->name);
+               pr_info("bus type %s unregistered\n", type->name);
                return 0;
        }
        return -ENODEV;
@@ -307,7 +297,7 @@ static int acpi_device_notify(struct device *dev)
 
                adev = type->find_companion(dev);
                if (!adev) {
-                       DBG("Unable to get handle for %s\n", dev_name(dev));
+                       pr_debug("Unable to get handle for %s\n", dev_name(dev));
                        ret = -ENODEV;
                        goto out;
                }
@@ -328,16 +318,15 @@ static int acpi_device_notify(struct device *dev)
                adev->handler->bind(dev);
 
  out:
-#if ACPI_GLUE_DEBUG
        if (!ret) {
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
                acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
-               DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
+               pr_debug("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
                kfree(buffer.pointer);
-       } else
-               DBG("Device %s -> No ACPI support\n", dev_name(dev));
-#endif
+       } else {
+               pr_debug("Device %s -> No ACPI support\n", dev_name(dev));
+       }
 
        return ret;
 }
index b852cff8028725fac360e6d2a04de3632876879d..b1d2cc342014594cd6204b3dadc8803ce832346e 100644 (file)
@@ -11,8 +11,6 @@
 
 #include <linux/idr.h>
 
-#define PREFIX "ACPI: "
-
 int early_acpi_osi_init(void);
 int acpi_osi_init(void);
 acpi_status acpi_os_initialize1(void);
@@ -88,7 +86,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src);
 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
 
 acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
-void acpi_scan_table_handler(u32 event, void *table, void *context);
+void acpi_scan_table_notify(void);
 
 /* --------------------------------------------------------------------------
                      Device Node Initialization / Removal
@@ -134,7 +132,7 @@ int acpi_power_init(void);
 void acpi_power_resources_list_free(struct list_head *list);
 int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
                                 struct list_head *list);
-int acpi_add_power_resource(acpi_handle handle);
+struct acpi_device *acpi_add_power_resource(acpi_handle handle);
 void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
 int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
 int acpi_device_sleep_wake(struct acpi_device *dev,
@@ -142,6 +140,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
 int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
 int acpi_power_on_resources(struct acpi_device *device, int state);
 int acpi_power_transition(struct acpi_device *device, int state);
+void acpi_turn_off_unused_power_resources(void);
 
 /* --------------------------------------------------------------------------
                               Device Power Management
index 958aaac869e8d2f9440f33f12a8bf4d68053608f..23d9a09d7060445be83e153ba483a16fe1fb3a51 100644 (file)
@@ -686,6 +686,13 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
        return -1;
 }
 
+static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
+{
+       if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID)
+               return sizeof(*spa);
+       return sizeof(*spa) - 8;
+}
+
 static bool add_spa(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev,
                struct acpi_nfit_system_address *spa)
@@ -693,22 +700,22 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
        struct device *dev = acpi_desc->dev;
        struct nfit_spa *nfit_spa;
 
-       if (spa->header.length != sizeof(*spa))
+       if (spa->header.length != sizeof_spa(spa))
                return false;
 
        list_for_each_entry(nfit_spa, &prev->spas, list) {
-               if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+               if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) {
                        list_move_tail(&nfit_spa->list, &acpi_desc->spas);
                        return true;
                }
        }
 
-       nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
+       nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof_spa(spa),
                        GFP_KERNEL);
        if (!nfit_spa)
                return false;
        INIT_LIST_HEAD(&nfit_spa->list);
-       memcpy(nfit_spa->spa, spa, sizeof(*spa));
+       memcpy(nfit_spa->spa, spa, sizeof_spa(spa));
        list_add_tail(&nfit_spa->list, &acpi_desc->spas);
        dev_dbg(dev, "spa index: %d type: %s\n",
                        spa->range_index,
index 9f8712a557b3bf485fc3135df94dddf4b22cc5cf..7f02e399047c42762a33204bc159a7c3016e791b 100644 (file)
@@ -5,6 +5,8 @@
  * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  */
 
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -94,7 +96,7 @@ static int suspend_nvs_register(unsigned long start, unsigned long size)
 {
        struct nvs_page *entry, *next;
 
-       pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
+       pr_info("Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
                start, start + size - 1, size);
 
        while (size > 0) {
@@ -170,7 +172,7 @@ int suspend_nvs_save(void)
 {
        struct nvs_page *entry;
 
-       printk(KERN_INFO "PM: Saving platform NVS memory\n");
+       pr_info("Saving platform NVS memory\n");
 
        list_for_each_entry(entry, &nvs_list, node)
                if (entry->data) {
@@ -202,7 +204,7 @@ void suspend_nvs_restore(void)
 {
        struct nvs_page *entry;
 
-       printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+       pr_info("Restoring platform NVS memory\n");
 
        list_for_each_entry(entry, &nvs_list, node)
                if (entry->data)
index 327e1b4eb6b096f99f5d3fb15d45250d84b0fd9d..1207490b0a5088b371cbe92928b9e1ff888a6bcc 100644 (file)
@@ -212,7 +212,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
                        return efi.acpi20;
                if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
-               pr_err(PREFIX "System description tables not found\n");
+               pr_err("System description tables not found\n");
        } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
                acpi_find_root_pointer(&pa);
        }
@@ -430,7 +430,7 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
        map = acpi_map_lookup_virt(virt, size);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
-               WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
+               WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
                return;
        }
        acpi_os_drop_map_ref(map);
index dcd593766a643319d78d36f3759840397a3203c1..d7deedf3548e0f3a8cb22462ffe347471b41f761 100644 (file)
@@ -6,6 +6,8 @@
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -574,7 +576,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
                goto end;
        }
 
-       pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
+       pr_info("%s [%s] (domain %04x %pR)\n",
               acpi_device_name(device), acpi_device_bid(device),
               root->segment, &root->secondary);
 
index a5101b07611aae9569f847f345c94b1849dfc009..fef7831d0d63701b4f1b2c870f3579ab07a00bbf 100644 (file)
@@ -117,7 +117,7 @@ static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
                return err;
 
        /* Re-enumerate devices depending on PMIC */
-       acpi_walk_dep_device_list(ACPI_HANDLE(pdev->dev.parent));
+       acpi_dev_clear_dependencies(ACPI_COMPANION(pdev->dev.parent));
        return 0;
 }
 
index 32974b575e46479b9d3d204c046593fd491a6e47..eba7785047cad34d72bd25a2e8b11d39c05ae954 100644 (file)
@@ -52,6 +52,7 @@ struct acpi_power_resource {
        u32 system_level;
        u32 order;
        unsigned int ref_count;
+       u8 state;
        bool wakeup_enabled;
        struct mutex resource_lock;
        struct list_head dependents;
@@ -147,6 +148,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
 
        for (i = start; i < package->package.count; i++) {
                union acpi_object *element = &package->package.elements[i];
+               struct acpi_device *rdev;
                acpi_handle rhandle;
 
                if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
@@ -163,10 +165,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
                if (acpi_power_resource_is_dup(package, start, i))
                        continue;
 
-               err = acpi_add_power_resource(rhandle);
-               if (err)
+               rdev = acpi_add_power_resource(rhandle);
+               if (!rdev) {
+                       err = -ENODEV;
                        break;
-
+               }
                err = acpi_power_resources_list_add(rhandle, list);
                if (err)
                        break;
@@ -177,44 +180,54 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
        return err;
 }
 
-static int acpi_power_get_state(acpi_handle handle, int *state)
+static int __get_state(acpi_handle handle, u8 *state)
 {
        acpi_status status = AE_OK;
        unsigned long long sta = 0;
-
-       if (!handle || !state)
-               return -EINVAL;
+       u8 cur_state;
 
        status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       *state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
-                             ACPI_POWER_RESOURCE_STATE_OFF;
+       cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
 
        acpi_handle_debug(handle, "Power resource is %s\n",
-                         *state ? "on" : "off");
+                         cur_state ? "on" : "off");
+
+       *state = cur_state;
+       return 0;
+}
+
+static int acpi_power_get_state(struct acpi_power_resource *resource, u8 *state)
+{
+       if (resource->state == ACPI_POWER_RESOURCE_STATE_UNKNOWN) {
+               int ret;
 
+               ret = __get_state(resource->device.handle, &resource->state);
+               if (ret)
+                       return ret;
+       }
+
+       *state = resource->state;
        return 0;
 }
 
-static int acpi_power_get_list_state(struct list_head *list, int *state)
+static int acpi_power_get_list_state(struct list_head *list, u8 *state)
 {
        struct acpi_power_resource_entry *entry;
-       int cur_state;
+       u8 cur_state = ACPI_POWER_RESOURCE_STATE_OFF;
 
        if (!list || !state)
                return -EINVAL;
 
        /* The state of the list is 'on' IFF all resources are 'on'. */
-       cur_state = 0;
        list_for_each_entry(entry, list, node) {
                struct acpi_power_resource *resource = entry->resource;
-               acpi_handle handle = resource->device.handle;
                int result;
 
                mutex_lock(&resource->resource_lock);
-               result = acpi_power_get_state(handle, &cur_state);
+               result = acpi_power_get_state(resource, &cur_state);
                mutex_unlock(&resource->resource_lock);
                if (result)
                        return result;
@@ -347,8 +360,12 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
        acpi_status status = AE_OK;
 
        status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
-       if (ACPI_FAILURE(status))
+       if (ACPI_FAILURE(status)) {
+               resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
                return -ENODEV;
+       }
+
+       resource->state = ACPI_POWER_RESOURCE_STATE_ON;
 
        pr_debug("Power resource [%s] turned on\n", resource->name);
 
@@ -400,8 +417,12 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
 
        status = acpi_evaluate_object(resource->device.handle, "_OFF",
                                      NULL, NULL);
-       if (ACPI_FAILURE(status))
+       if (ACPI_FAILURE(status)) {
+               resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
                return -ENODEV;
+       }
+
+       resource->state = ACPI_POWER_RESOURCE_STATE_OFF;
 
        pr_debug("Power resource [%s] turned off\n", resource->name);
 
@@ -585,13 +606,12 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
 
        list_for_each_entry(entry, list, node) {
                struct acpi_power_resource *resource = entry->resource;
-               acpi_handle handle = resource->device.handle;
                int result;
-               int state;
+               u8 state;
 
                mutex_lock(&resource->resource_lock);
 
-               result = acpi_power_get_state(handle, &state);
+               result = acpi_power_get_state(resource, &state);
                if (result) {
                        mutex_unlock(&resource->resource_lock);
                        return result;
@@ -784,8 +804,8 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
 
 int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
 {
+       u8 list_state = ACPI_POWER_RESOURCE_STATE_OFF;
        int result = 0;
-       int list_state = 0;
        int i = 0;
 
        if (!device || !state)
@@ -907,22 +927,22 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource
        mutex_unlock(&power_resource_list_lock);
 }
 
-int acpi_add_power_resource(acpi_handle handle)
+struct acpi_device *acpi_add_power_resource(acpi_handle handle)
 {
        struct acpi_power_resource *resource;
        struct acpi_device *device = NULL;
        union acpi_object acpi_object;
        struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
        acpi_status status;
-       int state, result = -ENODEV;
+       int result;
 
        acpi_bus_get_device(handle, &device);
        if (device)
-               return 0;
+               return device;
 
        resource = kzalloc(sizeof(*resource), GFP_KERNEL);
        if (!resource)
-               return -ENOMEM;
+               return NULL;
 
        device = &resource->device;
        acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER);
@@ -941,13 +961,9 @@ int acpi_add_power_resource(acpi_handle handle)
 
        resource->system_level = acpi_object.power_resource.system_level;
        resource->order = acpi_object.power_resource.resource_order;
+       resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
 
-       result = acpi_power_get_state(handle, &state);
-       if (result)
-               goto err;
-
-       pr_info("%s [%s] (%s)\n", acpi_device_name(device),
-               acpi_device_bid(device), state ? "on" : "off");
+       pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
 
        device->flags.match_driver = true;
        result = acpi_device_add(device, acpi_release_power_resource);
@@ -959,11 +975,11 @@ int acpi_add_power_resource(acpi_handle handle)
 
        acpi_power_add_resource_to_list(resource);
        acpi_device_add_finalize(device);
-       return 0;
+       return device;
 
  err:
        acpi_release_power_resource(&device->dev);
-       return result;
+       return NULL;
 }
 
 #ifdef CONFIG_ACPI_SLEEP
@@ -974,11 +990,13 @@ void acpi_resume_power_resources(void)
        mutex_lock(&power_resource_list_lock);
 
        list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
-               int result, state;
+               int result;
+               u8 state;
 
                mutex_lock(&resource->resource_lock);
 
-               result = acpi_power_get_state(resource->device.handle, &state);
+               resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
+               result = acpi_power_get_state(resource, &state);
                if (result) {
                        mutex_unlock(&resource->resource_lock);
                        continue;
@@ -986,7 +1004,7 @@ void acpi_resume_power_resources(void)
 
                if (state == ACPI_POWER_RESOURCE_STATE_OFF
                    && resource->ref_count) {
-                       dev_info(&resource->device.dev, "Turning ON\n");
+                       dev_dbg(&resource->device.dev, "Turning ON\n");
                        __acpi_power_on(resource);
                }
 
@@ -995,7 +1013,11 @@ void acpi_resume_power_resources(void)
 
        mutex_unlock(&power_resource_list_lock);
 }
+#endif
 
+/**
+ * acpi_turn_off_unused_power_resources - Turn off power resources not in use.
+ */
 void acpi_turn_off_unused_power_resources(void)
 {
        struct acpi_power_resource *resource;
@@ -1005,8 +1027,14 @@ void acpi_turn_off_unused_power_resources(void)
        list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
                mutex_lock(&resource->resource_lock);
 
-               if (!resource->ref_count) {
-                       dev_info(&resource->device.dev, "Turning OFF\n");
+               /*
+                * Turn off power resources in an unknown state too, because the
+                * platform firmware on some systems expects the OS to turn off
+                * power resources without any users unconditionally.
+                */
+               if (!resource->ref_count &&
+                   resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+                       dev_dbg(&resource->device.dev, "Turning OFF\n");
                        __acpi_power_off(resource);
                }
 
@@ -1015,4 +1043,3 @@ void acpi_turn_off_unused_power_resources(void)
 
        mutex_unlock(&power_resource_list_lock);
 }
-#endif
index 4ae93350b70dec24ef5a3e44dd35ebdee874c897..fe69dc518f31efd182e83ccf134d48cc28b7dd1a 100644 (file)
@@ -347,6 +347,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
  * @this_leaf: Kernel cache info structure being updated
  * @found_cache: The PPTT node describing this cache instance
  * @cpu_node: A unique reference to describe this cache instance
+ * @revision: The revision of the PPTT table
  *
  * The ACPI spec implies that the fields in the cache structures are used to
  * extend and correct the information probed from the hardware. Lets only
@@ -356,8 +357,11 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
  */
 static void update_cache_properties(struct cacheinfo *this_leaf,
                                    struct acpi_pptt_cache *found_cache,
-                                   struct acpi_pptt_processor *cpu_node)
+                                   struct acpi_pptt_processor *cpu_node,
+                                   u8 revision)
 {
+       struct acpi_pptt_cache_v1* found_cache_v1;
+
        this_leaf->fw_token = cpu_node;
        if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
                this_leaf->size = found_cache->size;
@@ -405,6 +409,13 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
        if (this_leaf->type == CACHE_TYPE_NOCACHE &&
            found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
                this_leaf->type = CACHE_TYPE_UNIFIED;
+
+       if (revision >= 3 && (found_cache->flags & ACPI_PPTT_CACHE_ID_VALID)) {
+               found_cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1,
+                                             found_cache, sizeof(struct acpi_pptt_cache));
+               this_leaf->id = found_cache_v1->cache_id;
+               this_leaf->attributes |= CACHE_ID;
+       }
 }
 
 static void cache_setup_acpi_cpu(struct acpi_table_header *table,
@@ -425,9 +436,8 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
                                                   &cpu_node);
                pr_debug("found = %p %p\n", found_cache, cpu_node);
                if (found_cache)
-                       update_cache_properties(this_leaf,
-                                               found_cache,
-                                               cpu_node);
+                       update_cache_properties(this_leaf, found_cache,
+                                               cpu_node, table->revision);
 
                index++;
        }
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
new file mode 100644 (file)
index 0000000..31cf9ae
--- /dev/null
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Author: Erik Kaneda <erik.kaneda@intel.com>
+ * Copyright 2020 Intel Corporation
+ *
+ * prmt.c
+ *
+ * Each PRM service is an executable that is run in a restricted environment
+ * that is invoked by writing to the PlatformRtMechanism OperationRegion from
+ * AML bytecode.
+ *
+ * init_prmt initializes the Platform Runtime Mechanism (PRM) services by
+ * processing data in the PRMT as well as registering an ACPI OperationRegion
+ * handler for the PlatformRtMechanism subtype.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/acpi.h>
+#include <linux/prmt.h>
+#include <asm/efi.h>
+
+#pragma pack(1)
+/*
+ * Byte-packed layouts: no compiler padding may be inserted, since these
+ * buffers are exchanged with ASL / PRM handlers verbatim (presumably they
+ * mirror the layouts in the PRM specification — verify against it).
+ */
+struct prm_mmio_addr_range {
+       u64 phys_addr;          /* physical base of the range */
+       u64 virt_addr;          /* EFI runtime virtual mapping of phys_addr */
+       u32 length;             /* length of the range in bytes */
+};
+
+struct prm_mmio_info {
+       u64 mmio_count;         /* number of entries in addr_ranges[] */
+       struct prm_mmio_addr_range addr_ranges[];       /* flexible array */
+};
+
+/* In/out message written to the PlatformRtMechanism opregion by ASL. */
+struct prm_buffer {
+       u8 prm_status;          /* status handed back to ASL (PRM_* codes) */
+       u64 efi_status;         /* raw EFI status when a handler fails */
+       u8 prm_cmd;             /* requested command (PRM_CMD_*) */
+       guid_t handler_guid;    /* GUID selecting the target handler */
+};
+
+/* Context buffer passed to a PRM handler on invocation. */
+struct prm_context_buffer {
+       char signature[ACPI_NAMESEG_SIZE];      /* always "PRMC" */
+       u16 revision;
+       u16 reserved;
+       guid_t identifier;                      /* handler GUID */
+       u64 static_data_buffer;                 /* handler's static data VA */
+       struct prm_mmio_info *mmio_ranges;      /* module's MMIO range list */
+};
+#pragma pack()
+
+
+/* All PRM modules discovered from the PRMT, linked via module_list. */
+static LIST_HEAD(prm_module_list);
+
+/* Kernel-side bookkeeping for a single PRM handler. */
+struct prm_handler_info {
+       guid_t guid;
+       u64 handler_addr;               /* EFI runtime VA of the handler */
+       u64 static_data_buffer_addr;    /* VA of its static data buffer */
+       u64 acpi_param_buffer_addr;     /* VA of its ACPI parameter buffer */
+
+       struct list_head handler_list;
+};
+
+/* Kernel-side bookkeeping for a PRM module and its handler table. */
+struct prm_module_info {
+       guid_t guid;
+       u16 major_rev;
+       u16 minor_rev;
+       u16 handler_count;              /* number of entries in handlers[] */
+       struct prm_mmio_info *mmio_info;
+       bool updatable;                 /* false while a transaction holds the update lock */
+
+       struct list_head module_list;   /* link in prm_module_list */
+       struct prm_handler_info handlers[];     /* flexible array of handlers */
+};
+
+
+/*
+ * Translate a physical address into the EFI runtime virtual mapping of the
+ * memory descriptor containing it.  Returns 0 when no descriptor covers @pa.
+ */
+static u64 efi_pa_va_lookup(u64 pa)
+{
+       efi_memory_desc_t *md;
+       u64 pa_offset = pa & ~PAGE_MASK;
+       u64 page = pa & PAGE_MASK;
+
+       for_each_efi_memory_desc(md) {
+               /*
+                * Use '<=' so an address at the very start of a descriptor
+                * matches too; the previous '<' wrongly rejected
+                * pa == md->phys_addr and made such lookups return 0.
+                *
+                * NOTE(review): md->num_pages counts EFI pages (4 KiB units
+                * per the UEFI spec); multiplying by the kernel PAGE_SIZE
+                * over-sizes the range on 16K/64K-page kernels — confirm
+                * whether EFI_PAGE_SIZE should be used here instead.
+                */
+               if (md->phys_addr <= pa &&
+                   pa < md->phys_addr + PAGE_SIZE * md->num_pages)
+                       return pa_offset + md->virt_addr + page - md->phys_addr;
+       }
+
+       return 0;
+}
+
+
+#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + a->handler_info_offset))
+#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) a))
+
+/*
+ * Parse one PRMT module entry: build a prm_module_info (including its MMIO
+ * range list, if any), resolve each handler's EFI runtime addresses, and
+ * add the module to prm_module_list.
+ *
+ * Returns 0 on success or -ENOMEM when an allocation or remap fails; the
+ * original code dereferenced kmalloc()/memremap() results unchecked and
+ * leaked every memremap() mapping.
+ */
+static int __init
+acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
+{
+       struct acpi_prmt_module_info *module_info;
+       struct acpi_prmt_handler_info *handler_info;
+       struct prm_handler_info *th;
+       struct prm_module_info *tm;
+       u64 *mmio_count;
+       u64 cur_handler = 0;
+       u32 module_info_size = 0;
+       u64 mmio_range_size = 0;
+       void *temp_mmio;
+
+       module_info = (struct acpi_prmt_module_info *) header;
+       module_info_size = struct_size(tm, handlers, module_info->handler_info_count);
+       tm = kmalloc(module_info_size, GFP_KERNEL);
+       if (!tm)
+               goto parse_prmt_out1;
+
+       guid_copy(&tm->guid, (guid_t *) module_info->module_guid);
+       tm->major_rev = module_info->major_rev;
+       tm->minor_rev = module_info->minor_rev;
+       tm->handler_count = module_info->handler_info_count;
+       tm->updatable = true;
+
+       if (module_info->mmio_list_pointer) {
+               /*
+                * Each module is associated with a list of addr
+                * ranges that it can use during the service
+                */
+               mmio_count = (u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
+               if (!mmio_count)
+                       goto parse_prmt_out2;
+
+               mmio_range_size = struct_size(tm->mmio_info, addr_ranges, *mmio_count);
+               memunmap(mmio_count);
+               tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+               if (!tm->mmio_info)
+                       goto parse_prmt_out2;
+
+               temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB);
+               if (!temp_mmio)
+                       goto parse_prmt_out3;
+               memmove(tm->mmio_info, temp_mmio, mmio_range_size);
+               memunmap(temp_mmio);
+       } else {
+               /* No MMIO list: keep an empty (count == 0) descriptor. */
+               mmio_range_size = struct_size(tm->mmio_info, addr_ranges, 0);
+               tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+               if (!tm->mmio_info)
+                       goto parse_prmt_out2;
+
+               tm->mmio_info->mmio_count = 0;
+       }
+
+       INIT_LIST_HEAD(&tm->module_list);
+       list_add(&tm->module_list, &prm_module_list);
+
+       /* Resolve every handler's EFI runtime virtual addresses. */
+       handler_info = get_first_handler(module_info);
+       do {
+               th = &tm->handlers[cur_handler];
+
+               guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+               th->handler_addr = efi_pa_va_lookup(handler_info->handler_address);
+               th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
+               th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
+       } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
+
+       return 0;
+
+parse_prmt_out3:
+       kfree(tm->mmio_info);
+parse_prmt_out2:
+       kfree(tm);
+parse_prmt_out1:
+       return -ENOMEM;
+}
+
+#define GET_MODULE     0
+#define GET_HANDLER    1
+
+/*
+ * Scan every registered module's handler table for @guid.  Depending on
+ * @mode, return the matching module (GET_MODULE) or the matching handler
+ * (GET_HANDLER); NULL if no handler carries that GUID.
+ */
+static void *find_guid_info(const guid_t *guid, u8 mode)
+{
+       struct prm_handler_info *hnd;
+       struct prm_module_info *mod;
+       int idx;
+
+       list_for_each_entry(mod, &prm_module_list, module_list) {
+               for (idx = 0; idx < mod->handler_count; ++idx) {
+                       hnd = &mod->handlers[idx];
+                       if (!guid_equal(guid, &hnd->guid))
+                               continue;
+
+                       return mode == GET_MODULE ? (void *)mod : (void *)hnd;
+               }
+       }
+
+       return NULL;
+}
+
+
+/* Module owning the handler with @guid, or NULL.  No cast needed: void *. */
+static struct prm_module_info *find_prm_module(const guid_t *guid)
+{
+       return find_guid_info(guid, GET_MODULE);
+}
+
+/* Handler registered under @guid, or NULL. */
+static struct prm_handler_info *find_prm_handler(const guid_t *guid)
+{
+       return find_guid_info(guid, GET_HANDLER);
+}
+
+/* In-coming PRM commands */
+
+#define PRM_CMD_RUN_SERVICE            0
+#define PRM_CMD_START_TRANSACTION      1
+#define PRM_CMD_END_TRANSACTION                2
+
+/* statuses that can be passed back to ASL */
+
+#define PRM_HANDLER_SUCCESS            0
+#define PRM_HANDLER_ERROR              1
+#define INVALID_PRM_COMMAND            2
+#define PRM_HANDLER_GUID_NOT_FOUND     3
+#define UPDATE_LOCK_ALREADY_HELD       4
+#define UPDATE_UNLOCK_WITHOUT_LOCK     5
+
+/*
+ * This is the PlatformRtMechanism opregion space handler.
+ * @function: indicates the read/write. In fact as the PlatformRtMechanism
+ * message is driven by command, only write is meaningful.
+ *
+ * @addr   : not used
+ * @bits   : not used.
+ * @value  : it is an in/out parameter. It points to the PRM message buffer.
+ * @handler_context: not used
+ */
+static acpi_status acpi_platformrt_space_handler(u32 function,
+                                                acpi_physical_address addr,
+                                                u32 bits, acpi_integer *value,
+                                                void *handler_context,
+                                                void *region_context)
+{
+       struct prm_buffer *buffer = ACPI_CAST_PTR(struct prm_buffer, value);
+       struct prm_handler_info *handler;
+       struct prm_module_info *module;
+       efi_status_t status;
+       struct prm_context_buffer context;
+
+       /*
+        * The returned acpi_status will always be AE_OK. Error values will be
+        * saved in the first byte of the PRM message buffer to be used by ASL.
+        */
+       switch (buffer->prm_cmd) {
+       case PRM_CMD_RUN_SERVICE:
+
+               /* Both lookups key off the same GUID; a missing handler or
+                * module is reported to ASL as GUID-not-found. */
+               handler = find_prm_handler(&buffer->handler_guid);
+               module = find_prm_module(&buffer->handler_guid);
+               if (!handler || !module)
+                       goto invalid_guid;
+
+               /* Build the packed context buffer handed to the handler. */
+               ACPI_COPY_NAMESEG(context.signature, "PRMC");
+               context.revision = 0x0;
+               context.reserved = 0x0;
+               context.identifier = handler->guid;
+               context.static_data_buffer = handler->static_data_buffer_addr;
+               context.mmio_ranges = module->mmio_info;
+
+               /* Invoke the handler through its EFI runtime mapping. */
+               status = efi_call_virt_pointer(handler, handler_addr,
+                                              handler->acpi_param_buffer_addr,
+                                              &context);
+               if (status == EFI_SUCCESS) {
+                       buffer->prm_status = PRM_HANDLER_SUCCESS;
+               } else {
+                       /* Pass the raw EFI status back alongside the error. */
+                       buffer->prm_status = PRM_HANDLER_ERROR;
+                       buffer->efi_status = status;
+               }
+               break;
+
+       case PRM_CMD_START_TRANSACTION:
+
+               module = find_prm_module(&buffer->handler_guid);
+               if (!module)
+                       goto invalid_guid;
+
+               /* Take the per-module update lock; fail if already held. */
+               if (module->updatable)
+                       module->updatable = false;
+               else
+                       buffer->prm_status = UPDATE_LOCK_ALREADY_HELD;
+               break;
+
+       case PRM_CMD_END_TRANSACTION:
+
+               module = find_prm_module(&buffer->handler_guid);
+               if (!module)
+                       goto invalid_guid;
+
+               /* Drop the update lock; flag an unlock without a lock. */
+               if (module->updatable)
+                       buffer->prm_status = UPDATE_UNLOCK_WITHOUT_LOCK;
+               else
+                       module->updatable = true;
+               break;
+
+       default:
+
+               buffer->prm_status = INVALID_PRM_COMMAND;
+               break;
+       }
+
+       return AE_OK;
+
+invalid_guid:
+       buffer->prm_status = PRM_HANDLER_GUID_NOT_FOUND;
+       return AE_OK;
+}
+
+/*
+ * Initialize PRM support: parse the PRMT table and, when it yields at least
+ * one module, install the PlatformRtMechanism opregion handler.
+ */
+void __init init_prmt(void)
+{
+       acpi_status status;
+       int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
+                                         sizeof (struct acpi_table_prmt_header),
+                                         0, acpi_parse_prmt, 0);
+       /*
+        * acpi_table_parse_entries() returns the number of matched entries
+        * or a negative error code (e.g. when the PRMT is absent).  The old
+        * code logged that signed value with %u and installed the opregion
+        * handler regardless; with no modules there is nothing to dispatch
+        * to, so bail out early instead.
+        */
+       if (mc <= 0)
+               return;
+
+       pr_info("PRM: found %u modules\n", mc);
+
+       status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
+                                                   ACPI_ADR_SPACE_PLATFORM_RT,
+                                                   &acpi_platformrt_space_handler,
+                                                   NULL, NULL);
+       if (ACPI_FAILURE(status))
+               pr_alert("PRM: OperationRegion handler could not be installed\n");
+}
index 45a019619e4a5657d51e238f6af01d9ec3ad739c..095c8aca141eb526f8bc0460f0d95f7204625d4e 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/sched.h>       /* need_resched() */
+#include <linux/sort.h>
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu.h>
@@ -384,10 +385,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
        return;
 }
 
+/*
+ * cmp_func for sort(): order C-states by ascending latency.  Pairs with an
+ * invalid state compare "equal" so invalid entries are left in place.
+ *
+ * NOTE(review): this makes the comparator non-transitive when valid and
+ * invalid states interleave, so sort()'s result can depend on the initial
+ * ordering — confirm the tables seen in practice keep invalid entries
+ * grouped at the tail.
+ */
+static int acpi_cst_latency_cmp(const void *a, const void *b)
+{
+       const struct acpi_processor_cx *x = a, *y = b;
+
+       if (!(x->valid && y->valid))
+               return 0;
+       if (x->latency > y->latency)
+               return 1;
+       if (x->latency < y->latency)
+               return -1;
+       return 0;
+}
+/* swap_func for sort(): exchange only the latency fields (n is the element
+ * size, unused), and never touch invalid states — mirrors the cmp above. */
+static void acpi_cst_latency_swap(void *a, void *b, int n)
+{
+       struct acpi_processor_cx *x = a, *y = b;
+       u32 tmp;
+
+       if (!(x->valid && y->valid))
+               return;
+       tmp = x->latency;
+       x->latency = y->latency;
+       y->latency = tmp;
+}
+
 static int acpi_processor_power_verify(struct acpi_processor *pr)
 {
        unsigned int i;
        unsigned int working = 0;
+       unsigned int last_latency = 0;
+       unsigned int last_type = 0;
+       bool buggy_latency = false;
 
        pr->power.timer_broadcast_on_state = INT_MAX;
 
@@ -411,12 +439,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
                }
                if (!cx->valid)
                        continue;
+               if (cx->type >= last_type && cx->latency < last_latency)
+                       buggy_latency = true;
+               last_latency = cx->latency;
+               last_type = cx->type;
 
                lapic_timer_check_state(i, pr, cx);
                tsc_check_state(cx->type);
                working++;
        }
 
+       if (buggy_latency) {
+               pr_notice("FW issue: working around C-state latencies out of order\n");
+               sort(&pr->power.states[1], max_cstate,
+                    sizeof(struct acpi_processor_cx),
+                    acpi_cst_latency_cmp,
+                    acpi_cst_latency_swap);
+       }
+
        lapic_timer_propagate_broadcast(pr);
 
        return (working);
index d088a0089ee9bb3ead1c913138624b7a33127b5c..757a98f6d7a24d5b2114b18aced28d39355da2ec 100644 (file)
@@ -9,6 +9,8 @@
  *                     - Added processor hotplug support
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -20,8 +22,6 @@
 #include <asm/cpufeature.h>
 #endif
 
-#define PREFIX "ACPI: "
-
 #define ACPI_PROCESSOR_FILE_PERFORMANCE        "performance"
 
 static DEFINE_MUTEX(performance_mutex);
@@ -194,7 +194,6 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
        union acpi_object *pct = NULL;
        union acpi_object obj = { 0 };
 
-
        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
@@ -204,7 +203,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
        pct = (union acpi_object *)buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
-               printk(KERN_ERR PREFIX "Invalid _PCT data\n");
+               pr_err("Invalid _PCT data\n");
                result = -EFAULT;
                goto end;
        }
@@ -218,7 +217,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
-               printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
+               pr_err("Invalid _PCT data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
@@ -234,7 +233,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
-               printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
+               pr_err("Invalid _PCT data (status_register)\n");
                result = -EFAULT;
                goto end;
        }
@@ -242,7 +241,7 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
        memcpy(&pr->performance->status_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));
 
-      end:
+end:
        kfree(buffer.pointer);
 
        return result;
@@ -294,7 +293,6 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
        int i;
        int last_invalid = -1;
 
-
        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
@@ -303,7 +301,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 
        pss = buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
-               printk(KERN_ERR PREFIX "Invalid _PSS data\n");
+               pr_err("Invalid _PSS data\n");
                result = -EFAULT;
                goto end;
        }
@@ -357,7 +355,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
                if (!px->core_frequency ||
                    ((u32)(px->core_frequency * 1000) !=
                     (px->core_frequency * 1000))) {
-                       printk(KERN_ERR FW_BUG PREFIX
+                       pr_err(FW_BUG
                               "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
                               pr->id, px->core_frequency);
                        if (last_invalid == -1)
@@ -375,8 +373,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
        }
 
        if (last_invalid == 0) {
-               printk(KERN_ERR FW_BUG PREFIX
-                      "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+               pr_err(FW_BUG
+                          "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
                result = -EFAULT;
                kfree(pr->performance->states);
                pr->performance->states = NULL;
@@ -385,7 +383,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
        if (last_invalid > 0)
                pr->performance->state_count = last_invalid;
 
-      end:
+end:
        kfree(buffer.pointer);
 
        return result;
@@ -426,7 +424,7 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
 #ifdef CONFIG_X86
        if (acpi_has_method(pr->handle, "_PPC")) {
                if(boot_cpu_has(X86_FEATURE_EST))
-                       printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
+                       pr_warn(FW_BUG "BIOS needs update for CPU "
                               "frequency support\n");
        }
 #endif
@@ -520,13 +518,13 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
 
        psd = buffer.pointer;
        if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
-               printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+               pr_err("Invalid _PSD data\n");
                result = -EFAULT;
                goto end;
        }
 
        if (psd->package.count != 1) {
-               printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+               pr_err("Invalid _PSD data\n");
                result = -EFAULT;
                goto end;
        }
@@ -537,19 +535,19 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
        status = acpi_extract_package(&(psd->package.elements[0]),
                &format, &state);
        if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX "Invalid _PSD data\n");
+               pr_err("Invalid _PSD data\n");
                result = -EFAULT;
                goto end;
        }
 
        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
-               printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
+               pr_err("Unknown _PSD:num_entries\n");
                result = -EFAULT;
                goto end;
        }
 
        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
-               printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
+               pr_err("Unknown _PSD:revision\n");
                result = -EFAULT;
                goto end;
        }
@@ -557,7 +555,7 @@ int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
-               printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
+               pr_err("Invalid _PSD:coord_type\n");
                result = -EFAULT;
                goto end;
        }
index 677a132be242b68d04721a3a3d44b112085133ae..a3d34e3f9f94bef6158e8057f3b8caaf457ab015 100644 (file)
@@ -17,8 +17,6 @@
 #include <acpi/processor.h>
 #include <linux/uaccess.h>
 
-#define PREFIX "ACPI: "
-
 #ifdef CONFIG_CPU_FREQ
 
 /* If a passive cooling situation is detected, primarily CPUfreq is used, as it
index e61b8f038364bb5c0414c5ef4c09480479db02f2..a822fe410dda01387db768fa7f2b080bcbbdbb49 100644 (file)
@@ -6,9 +6,11 @@
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- *                     - Added processor hotplug support
+ *                      - Added processor hotplug support
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -20,8 +22,6 @@
 #include <asm/io.h>
 #include <linux/uaccess.h>
 
-#define PREFIX "ACPI: "
-
 /* ignore_tpc:
  *  0 -> acpi processor driver doesn't ignore _TPC values
  *  1 -> acpi processor driver ignores _TPC values
@@ -195,15 +195,13 @@ void acpi_processor_throttling_init(void)
 {
        if (acpi_processor_update_tsd_coord())
                pr_debug("Assume no T-state coordination\n");
-
-       return;
 }
 
 static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 {
        struct throttling_tstate *p_tstate = data;
        struct acpi_processor *pr;
-       unsigned int cpu ;
+       unsigned int cpu;
        int target_state;
        struct acpi_processor_limit *p_limit;
        struct acpi_processor_throttling *p_throttling;
@@ -236,8 +234,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
                if (pr->throttling_platform_limit > target_state)
                        target_state = pr->throttling_platform_limit;
                if (target_state >= p_throttling->state_count) {
-                       printk(KERN_WARNING
-                               "Exceed the limit of T-state \n");
+                       pr_warn("Exceed the limit of T-state \n");
                        target_state = p_throttling->state_count - 1;
                }
                p_tstate->target_state = target_state;
@@ -256,8 +253,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
                                  cpu, target_state);
                break;
        default:
-               printk(KERN_WARNING
-                       "Unsupported Throttling notifier event\n");
+               pr_warn("Unsupported Throttling notifier event\n");
                break;
        }
 
@@ -408,7 +404,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *ptc = NULL;
-       union acpi_object obj = { 0 };
+       union acpi_object obj;
        struct acpi_processor_throttling *throttling;
 
        status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
@@ -422,7 +418,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
        ptc = (union acpi_object *)buffer.pointer;
        if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
            || (ptc->package.count != 2)) {
-               printk(KERN_ERR PREFIX "Invalid _PTC data\n");
+               pr_err("Invalid _PTC data\n");
                result = -EFAULT;
                goto end;
        }
@@ -436,8 +432,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
-               printk(KERN_ERR PREFIX
-                      "Invalid _PTC data (control_register)\n");
+               pr_err("Invalid _PTC data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
@@ -453,7 +448,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_ptc_register))
            || (obj.buffer.pointer == NULL)) {
-               printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
+               pr_err("Invalid _PTC data (status_register)\n");
                result = -EFAULT;
                goto end;
        }
@@ -465,19 +460,19 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 
        if ((throttling->control_register.bit_width +
                throttling->control_register.bit_offset) > 32) {
-               printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
+               pr_err("Invalid _PTC control register\n");
                result = -EFAULT;
                goto end;
        }
 
        if ((throttling->status_register.bit_width +
                throttling->status_register.bit_offset) > 32) {
-               printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
+               pr_err("Invalid _PTC status register\n");
                result = -EFAULT;
                goto end;
        }
 
-      end:
+end:
        kfree(buffer.pointer);
 
        return result;
@@ -506,7 +501,7 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
 
        tss = buffer.pointer;
        if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
-               printk(KERN_ERR PREFIX "Invalid _TSS data\n");
+               pr_err("Invalid _TSS data\n");
                result = -EFAULT;
                goto end;
        }
@@ -546,15 +541,14 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
                }
 
                if (!tx->freqpercentage) {
-                       printk(KERN_ERR PREFIX
-                              "Invalid _TSS data: freq is zero\n");
+                       pr_err("Invalid _TSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->throttling.states_tss);
                        goto end;
                }
        }
 
-      end:
+end:
        kfree(buffer.pointer);
 
        return result;
@@ -587,13 +581,13 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 
        tsd = buffer.pointer;
        if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
-               printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+               pr_err("Invalid _TSD data\n");
                result = -EFAULT;
                goto end;
        }
 
        if (tsd->package.count != 1) {
-               printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+               pr_err("Invalid _TSD data\n");
                result = -EFAULT;
                goto end;
        }
@@ -606,19 +600,19 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
        status = acpi_extract_package(&(tsd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX "Invalid _TSD data\n");
+               pr_err("Invalid _TSD data\n");
                result = -EFAULT;
                goto end;
        }
 
        if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
-               printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
+               pr_err("Unknown _TSD:num_entries\n");
                result = -EFAULT;
                goto end;
        }
 
        if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
-               printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
+               pr_err("Unknown _TSD:revision\n");
                result = -EFAULT;
                goto end;
        }
@@ -639,7 +633,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
                pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }
 
-      end:
+end:
        kfree(buffer.pointer);
        return result;
 }
@@ -711,13 +705,12 @@ static int acpi_throttling_rdmsr(u64 *value)
 
        if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
                !this_cpu_has(X86_FEATURE_ACPI)) {
-               printk(KERN_ERR PREFIX
-                       "HARDWARE addr space,NOT supported yet\n");
+               pr_err("HARDWARE addr space,NOT supported yet\n");
        } else {
                msr_low = 0;
                msr_high = 0;
                rdmsr_safe(MSR_IA32_THERM_CONTROL,
-                       (u32 *)&msr_low , (u32 *) &msr_high);
+                       (u32 *)&msr_low, (u32 *) &msr_high);
                msr = (msr_high << 32) | msr_low;
                *value = (u64) msr;
                ret = 0;
@@ -732,8 +725,7 @@ static int acpi_throttling_wrmsr(u64 value)
 
        if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
                !this_cpu_has(X86_FEATURE_ACPI)) {
-               printk(KERN_ERR PREFIX
-                       "HARDWARE addr space,NOT supported yet\n");
+               pr_err("HARDWARE addr space,NOT supported yet\n");
        } else {
                msr = value;
                wrmsr_safe(MSR_IA32_THERM_CONTROL,
@@ -745,15 +737,13 @@ static int acpi_throttling_wrmsr(u64 value)
 #else
 static int acpi_throttling_rdmsr(u64 *value)
 {
-       printk(KERN_ERR PREFIX
-               "HARDWARE addr space,NOT supported yet\n");
+       pr_err("HARDWARE addr space,NOT supported yet\n");
        return -1;
 }
 
 static int acpi_throttling_wrmsr(u64 value)
 {
-       printk(KERN_ERR PREFIX
-               "HARDWARE addr space,NOT supported yet\n");
+       pr_err("HARDWARE addr space,NOT supported yet\n");
        return -1;
 }
 #endif
@@ -784,7 +774,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
                ret = acpi_throttling_rdmsr(value);
                break;
        default:
-               printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+               pr_err("Unknown addr space %d\n",
                       (u32) (throttling->status_register.space_id));
        }
        return ret;
@@ -817,7 +807,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr,
                ret = acpi_throttling_wrmsr(value);
                break;
        default:
-               printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+               pr_err("Unknown addr space %d\n",
                       (u32) (throttling->control_register.space_id));
        }
        return ret;
@@ -926,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
        }
        /* TBD: Support duty_cycle values that span bit 4. */
        else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
-               printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
+               pr_warn("duty_cycle spans bit 4\n");
                return -EINVAL;
        }
 
@@ -1185,8 +1175,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
         */
        if (acpi_processor_get_throttling_control(pr) ||
                acpi_processor_get_throttling_states(pr) ||
-               acpi_processor_get_platform_limit(pr))
-       {
+               acpi_processor_get_platform_limit(pr)) {
                pr->throttling.acpi_processor_get_throttling =
                    &acpi_processor_get_throttling_fadt;
                pr->throttling.acpi_processor_set_throttling =
@@ -1246,7 +1235,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
                        goto end;
        }
 
-      end:
+end:
        if (result)
                pr->flags.throttling = 0;
 
index 2a61f884e2228c8c64a62e1c15a0b73988ea7f90..b79b7c99c237c331a4ca7dbba5f738b9a4b88a61 100644 (file)
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <acpi/reboot.h>
@@ -63,7 +65,7 @@ void acpi_reboot(void)
 
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
        case ACPI_ADR_SPACE_SYSTEM_IO:
-               printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n");
+               pr_debug("ACPI MEMORY or I/O RESET_REG.\n");
                acpi_reset();
                break;
        }
index ee78a210c6068645574b8a5954e14578efd7e564..dc01fb550b28dc6743b22594f859cee9f2ff4e33 100644 (file)
@@ -423,6 +423,13 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
        }
 }
 
+static bool irq_is_legacy(struct acpi_resource_irq *irq)
+{
+       return irq->triggering == ACPI_EDGE_SENSITIVE &&
+               irq->polarity == ACPI_ACTIVE_HIGH &&
+               irq->shareable == ACPI_EXCLUSIVE;
+}
+
 /**
  * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
  * @ares: Input ACPI resource object.
@@ -461,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                }
                acpi_dev_get_irqresource(res, irq->interrupts[index],
                                         irq->triggering, irq->polarity,
-                                        irq->shareable, true);
+                                        irq->shareable, irq_is_legacy(irq));
                break;
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
                ext_irq = &ares->data.extended_irq;
index 3b0b6dd34914fbf7e6c4710c05f94c6c300ad288..4938010fcac78d821f828434a44e84389eff4539 100644 (file)
@@ -7,6 +7,8 @@
  *  Copyright (c) 2005 Rich Townsend <rhdt@bartol.udel.edu>
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -23,8 +25,6 @@
 
 #include "sbshc.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_SBS_CLASS                 "sbs"
 #define ACPI_AC_CLASS                  "ac_adapter"
 #define ACPI_SBS_DEVICE_NAME           "Smart Battery System"
@@ -544,7 +544,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
                goto end;
        battery->have_sysfs_alarm = 1;
       end:
-       printk(KERN_INFO PREFIX "%s [%s]: Battery Slot [%s] (battery %s)\n",
+       pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n",
               ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
               battery->name, battery->present ? "present" : "absent");
        return result;
@@ -577,10 +577,10 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
                result = PTR_ERR(sbs->charger);
                sbs->charger = NULL;
        }
-       printk(KERN_INFO PREFIX "%s [%s]: AC Adapter [%s] (%s)\n",
+       pr_info("%s [%s]: AC Adapter [%s] (%s)\n",
               ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device),
               ACPI_AC_DIR_NAME, sbs->charger_present ? "on-line" : "off-line");
-      end:
+end:
        return result;
 }
 
@@ -658,7 +658,7 @@ static int acpi_sbs_add(struct acpi_device *device)
                acpi_battery_add(sbs, 0);
 
        acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
-      end:
+end:
        if (result)
                acpi_sbs_remove(device);
        return result;
index 53c2862c4c75dc05235eb8b98cb08d8b1be0fcdd..d69a2a84c83cbad87a75eb7d9e9d2386b888ca41 100644 (file)
@@ -5,6 +5,8 @@
  * Copyright (c) 2007 Alexey Starikovskiy
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/acpi.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
@@ -13,8 +15,6 @@
 #include <linux/interrupt.h>
 #include "sbshc.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_SMB_HC_CLASS      "smbus_host_ctl"
 #define ACPI_SMB_HC_DEVICE_NAME        "ACPI SMBus HC"
 
@@ -109,7 +109,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
        u8 temp, sz = 0;
 
        if (!hc) {
-               printk(KERN_ERR PREFIX "host controller is not configured\n");
+               pr_err("host controller is not configured\n");
                return ret;
        }
 
@@ -254,7 +254,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
 
        status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val);
        if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX "error obtaining _EC.\n");
+               pr_err("error obtaining _EC.\n");
                return -EIO;
        }
 
index a22778e880c22e58c5f5e74629666c5a033e9482..57507b643afded99e4bef429d41ff9f95688a55d 100644 (file)
@@ -3,6 +3,8 @@
  * scan.c - support for transforming the ACPI namespace into individual objects
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -47,12 +49,6 @@ static DEFINE_MUTEX(acpi_hp_context_lock);
  */
 static u64 spcr_uart_addr;
 
-struct acpi_dep_data {
-       struct list_head node;
-       acpi_handle supplier;
-       acpi_handle consumer;
-};
-
 void acpi_scan_lock_acquire(void)
 {
        mutex_lock(&acpi_scan_lock);
@@ -612,11 +608,6 @@ struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
        return handle_to_device(handle, get_acpi_device);
 }
 
-void acpi_bus_put_acpi_device(struct acpi_device *adev)
-{
-       acpi_dev_put(adev);
-}
-
 static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
 {
        struct acpi_device_bus_id *acpi_device_bus_id;
@@ -644,24 +635,29 @@ static int acpi_device_set_name(struct acpi_device *device,
        return 0;
 }
 
-int acpi_device_add(struct acpi_device *device,
-                   void (*release)(struct device *))
+static int acpi_tie_acpi_dev(struct acpi_device *adev)
 {
-       struct acpi_device_bus_id *acpi_device_bus_id;
-       int result;
+       acpi_handle handle = adev->handle;
+       acpi_status status;
 
-       if (device->handle) {
-               acpi_status status;
+       if (!handle)
+               return 0;
 
-               status = acpi_attach_data(device->handle, acpi_scan_drop_device,
-                                         device);
-               if (ACPI_FAILURE(status)) {
-                       acpi_handle_err(device->handle,
-                                       "Unable to attach device data\n");
-                       return -ENODEV;
-               }
+       status = acpi_attach_data(handle, acpi_scan_drop_device, adev);
+       if (ACPI_FAILURE(status)) {
+               acpi_handle_err(handle, "Unable to attach device data\n");
+               return -ENODEV;
        }
 
+       return 0;
+}
+
+static int __acpi_device_add(struct acpi_device *device,
+                            void (*release)(struct device *))
+{
+       struct acpi_device_bus_id *acpi_device_bus_id;
+       int result;
+
        /*
         * Linkage
         * -------
@@ -700,6 +696,7 @@ int acpi_device_add(struct acpi_device *device,
 
                result = acpi_device_set_name(device, acpi_device_bus_id);
                if (result) {
+                       kfree_const(acpi_device_bus_id->bus_id);
                        kfree(acpi_device_bus_id);
                        goto err_unlock;
                }
@@ -728,7 +725,7 @@ int acpi_device_add(struct acpi_device *device,
 
        result = acpi_device_setup_files(device);
        if (result)
-               printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
+               pr_err("Error creating sysfs interface for device %s\n",
                       dev_name(&device->dev));
 
        return 0;
@@ -749,6 +746,17 @@ err_unlock:
        return result;
 }
 
+int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *))
+{
+       int ret;
+
+       ret = acpi_tie_acpi_dev(adev);
+       if (ret)
+               return ret;
+
+       return __acpi_device_add(adev, release);
+}
+
 /* --------------------------------------------------------------------------
                                  Device Enumeration
    -------------------------------------------------------------------------- */
@@ -1319,8 +1327,7 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
 
                acpi_get_object_info(handle, &info);
                if (!info) {
-                       pr_err(PREFIX "%s: Error reading device info\n",
-                                       __func__);
+                       pr_err("%s: Error reading device info\n", __func__);
                        return;
                }
 
@@ -1670,8 +1677,16 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
        device_initialize(&device->dev);
        dev_set_uevent_suppress(&device->dev, true);
        acpi_init_coherency(device);
-       /* Assume there are unmet deps to start with. */
-       device->dep_unmet = 1;
+}
+
+static void acpi_scan_dep_init(struct acpi_device *adev)
+{
+       struct acpi_dep_data *dep;
+
+       list_for_each_entry(dep, &acpi_dep_list, node) {
+               if (dep->consumer == adev->handle)
+                       adev->dep_unmet++;
+       }
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1687,9 +1702,10 @@ static void acpi_scan_init_status(struct acpi_device *adev)
 }
 
 static int acpi_add_single_object(struct acpi_device **child,
-                                 acpi_handle handle, int type)
+                                 acpi_handle handle, int type, bool dep_init)
 {
        struct acpi_device *device;
+       bool release_dep_lock = false;
        int result;
 
        device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
@@ -1702,13 +1718,32 @@ static int acpi_add_single_object(struct acpi_device **child,
         * acpi_bus_get_status() and use its quirk handling.  Note that
         * this must be done before the get power-/wakeup_dev-flags calls.
         */
-       if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR)
+       if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) {
+               if (dep_init) {
+                       mutex_lock(&acpi_dep_list_lock);
+                       /*
+                        * Hold the lock until the acpi_tie_acpi_dev() call
+                        * below to prevent concurrent acpi_scan_clear_dep()
+                        * from deleting a dependency list entry without
+                        * updating dep_unmet for the device.
+                        */
+                       release_dep_lock = true;
+                       acpi_scan_dep_init(device);
+               }
                acpi_scan_init_status(device);
+       }
 
        acpi_bus_get_power_flags(device);
        acpi_bus_get_wakeup_device_flags(device);
 
-       result = acpi_device_add(device, acpi_device_release);
+       result = acpi_tie_acpi_dev(device);
+
+       if (release_dep_lock)
+               mutex_unlock(&acpi_dep_list_lock);
+
+       if (!result)
+               result = __acpi_device_add(device, acpi_device_release);
+
        if (result) {
                acpi_device_release(&device->dev);
                return result;
@@ -1885,22 +1920,6 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
        return count;
 }
 
-static void acpi_scan_dep_init(struct acpi_device *adev)
-{
-       struct acpi_dep_data *dep;
-
-       adev->dep_unmet = 0;
-
-       mutex_lock(&acpi_dep_list_lock);
-
-       list_for_each_entry(dep, &acpi_dep_list, node) {
-               if (dep->consumer == adev->handle)
-                       adev->dep_unmet++;
-       }
-
-       mutex_unlock(&acpi_dep_list_lock);
-}
-
 static bool acpi_bus_scan_second_pass;
 
 static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
@@ -1948,19 +1967,15 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
                return AE_OK;
        }
 
-       acpi_add_single_object(&device, handle, type);
-       if (!device)
-               return AE_CTRL_DEPTH;
-
-       acpi_scan_init_hotplug(device);
        /*
         * If check_dep is true at this point, the device has no dependencies,
         * or the creation of the device object would have been postponed above.
         */
-       if (check_dep)
-               device->dep_unmet = 0;
-       else
-               acpi_scan_dep_init(device);
+       acpi_add_single_object(&device, handle, type, !check_dep);
+       if (!device)
+               return AE_CTRL_DEPTH;
+
+       acpi_scan_init_hotplug(device);
 
 out:
        if (!*adev_p)
@@ -2110,29 +2125,141 @@ static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
                device->handler->hotplug.notify_online(device);
 }
 
-void acpi_walk_dep_device_list(acpi_handle handle)
+static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
 {
-       struct acpi_dep_data *dep, *tmp;
        struct acpi_device *adev;
 
+       adev = acpi_bus_get_acpi_device(dep->consumer);
+       if (adev) {
+               *(struct acpi_device **)data = adev;
+               return 1;
+       }
+       /* Continue parsing if the device object is not present. */
+       return 0;
+}
+
+struct acpi_scan_clear_dep_work {
+       struct work_struct work;
+       struct acpi_device *adev;
+};
+
+static void acpi_scan_clear_dep_fn(struct work_struct *work)
+{
+       struct acpi_scan_clear_dep_work *cdw;
+
+       cdw = container_of(work, struct acpi_scan_clear_dep_work, work);
+
+       acpi_scan_lock_acquire();
+       acpi_bus_attach(cdw->adev, true);
+       acpi_scan_lock_release();
+
+       acpi_dev_put(cdw->adev);
+       kfree(cdw);
+}
+
+static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
+{
+       struct acpi_scan_clear_dep_work *cdw;
+
+       if (adev->dep_unmet)
+               return false;
+
+       cdw = kmalloc(sizeof(*cdw), GFP_KERNEL);
+       if (!cdw)
+               return false;
+
+       cdw->adev = adev;
+       INIT_WORK(&cdw->work, acpi_scan_clear_dep_fn);
+       /*
+        * Since the work function may block on the lock until the entire
+        * initial enumeration of devices is complete, put it into the unbound
+        * workqueue.
+        */
+       queue_work(system_unbound_wq, &cdw->work);
+
+       return true;
+}
+
+static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
+{
+       struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer);
+
+       if (adev) {
+               adev->dep_unmet--;
+               if (!acpi_scan_clear_dep_queue(adev))
+                       acpi_dev_put(adev);
+       }
+
+       list_del(&dep->node);
+       kfree(dep);
+
+       return 0;
+}
+
+/**
+ * acpi_walk_dep_device_list - Apply a callback to every entry in acpi_dep_list
+ * @handle:    The ACPI handle of the supplier device
+ * @callback:  Pointer to the callback function to apply
+ * @data:      Pointer to some data to pass to the callback
+ *
+ * The return value of the callback determines this function's behaviour. If 0
+ * is returned we continue to iterate over acpi_dep_list. If a positive value
+ * is returned then the loop is broken but this function returns 0. If a
+ * negative value is returned by the callback then the loop is broken and that
+ * value is returned as the final error.
+ */
+static int acpi_walk_dep_device_list(acpi_handle handle,
+                               int (*callback)(struct acpi_dep_data *, void *),
+                               void *data)
+{
+       struct acpi_dep_data *dep, *tmp;
+       int ret = 0;
+
        mutex_lock(&acpi_dep_list_lock);
        list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
                if (dep->supplier == handle) {
-                       acpi_bus_get_device(dep->consumer, &adev);
-
-                       if (adev) {
-                               adev->dep_unmet--;
-                               if (!adev->dep_unmet)
-                                       acpi_bus_attach(adev, true);
-                       }
-
-                       list_del(&dep->node);
-                       kfree(dep);
+                       ret = callback(dep, data);
+                       if (ret)
+                               break;
                }
        }
        mutex_unlock(&acpi_dep_list_lock);
+
+       return ret > 0 ? 0 : ret;
+}
+
+/**
+ * acpi_dev_clear_dependencies - Inform consumers that the device is now active
+ * @supplier: Pointer to the supplier &struct acpi_device
+ *
+ * Clear dependencies on the given device.
+ */
+void acpi_dev_clear_dependencies(struct acpi_device *supplier)
+{
+       acpi_walk_dep_device_list(supplier->handle, acpi_scan_clear_dep, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_clear_dependencies);
+
+/**
+ * acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
+ * @supplier: Pointer to the dependee device
+ *
+ * Returns the first &struct acpi_device which declares itself dependent on
+ * @supplier via the _DEP buffer, parsed from the acpi_dep_list.
+ *
+ * The caller is responsible for putting the reference to adev when it is no
+ * longer needed.
+ */
+struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier)
+{
+       struct acpi_device *adev = NULL;
+
+       acpi_walk_dep_device_list(supplier->handle,
+                                 acpi_dev_get_first_consumer_dev_cb, &adev);
+
+       return adev;
 }
-EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
+EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev);
 
 /**
  * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
@@ -2222,7 +2349,7 @@ int acpi_bus_register_early_device(int type)
        struct acpi_device *device = NULL;
        int result;
 
-       result = acpi_add_single_object(&device, NULL, type);
+       result = acpi_add_single_object(&device, NULL, type, false);
        if (result)
                return result;
 
@@ -2242,7 +2369,7 @@ static int acpi_bus_scan_fixed(void)
                struct acpi_device *device = NULL;
 
                result = acpi_add_single_object(&device, NULL,
-                                               ACPI_BUS_TYPE_POWER_BUTTON);
+                                               ACPI_BUS_TYPE_POWER_BUTTON, false);
                if (result)
                        return result;
 
@@ -2258,7 +2385,7 @@ static int acpi_bus_scan_fixed(void)
                struct acpi_device *device = NULL;
 
                result = acpi_add_single_object(&device, NULL,
-                                               ACPI_BUS_TYPE_SLEEP_BUTTON);
+                                               ACPI_BUS_TYPE_SLEEP_BUTTON, false);
                if (result)
                        return result;
 
@@ -2277,7 +2404,7 @@ static void __init acpi_get_spcr_uart_addr(void)
        status = acpi_get_table(ACPI_SIG_SPCR, 0,
                                (struct acpi_table_header **)&spcr_ptr);
        if (ACPI_FAILURE(status)) {
-               pr_warn(PREFIX "STAO table present, but SPCR is missing\n");
+               pr_warn("STAO table present, but SPCR is missing\n");
                return;
        }
 
@@ -2318,7 +2445,7 @@ int __init acpi_scan_init(void)
                                (struct acpi_table_header **)&stao_ptr);
        if (ACPI_SUCCESS(status)) {
                if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
-                       pr_info(PREFIX "STAO Name List not yet supported.\n");
+                       pr_info("STAO Name List not yet supported.\n");
 
                if (stao_ptr->ignore_uart)
                        acpi_get_spcr_uart_addr();
@@ -2359,6 +2486,8 @@ int __init acpi_scan_init(void)
                }
        }
 
+       acpi_turn_off_unused_power_resources();
+
        acpi_scan_initialized = true;
 
  out:
@@ -2405,46 +2534,28 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
        return count;
 }
 
-struct acpi_table_events_work {
-       struct work_struct work;
-       void *table;
-       u32 event;
-};
-
 static void acpi_table_events_fn(struct work_struct *work)
 {
-       struct acpi_table_events_work *tew;
+       acpi_scan_lock_acquire();
+       acpi_bus_scan(ACPI_ROOT_OBJECT);
+       acpi_scan_lock_release();
 
-       tew = container_of(work, struct acpi_table_events_work, work);
-
-       if (tew->event == ACPI_TABLE_EVENT_LOAD) {
-               acpi_scan_lock_acquire();
-               acpi_bus_scan(ACPI_ROOT_OBJECT);
-               acpi_scan_lock_release();
-       }
-
-       kfree(tew);
+       kfree(work);
 }
 
-void acpi_scan_table_handler(u32 event, void *table, void *context)
+void acpi_scan_table_notify(void)
 {
-       struct acpi_table_events_work *tew;
+       struct work_struct *work;
 
        if (!acpi_scan_initialized)
                return;
 
-       if (event != ACPI_TABLE_EVENT_LOAD)
-               return;
-
-       tew = kmalloc(sizeof(*tew), GFP_KERNEL);
-       if (!tew)
+       work = kmalloc(sizeof(*work), GFP_KERNEL);
+       if (!work)
                return;
 
-       INIT_WORK(&tew->work, acpi_table_events_fn);
-       tew->table = table;
-       tew->event = event;
-
-       schedule_work(&tew->work);
+       INIT_WORK(work, acpi_table_events_fn);
+       schedule_work(work);
 }
 
 int acpi_reconfig_notifier_register(struct notifier_block *nb)
index 09fd13757b658fad6cfa16f1cca4f72e3a76fa67..3023224515abe1643f3b9c87f53170d6d2709ed1 100644 (file)
@@ -8,6 +8,8 @@
  * Copyright (c) 2003 Open Source Development Lab
  */
 
+#define pr_fmt(fmt) "ACPI: PM: " fmt
+
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/dmi.h>
@@ -41,7 +43,7 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
                 * OS can't evaluate the _TTS object correctly. Some warning
                 * message will be printed. But it won't break anything.
                 */
-               printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
+               pr_notice("Failure in evaluating _TTS object\n");
        }
 }
 
@@ -73,8 +75,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
        }
        ACPI_FLUSH_CPU_CACHE();
 #endif
-       printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
-               acpi_state);
+       pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
        acpi_enable_wakeup_devices(acpi_state);
        acpi_enter_sleep_state_prep(acpi_state);
        return 0;
@@ -406,7 +407,7 @@ static int acpi_pm_freeze(void)
 }
 
 /**
- * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
+ * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
  */
 static int acpi_pm_pre_suspend(void)
 {
@@ -459,8 +460,7 @@ static void acpi_pm_finish(void)
        if (acpi_state == ACPI_STATE_S0)
                return;
 
-       printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
-               acpi_state);
+       pr_info("Waking up from system sleep state S%d\n", acpi_state);
        acpi_disable_wakeup_devices(acpi_state);
        acpi_leave_sleep_state(acpi_state);
 
@@ -581,7 +581,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
                error = acpi_suspend_lowlevel();
                if (error)
                        return error;
-               pr_info(PREFIX "Low-level resume complete\n");
+               pr_info("Low-level resume complete\n");
                pm_set_resume_via_firmware();
                break;
        }
@@ -921,7 +921,7 @@ static void acpi_hibernation_leave(void)
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);
        /* Check the hardware signature */
        if (facs && s4_hardware_signature != facs->hardware_signature)
-               pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
+               pr_crit("Hardware changed while hibernated, success doubtful!\n");
        /* Restore the NVS memory area */
        suspend_nvs_restore();
        /* Allow EC transactions to happen. */
@@ -1009,10 +1009,8 @@ static void acpi_sleep_hibernate_setup(void)
                return;
 
        acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
-       if (facs) {
+       if (facs)
                s4_hardware_signature = facs->hardware_signature;
-               acpi_put_table((struct acpi_table_header *)facs);
-       }
 }
 #else /* !CONFIG_HIBERNATION */
 static inline void acpi_sleep_hibernate_setup(void) {}
@@ -1029,7 +1027,7 @@ static void acpi_power_off_prepare(void)
 static void acpi_power_off(void)
 {
        /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
-       printk(KERN_DEBUG "%s called\n", __func__);
+       pr_debug("%s called\n", __func__);
        local_irq_disable();
        acpi_enter_sleep_state(ACPI_STATE_S5);
 }
@@ -1061,7 +1059,7 @@ int __init acpi_sleep_init(void)
                if (sleep_states[i])
                        pos += sprintf(pos, " S%d", i);
        }
-       pr_info(PREFIX "(supports%s)\n", supported);
+       pr_info("(supports%s)\n", supported);
 
        /*
         * Register the tts_notifier to reboot notifier list so that the _TTS
index 1856f76ac83f73451715af33f5488f2420b7fca9..7fe41ee489d6123c2fec04a6672ef25f1c04cbce 100644 (file)
@@ -8,7 +8,6 @@ extern struct list_head acpi_wakeup_device_list;
 extern struct mutex acpi_device_lock;
 
 extern void acpi_resume_power_resources(void);
-extern void acpi_turn_off_unused_power_resources(void);
 
 static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
 {
index d25927195d6d8ef099fb1fd4e7a2aa301f0f5d51..00c0ebaab29f75d30091938a179d98ea867b92d2 100644 (file)
@@ -5,10 +5,11 @@
 
 #define pr_fmt(fmt) "ACPI: " fmt
 
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
-#include <linux/acpi.h>
 
 #include "internal.h"
 
@@ -254,16 +255,12 @@ static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
 {
        if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
                return sprintf(buffer, "disable\n");
-       else {
-               if (acpi_gbl_trace_method_name) {
-                       if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
-                               return sprintf(buffer, "method-once\n");
-                       else
-                               return sprintf(buffer, "method\n");
-               } else
-                       return sprintf(buffer, "enable\n");
-       }
-       return 0;
+       if (!acpi_gbl_trace_method_name)
+               return sprintf(buffer, "enable\n");
+       if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
+               return sprintf(buffer, "method-once\n");
+       else
+               return sprintf(buffer, "method\n");
 }
 
 module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
@@ -359,8 +356,7 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
        }
        table_attr->instance++;
        if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
-               pr_warn("%4.4s: too many table instances\n",
-                       table_attr->name);
+               pr_warn("%4.4s: too many table instances\n", table_attr->name);
                return -ERANGE;
        }
 
@@ -388,8 +384,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
 
        switch (event) {
        case ACPI_TABLE_EVENT_INSTALL:
-               table_attr =
-                   kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
+               table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
                if (!table_attr)
                        return AE_NO_MEMORY;
 
@@ -420,7 +415,7 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
                              loff_t offset, size_t count)
 {
        struct acpi_data_attr *data_attr;
-       void __iomem *base;
+       void *base;
        ssize_t rc;
 
        data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
@@ -582,8 +577,6 @@ static void delete_gpe_attr_array(void)
                kfree(counter_attrs);
        }
        kfree(all_attrs);
-
-       return;
 }
 
 static void gpe_count(u32 gpe_number)
@@ -598,8 +591,6 @@ static void gpe_count(u32 gpe_number)
        else
                all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
                             COUNT_ERROR].count++;
-
-       return;
 }
 
 static void fixed_event_count(u32 event_number)
@@ -612,8 +603,6 @@ static void fixed_event_count(u32 event_number)
        else
                all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
                             COUNT_ERROR].count++;
-
-       return;
 }
 
 static void acpi_global_event_handler(u32 event_type, acpi_handle device,
@@ -737,8 +726,7 @@ static ssize_t counter_set(struct kobject *kobj,
                goto end;
 
        if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
-               printk(KERN_WARNING PREFIX
-                      "Can not change Invalid GPE/Fixed Event status\n");
+               pr_warn("Can not change Invalid GPE/Fixed Event status\n");
                return -EINVAL;
        }
 
@@ -796,6 +784,7 @@ end:
  * the GPE flooding for GPE 00, they need to specify the following boot
  * parameter:
  *   acpi_mask_gpe=0x00
+ * Note, the parameter can be a list (see bitmap_parselist() for the details).
  * The masking status can be modified by the following runtime controlling
  * interface:
  *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
@@ -805,11 +794,16 @@ static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
 
 static int __init acpi_gpe_set_masked_gpes(char *val)
 {
+       int ret;
        u8 gpe;
 
-       if (kstrtou8(val, 0, &gpe))
-               return -EINVAL;
-       set_bit(gpe, acpi_masked_gpes_map);
+       ret = kstrtou8(val, 0, &gpe);
+       if (ret) {
+               ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
+               if (ret)
+                       return ret;
+       } else
+               set_bit(gpe, acpi_masked_gpes_map);
 
        return 1;
 }
@@ -841,13 +835,11 @@ void acpi_irq_stats_init(void)
        num_gpes = acpi_current_gpe_count;
        num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 
-       all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
-                           GFP_KERNEL);
+       all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
        if (all_attrs == NULL)
                return;
 
-       all_counters = kcalloc(num_counters, sizeof(struct event_counter),
-                              GFP_KERNEL);
+       all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
        if (all_counters == NULL)
                goto fail;
 
@@ -855,8 +847,7 @@ void acpi_irq_stats_init(void)
        if (ACPI_FAILURE(status))
                goto fail;
 
-       counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
-                               GFP_KERNEL);
+       counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
        if (counter_attrs == NULL)
                goto fail;
 
@@ -906,7 +897,6 @@ void acpi_irq_stats_init(void)
 
 fail:
        delete_gpe_attr_array();
-       return;
 }
 
 static void __exit interrupt_stats_exit(void)
@@ -914,31 +904,24 @@ static void __exit interrupt_stats_exit(void)
        sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
 
        delete_gpe_attr_array();
-
-       return;
 }
 
-static ssize_t
-acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
-                 char *buf)
+static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
        return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 }
 
-static const struct kobj_attribute pm_profile_attr =
-       __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
+static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
 
-static ssize_t hotplug_enabled_show(struct kobject *kobj,
-                                   struct kobj_attribute *attr, char *buf)
+static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
        struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
 
        return sprintf(buf, "%d\n", hotplug->enabled);
 }
 
-static ssize_t hotplug_enabled_store(struct kobject *kobj,
-                                    struct kobj_attribute *attr,
-                                    const char *buf, size_t size)
+static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
+                            const char *buf, size_t size)
 {
        struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
        unsigned int val;
@@ -950,9 +933,7 @@ static ssize_t hotplug_enabled_store(struct kobject *kobj,
        return size;
 }
 
-static struct kobj_attribute hotplug_enabled_attr =
-       __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
-               hotplug_enabled_store);
+static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
 
 static struct attribute *hotplug_profile_attrs[] = {
        &hotplug_enabled_attr.attr,
@@ -983,7 +964,7 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
        return;
 
  err_out:
-       pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
+       pr_err("Unable to add hotplug profile '%s'\n", name);
 }
 
 static ssize_t force_remove_show(struct kobject *kobj,
@@ -1010,9 +991,7 @@ static ssize_t force_remove_store(struct kobject *kobj,
        return size;
 }
 
-static const struct kobj_attribute force_remove_attr =
-       __ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
-              force_remove_store);
+static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
 
 int __init acpi_sysfs_init(void)
 {
index 9d581045acff079276ed81a307b60ec7244f816b..a37a1532a575719c17bd774c064163518691f3de 100644 (file)
@@ -39,6 +39,7 @@ static int acpi_apic_instance __initdata;
 enum acpi_subtable_type {
        ACPI_SUBTABLE_COMMON,
        ACPI_SUBTABLE_HMAT,
+       ACPI_SUBTABLE_PRMT,
 };
 
 struct acpi_subtable_entry {
@@ -222,6 +223,8 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
                return entry->hdr->common.type;
        case ACPI_SUBTABLE_HMAT:
                return entry->hdr->hmat.type;
+       case ACPI_SUBTABLE_PRMT:
+               return 0;
        }
        return 0;
 }
@@ -234,6 +237,8 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
                return entry->hdr->common.length;
        case ACPI_SUBTABLE_HMAT:
                return entry->hdr->hmat.length;
+       case ACPI_SUBTABLE_PRMT:
+               return entry->hdr->prmt.length;
        }
        return 0;
 }
@@ -246,6 +251,8 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
                return sizeof(entry->hdr->common);
        case ACPI_SUBTABLE_HMAT:
                return sizeof(entry->hdr->hmat);
+       case ACPI_SUBTABLE_PRMT:
+               return sizeof(entry->hdr->prmt);
        }
        return 0;
 }
@@ -255,6 +262,8 @@ acpi_get_subtable_type(char *id)
 {
        if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
                return ACPI_SUBTABLE_HMAT;
+       if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
+               return ACPI_SUBTABLE_PRMT;
        return ACPI_SUBTABLE_COMMON;
 }
 
index 2b69536cdccbafdf9f50e142e9374a54b541bbfa..816bf2c34b7a97ad1fbbda6640ea5097efcdf77b 100644 (file)
@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
        {"", },
 };
 
+/* Microsoft platform agnostic UUID */
+#define ACPI_LPS0_DSM_UUID_MICROSOFT      "11e00d56-ce64-47ce-837b-1f898f9aa461"
+
 #define ACPI_LPS0_DSM_UUID     "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
 
 #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS       1
@@ -39,15 +42,22 @@ static const struct acpi_device_id lps0_device_ids[] = {
 #define ACPI_LPS0_SCREEN_ON    4
 #define ACPI_LPS0_ENTRY                5
 #define ACPI_LPS0_EXIT         6
+#define ACPI_LPS0_MS_ENTRY      7
+#define ACPI_LPS0_MS_EXIT       8
 
 /* AMD */
 #define ACPI_LPS0_DSM_UUID_AMD      "e3f32452-febc-43ce-9039-932122d37721"
+#define ACPI_LPS0_ENTRY_AMD         2
+#define ACPI_LPS0_EXIT_AMD          3
 #define ACPI_LPS0_SCREEN_OFF_AMD    4
 #define ACPI_LPS0_SCREEN_ON_AMD     5
 
 static acpi_handle lps0_device_handle;
 static guid_t lps0_dsm_guid;
-static char lps0_dsm_func_mask;
+static int lps0_dsm_func_mask;
+
+static guid_t lps0_dsm_guid_microsoft;
+static int lps0_dsm_func_mask_microsoft;
 
 /* Device constraint entry structure */
 struct lpi_device_info {
@@ -68,15 +78,7 @@ struct lpi_constraints {
        int min_dstate;
 };
 
-/* AMD */
-/* Device constraint entry structure */
-struct lpi_device_info_amd {
-       int revision;
-       int count;
-       union acpi_object *package;
-};
-
-/* Constraint package structure */
+/* AMD Constraint package structure */
 struct lpi_device_constraint_amd {
        char *name;
        int enabled;
@@ -94,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
        int i, j, k;
 
        out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
-                                         1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+                                         rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
                                          NULL, ACPI_TYPE_PACKAGE);
 
-       if (!out_obj)
-               return;
-
        acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
                          out_obj ? "successful" : "failed");
 
+       if (!out_obj)
+               return;
+
        for (i = 0; i < out_obj->package.count; i++) {
                union acpi_object *package = &out_obj->package.elements[i];
 
@@ -315,14 +317,15 @@ static void lpi_check_constraints(void)
        }
 }
 
-static void acpi_sleep_run_lps0_dsm(unsigned int func)
+static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
 {
        union acpi_object *out_obj;
 
-       if (!(lps0_dsm_func_mask & (1 << func)))
+       if (!(func_mask & (1 << func)))
                return;
 
-       out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
+       out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
+                                       rev_id, func, NULL);
        ACPI_FREE(out_obj);
 
        acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
@@ -334,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
        return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
 }
 
+static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
+{
+       union acpi_object *obj;
+       int ret = -EINVAL;
+
+       guid_parse(uuid, dsm_guid);
+       obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
+
+       /* Check if the _DSM is present and as expected. */
+       if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
+           obj->buffer.length > sizeof(u32)) {
+               acpi_handle_debug(handle,
+                               "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
+               goto out;
+       }
+
+       ret = *(int *)obj->buffer.pointer;
+       acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
+
+out:
+       ACPI_FREE(obj);
+       return ret;
+}
+
 static int lps0_device_attach(struct acpi_device *adev,
                              const struct acpi_device_id *not_used)
 {
-       union acpi_object *out_obj;
-
        if (lps0_device_handle)
                return 0;
 
@@ -346,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
                return 0;
 
        if (acpi_s2idle_vendor_amd()) {
-               guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
-               out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
+               /* AMD0004, AMDI0005:
+                * - Should use rev_id 0x0
+                * - function mask > 0x3: Should use AMD method, but has off by one bug
+                * - function mask = 0x3: Should use Microsoft method
+                * AMDI0006:
+                * - should use rev_id 0x0
+                * - function mask = 0x3: Should use Microsoft method
+                */
+               const char *hid = acpi_device_hid(adev);
                rev_id = 0;
+               lps0_dsm_func_mask = validate_dsm(adev->handle,
+                                       ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
+               lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
+                                       ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+                                       &lps0_dsm_guid_microsoft);
+               if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
+                                                !strcmp(hid, "AMDI0005"))) {
+                       lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
+                       acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
+                                         ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+               }
        } else {
-               guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
-               out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
                rev_id = 1;
+               lps0_dsm_func_mask = validate_dsm(adev->handle,
+                                       ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
+               lps0_dsm_func_mask_microsoft = -EINVAL;
        }
 
-       /* Check if the _DSM is present and as expected. */
-       if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
-               acpi_handle_debug(adev->handle,
-                                 "_DSM function 0 evaluation failed\n");
-               return 0;
-       }
-
-       lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
-
-       ACPI_FREE(out_obj);
-
-       acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
-                         lps0_dsm_func_mask);
+       if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
+               return 0; //function evaluation failed
 
        lps0_device_handle = adev->handle;
 
@@ -406,11 +439,23 @@ int acpi_s2idle_prepare_late(void)
        if (pm_debug_messages_on)
                lpi_check_constraints();
 
-       if (acpi_s2idle_vendor_amd()) {
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
+       if (lps0_dsm_func_mask_microsoft > 0) {
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+                               lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+                               lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+                               lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+       } else if (acpi_s2idle_vendor_amd()) {
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
        } else {
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
        }
 
        return 0;
@@ -421,11 +466,23 @@ void acpi_s2idle_restore_early(void)
        if (!lps0_device_handle || sleep_no_lps0)
                return;
 
-       if (acpi_s2idle_vendor_amd()) {
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
+       if (lps0_dsm_func_mask_microsoft > 0) {
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+                               lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+                               lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+                               lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+       } else if (acpi_s2idle_vendor_amd()) {
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
        } else {
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
+                               lps0_dsm_func_mask, lps0_dsm_guid);
        }
 }
 
index 61d34e1dc59c5c6ff68924a5f4e066a4d66c4a98..bcec598b89f23d0588079a5b21818dde05380856 100644 (file)
@@ -4918,7 +4918,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                uint32_t enable;
 
                if (copy_from_user(&enable, ubuf, sizeof(enable))) {
-                       ret = -EINVAL;
+                       ret = -EFAULT;
                        goto err;
                }
                binder_inner_proc_lock(proc);
index 4a8bf8cda52bc77243fcffb8af554309aeed01bb..54ba506e5a89dcf919a625cc0c01f6dad0401993 100644 (file)
@@ -150,7 +150,7 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
        fwnode_links_purge_consumers(fwnode);
 }
 
-static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
 {
        struct fwnode_handle *child;
 
@@ -164,6 +164,7 @@ static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
        fwnode_for_each_available_child_node(fwnode, child)
                fw_devlink_purge_absent_suppliers(child);
 }
+EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
 
 #ifdef CONFIG_SRCU
 static DEFINE_MUTEX(device_links_lock);
@@ -193,6 +194,17 @@ int device_links_read_lock_held(void)
 {
        return srcu_read_lock_held(&device_links_srcu);
 }
+
+static void device_link_synchronize_removal(void)
+{
+       synchronize_srcu(&device_links_srcu);
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+       list_del_rcu(&link->s_node);
+       list_del_rcu(&link->c_node);
+}
 #else /* !CONFIG_SRCU */
 static DECLARE_RWSEM(device_links_lock);
 
@@ -223,6 +235,16 @@ int device_links_read_lock_held(void)
        return lockdep_is_held(&device_links_lock);
 }
 #endif
+
+static inline void device_link_synchronize_removal(void)
+{
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+       list_del(&link->s_node);
+       list_del(&link->c_node);
+}
 #endif /* !CONFIG_SRCU */
 
 static bool device_is_ancestor(struct device *dev, struct device *target)
@@ -444,8 +466,13 @@ static struct attribute *devlink_attrs[] = {
 };
 ATTRIBUTE_GROUPS(devlink);
 
-static void device_link_free(struct device_link *link)
+static void device_link_release_fn(struct work_struct *work)
 {
+       struct device_link *link = container_of(work, struct device_link, rm_work);
+
+       /* Ensure that all references to the link object have been dropped. */
+       device_link_synchronize_removal();
+
        while (refcount_dec_not_one(&link->rpm_active))
                pm_runtime_put(link->supplier);
 
@@ -454,24 +481,19 @@ static void device_link_free(struct device_link *link)
        kfree(link);
 }
 
-#ifdef CONFIG_SRCU
-static void __device_link_free_srcu(struct rcu_head *rhead)
-{
-       device_link_free(container_of(rhead, struct device_link, rcu_head));
-}
-
 static void devlink_dev_release(struct device *dev)
 {
        struct device_link *link = to_devlink(dev);
 
-       call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
-}
-#else
-static void devlink_dev_release(struct device *dev)
-{
-       device_link_free(to_devlink(dev));
+       INIT_WORK(&link->rm_work, device_link_release_fn);
+       /*
+        * It may take a while to complete this work because of the SRCU
+        * synchronization in device_link_release_fn() and if the consumer or
+        * supplier devices get deleted when it runs, so put it into the "long"
+        * workqueue.
+        */
+       queue_work(system_long_wq, &link->rm_work);
 }
-#endif
 
 static struct class devlink_class = {
        .name = "devlink",
@@ -845,7 +867,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(device_link_add);
 
-#ifdef CONFIG_SRCU
 static void __device_link_del(struct kref *kref)
 {
        struct device_link *link = container_of(kref, struct device_link, kref);
@@ -855,25 +876,9 @@ static void __device_link_del(struct kref *kref)
 
        pm_runtime_drop_link(link);
 
-       list_del_rcu(&link->s_node);
-       list_del_rcu(&link->c_node);
-       device_unregister(&link->link_dev);
-}
-#else /* !CONFIG_SRCU */
-static void __device_link_del(struct kref *kref)
-{
-       struct device_link *link = container_of(kref, struct device_link, kref);
-
-       dev_info(link->consumer, "Dropping the link to %s\n",
-                dev_name(link->supplier));
-
-       pm_runtime_drop_link(link);
-
-       list_del(&link->s_node);
-       list_del(&link->c_node);
+       device_link_remove_from_lists(link);
        device_unregister(&link->link_dev);
 }
-#endif /* !CONFIG_SRCU */
 
 static void device_link_put_kref(struct device_link *link)
 {
index b31b3af5c490fc56b9a0ac3a0d5be62ac87b45cf..d5ffaab3cb61c95a194386e71a94910db1fb31b4 100644 (file)
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
        struct zone *zone;
        int ret;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-
        /*
         * Unaccount before offlining, such that unpopulated zone and kthreads
         * can properly be torn down in offline_pages().
         */
-       if (nr_vmemmap_pages)
+       if (nr_vmemmap_pages) {
+               zone = page_zone(pfn_to_page(start_pfn));
                adjust_present_page_count(zone, -nr_vmemmap_pages);
+       }
 
        ret = offline_pages(start_pfn + nr_vmemmap_pages,
                            nr_pages - nr_vmemmap_pages);
index 1fc1a992f90caa07fd607ebbb4a2d0a1a80b26a2..b570848d23e0eb9f9fe6d13ec9e75263ae737586 100644 (file)
@@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
+       dev->power.needs_force_resume = 0;
        INIT_WORK(&dev->power.work, pm_runtime_work);
 
        dev->power.timer_expires = 0;
@@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
         * its parent, but set its status to RPM_SUSPENDED anyway in case this
         * function will be called again for it in the meantime.
         */
-       if (pm_runtime_need_not_resume(dev))
+       if (pm_runtime_need_not_resume(dev)) {
                pm_runtime_set_suspended(dev);
-       else
+       } else {
                __update_runtime_status(dev, RPM_SUSPENDED);
+               dev->power.needs_force_resume = 1;
+       }
 
        return 0;
 
@@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
        int (*callback)(struct device *);
        int ret = 0;
 
-       if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+       if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
                goto out;
 
        /*
@@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
 
        pm_runtime_mark_last_busy(dev);
 out:
+       dev->power.needs_force_resume = 0;
        pm_runtime_enable(dev);
        return ret;
 }
index d58d68f3c7cd04014511cebd878c8db821510577..76e12f3482a917ab9ddc64121095fc5742805bfb 100644 (file)
@@ -1879,29 +1879,18 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
-       struct loop_device *lo;
+       struct loop_device *lo = bdev->bd_disk->private_data;
        int err;
 
-       /*
-        * take loop_ctl_mutex to protect lo pointer from race with
-        * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
-        * release it prior to updating lo->lo_refcnt.
-        */
-       err = mutex_lock_killable(&loop_ctl_mutex);
-       if (err)
-               return err;
-       lo = bdev->bd_disk->private_data;
-       if (!lo) {
-               mutex_unlock(&loop_ctl_mutex);
-               return -ENXIO;
-       }
        err = mutex_lock_killable(&lo->lo_mutex);
-       mutex_unlock(&loop_ctl_mutex);
        if (err)
                return err;
-       atomic_inc(&lo->lo_refcnt);
+       if (lo->lo_state == Lo_deleting)
+               err = -ENXIO;
+       else
+               atomic_inc(&lo->lo_refcnt);
        mutex_unlock(&lo->lo_mutex);
-       return 0;
+       return err;
 }
 
 static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -2285,7 +2274,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
                        mutex_unlock(&lo->lo_mutex);
                        break;
                }
-               lo->lo_disk->private_data = NULL;
+               lo->lo_state = Lo_deleting;
                mutex_unlock(&lo->lo_mutex);
                idr_remove(&loop_index_idr, lo->lo_number);
                loop_remove(lo);
index a3c04f310672e8e3968fbb30a1c596368002d340..5beb959b94d36b79bf38a40faa0c502146d8c26f 100644 (file)
@@ -22,6 +22,7 @@ enum {
        Lo_unbound,
        Lo_bound,
        Lo_rundown,
+       Lo_deleting,
 };
 
 struct loop_func_table;
index 4ff71b579cfcc8051f911fdef19a35a653509c0c..45d2c28c8fc8327ed28941923416272df2833fca 100644 (file)
@@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
         * config ref and try to destroy the workqueue from inside the work
         * queue.
         */
-       flush_workqueue(nbd->recv_workq);
+       if (nbd->recv_workq)
+               flush_workqueue(nbd->recv_workq);
        if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
@@ -2014,12 +2015,11 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
        }
        mutex_unlock(&nbd_index_mutex);
-       if (!refcount_inc_not_zero(&nbd->config_refs)) {
-               nbd_put(nbd);
-               return 0;
-       }
+       if (!refcount_inc_not_zero(&nbd->config_refs))
+               goto put_nbd;
        nbd_disconnect_and_put(nbd);
        nbd_config_put(nbd);
+put_nbd:
        nbd_put(nbd);
        return 0;
 }
index 5d603ef39bad93a17ddb14e78ce644a8962e60dd..7f6ba2c975ed4e8e1c5e767cc2bb1dce7e594399 100644 (file)
@@ -388,6 +388,8 @@ static const struct usb_device_id blacklist_table[] = {
        /* Realtek 8822CE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
                                                     BTUSB_WIDEBAND_SPEECH },
+       { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
+                                                    BTUSB_WIDEBAND_SPEECH },
 
        /* Realtek 8852AE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
@@ -2527,10 +2529,17 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
        }
 
        btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
-       err = request_firmware(&fw, fwname, &hdev->dev);
+       err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
                           fwname, err);
+
                return err;
        }
 
@@ -2680,12 +2689,24 @@ download:
        err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
                                                sizeof(fwname), "sfi");
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Unsupported Intel firmware naming");
                return -EINVAL;
        }
 
-       err = request_firmware(&fw, fwname, &hdev->dev);
+       err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
                           fwname, err);
                return err;
index 7c810f02a2ef4d164f959e3a02a7aa6684bfe32b..b3357a8a2fdbcf3428487a5675d56e35e57ee098 100644 (file)
@@ -311,8 +311,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
        MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
        MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
        MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
-       MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
-       MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
        MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
        MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
 };
@@ -708,7 +708,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
        struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
        struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
 
-       del_timer(&mhi_pdev->health_check_timer);
+       del_timer_sync(&mhi_pdev->health_check_timer);
        cancel_work_sync(&mhi_pdev->recovery_work);
 
        if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -935,9 +935,43 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
        return ret;
 }
 
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+       struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+       struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+       /* We want to stop all operations, hibernation does not guarantee that
+        * device will be in the same state as before freezing, especially if
+        * the intermediate restore kernel reinitializes MHI device with new
+        * context.
+        */
+       if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+               mhi_power_down(mhi_cntrl, false);
+               mhi_unprepare_after_power_down(mhi_cntrl);
+       }
+
+       return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+       struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+       /* Reinitialize the device */
+       queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+       return 0;
+}
+
 static const struct dev_pm_ops mhi_pci_pm_ops = {
        SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
-       SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+       .suspend = mhi_pci_suspend,
+       .resume = mhi_pci_resume,
+       .freeze = mhi_pci_freeze,
+       .thaw = mhi_pci_restore,
+       .restore = mhi_pci_restore,
+#endif
 };
 
 static struct pci_driver mhi_pci_driver = {
index 5fae60f8c1355519c0c4825fae8888e4990f7ef0..38cb116ed433fb6c3ba64322e7b58c913fa80489 100644 (file)
@@ -1334,6 +1334,34 @@ err_allow_idle:
        return error;
 }
 
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+       struct device *dev = ddata->dev;
+       int error;
+
+       /* Disable target module if it is enabled */
+       if (ddata->enabled) {
+               error = sysc_runtime_suspend(dev);
+               if (error)
+                       dev_warn(dev, "reinit suspend failed: %i\n", error);
+       }
+
+       /* Enable target module */
+       error = sysc_runtime_resume(dev);
+       if (error)
+               dev_warn(dev, "reinit resume failed: %i\n", error);
+
+       if (leave_enabled)
+               return error;
+
+       /* Disable target module if no leave_enabled was set */
+       error = sysc_runtime_suspend(dev);
+       if (error)
+               dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+       return error;
+}
+
 static int __maybe_unused sysc_noirq_suspend(struct device *dev)
 {
        struct sysc *ddata;
@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
            (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
-       return pm_runtime_force_suspend(dev);
+       if (!ddata->enabled)
+               return 0;
+
+       ddata->needs_resume = 1;
+
+       return sysc_runtime_suspend(dev);
 }
 
 static int __maybe_unused sysc_noirq_resume(struct device *dev)
 {
        struct sysc *ddata;
+       int error = 0;
 
        ddata = dev_get_drvdata(dev);
 
@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
            (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
-       return pm_runtime_force_resume(dev);
+       if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+               error = sysc_reinit_module(ddata, ddata->needs_resume);
+               if (error)
+                       dev_warn(dev, "noirq_resume failed: %i\n", error);
+       } else if (ddata->needs_resume) {
+               error = sysc_runtime_resume(dev);
+               if (error)
+                       dev_warn(dev, "noirq_resume failed: %i\n", error);
+       }
+
+       ddata->needs_resume = 0;
+
+       return error;
 }
 
 static const struct dev_pm_ops sysc_pm_ops = {
@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
        /* Uarts on omap4 and later */
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
-                  SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
-                  SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
 
        /* Quirks that need to be set based on the module address */
        SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1459,6 +1505,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+       SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -1466,7 +1514,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
                   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
-                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+                  SYSC_QUIRK_REINIT_ON_RESUME),
        SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
                   SYSC_MODULE_QUIRK_WDT),
        /* PRUSS on am3, am4 and am5 */
@@ -1524,7 +1573,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
        SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
        SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
-       SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
index 742b4a0932e3da962c802ed10703440ec7ccd00b..c6d8c0f597224bb95276b85c8fb948f63f0790e1 100644 (file)
@@ -744,6 +744,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
 static int probe_gdrom(struct platform_device *devptr)
 {
        int err;
+
+       /*
+        * Ensure our "one" device is initialized properly in case of previous
+        * usages of it
+        */
+       memset(&gd, 0, sizeof(gd));
+
        /* Start the device */
        if (gdrom_execute_diagnostic() != 1) {
                pr_warn("ATA Probe for GDROM failed\n");
@@ -830,6 +837,8 @@ static int remove_gdrom(struct platform_device *devptr)
        if (gdrom_major)
                unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
        unregister_cdrom(gd.cd_info);
+       kfree(gd.cd_info);
+       kfree(gd.toc);
 
        return 0;
 }
@@ -845,7 +854,7 @@ static struct platform_driver gdrom_driver = {
 static int __init init_gdrom(void)
 {
        int rc;
-       gd.toc = NULL;
+
        rc = platform_driver_register(&gdrom_driver);
        if (rc)
                return rc;
@@ -861,8 +870,6 @@ static void __exit exit_gdrom(void)
 {
        platform_device_unregister(pd);
        platform_driver_unregister(&gdrom_driver);
-       kfree(gd.toc);
-       kfree(gd.cd_info);
 }
 
 module_init(init_gdrom);
index ed3b7dab678dbd16acc5cef239415fcaaa360816..8b55085650ad08203eea541715d9a9f7f93dcb5e 100644 (file)
@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
                hdp->hd_phys_address = fixmem32->address;
                hdp->hd_address = ioremap(fixmem32->address,
                                                HPET_RANGE_SIZE);
+               if (!hdp->hd_address)
+                       return AE_ERROR;
 
                if (hpet_is_known(hdp)) {
                        iounmap(hdp->hd_address);
index eff1f12d981ab275e07387864a876e1cfc35e2d9..c84d239512197aa279aaae47ae2b48b7a4b9779e 100644 (file)
@@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
 
        if (nr_commands !=
            be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
+               rc = -EFAULT;
                tpm_buf_destroy(&buf);
                goto out;
        }
index a2e0395cbe618d8a86604c854a0e46dccca40be3..55b9d3965ae1b3b9135065db1690bb97771b6a07 100644 (file)
@@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
        cap_t cap;
        int ret;
 
-       /* TPM 2.0 */
-       if (chip->flags & TPM_CHIP_FLAG_TPM2)
-               return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
-
-       /* TPM 1.2 */
        ret = request_locality(chip, 0);
        if (ret < 0)
                return ret;
 
-       ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+       if (chip->flags & TPM_CHIP_FLAG_TPM2)
+               ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+       else
+               ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
 
        release_locality(chip, 0);
 
@@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
        if (ret)
                return ret;
 
-       /* TPM 1.2 requires self-test on resume. This function actually returns
+       /*
+        * TPM 1.2 requires self-test on resume. This function actually returns
         * an error code but for unknown reason it isn't handled.
         */
-       if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+       if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+               ret = request_locality(chip, 0);
+               if (ret < 0)
+                       return ret;
+
                tpm1_do_selftest(chip);
 
+               release_locality(chip, 0);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_tis_resume);
index e2ec1b7452439cde9a4fcf54c1b3c2e2954cf33d..65508eb89ec9965395ceb98e1aeb5c2f324e4cf1 100644 (file)
@@ -4540,6 +4540,9 @@ int of_clk_add_provider(struct device_node *np,
        struct of_clk_provider *cp;
        int ret;
 
+       if (!np)
+               return 0;
+
        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;
@@ -4579,6 +4582,9 @@ int of_clk_add_hw_provider(struct device_node *np,
        struct of_clk_provider *cp;
        int ret;
 
+       if (!np)
+               return 0;
+
        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;
@@ -4676,6 +4682,9 @@ void of_clk_del_provider(struct device_node *np)
 {
        struct of_clk_provider *cp;
 
+       if (!np)
+               return;
+
        mutex_lock(&of_clk_mutex);
        list_for_each_entry(cp, &of_clk_providers, link) {
                if (cp->node == np) {
index 977fd05ac35f62e1db380a8f289f475b52109355..d6ece7bbce894b0f9d42da6922178f9341bbb658 100644 (file)
@@ -419,7 +419,7 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
        hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 }
 
-#ifdef VDSO_CLOCKMODE_HVCLOCK
+#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
 static int hv_cs_enable(struct clocksource *cs)
 {
        vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
@@ -435,7 +435,7 @@ static struct clocksource hyperv_cs_tsc = {
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .suspend= suspend_hv_clock_tsc,
        .resume = resume_hv_clock_tsc,
-#ifdef VDSO_CLOCKMODE_HVCLOCK
+#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
        .enable = hv_cs_enable,
        .vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
 #else
index d1bbc16fba4b4c9881ba88c3e039cc63ddd91b1a..7e7450453714d3d77d3a0cec0020a2edb6e9e8e7 100644 (file)
@@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
                return 0;
        }
 
-       highest_perf = perf_caps.highest_perf;
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               highest_perf = amd_get_highest_perf();
+       else
+               highest_perf = perf_caps.highest_perf;
+
        nominal_perf = perf_caps.nominal_perf;
 
        if (!highest_perf || !nominal_perf) {
index f0401064d7aa5da75b0b15187a88d58d4ee100f7..0e69dffd5a76708f21017be678ed5cf5335f296a 100644 (file)
@@ -3033,6 +3033,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
        {}
 };
 
+static bool intel_pstate_hwp_is_enabled(void)
+{
+       u64 value;
+
+       rdmsrl(MSR_PM_ENABLE, value);
+       return !!(value & 0x1);
+}
+
 static int __init intel_pstate_init(void)
 {
        const struct x86_cpu_id *id;
@@ -3051,8 +3059,12 @@ static int __init intel_pstate_init(void)
                 * Avoid enabling HWP for processors without EPP support,
                 * because that means incomplete HWP implementation which is a
                 * corner case and supporting it is generally problematic.
+                *
+                * If HWP is enabled already, though, there is no choice but to
+                * deal with it.
                 */
-               if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
+                   intel_pstate_hwp_is_enabled()) {
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
index facc8e6bc58013cbdf48573ca7a94bf44357e416..d385daf2c71c3ee18f6b56d77cf2eed1581acd47 100644 (file)
@@ -442,7 +442,6 @@ static int nitrox_probe(struct pci_dev *pdev,
        err = pci_request_mem_regions(pdev, nitrox_driver_name);
        if (err) {
                pci_disable_device(pdev);
-               dev_err(&pdev->dev, "Failed to request mem regions!\n");
                return err;
        }
        pci_set_master(pdev);
index f264b70c383eb4ec5db083bc455296929668dc4d..eadd1eaa2fb541cd4eaff428b8d53865619ea5ad 100644 (file)
@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 
                if (dma_buf_is_dynamic(attach->dmabuf)) {
                        dma_resv_lock(attach->dmabuf->resv, NULL);
-                       ret = dma_buf_pin(attach);
+                       ret = dmabuf->ops->pin(attach);
                        if (ret)
                                goto err_unlock;
                }
@@ -786,7 +786,7 @@ err_attach:
 
 err_unpin:
        if (dma_buf_is_dynamic(attach->dmabuf))
-               dma_buf_unpin(attach);
+               dmabuf->ops->unpin(attach);
 
 err_unlock:
        if (dma_buf_is_dynamic(attach->dmabuf))
@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
                __unmap_dma_buf(attach, attach->sgt, attach->dir);
 
                if (dma_buf_is_dynamic(attach->dmabuf)) {
-                       dma_buf_unpin(attach);
+                       dmabuf->ops->unpin(attach);
                        dma_resv_unlock(attach->dmabuf->resv);
                }
        }
@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
        if (dma_buf_is_dynamic(attach->dmabuf)) {
                dma_resv_assert_held(attach->dmabuf->resv);
                if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
-                       r = dma_buf_pin(attach);
+                       r = attach->dmabuf->ops->pin(attach);
                        if (r)
                                return ERR_PTR(r);
                }
@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
        if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
             !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
-               dma_buf_unpin(attach);
+               attach->dmabuf->ops->unpin(attach);
 
        if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
                attach->sgt = sg_table;
index 2a926bef87f2ac8b19a246244a478987e3750bc9..776fd44aff5ffac07a32a9bd750101fe715006e0 100644 (file)
@@ -745,12 +745,12 @@ static int __init idxd_init_module(void)
         * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
         * enumerating the device. We can not utilize it.
         */
-       if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+       if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
                pr_warn("idxd driver failed to load without MOVDIR64B.\n");
                return -ENODEV;
        }
 
-       if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+       if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
                pr_warn("Platform does not have ENQCMD(S) support.\n");
        else
                support_enqcmd = true;
index 806ca02c52d711702793c13804b1fec4ae5ea9ad..62026607f3f8bcff76e2f1cb6d89eefab8a0fd58 100644 (file)
@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
                hidma_mgmt_of_populate_channels(child);
        }
 #endif
-       return platform_driver_register(&hidma_mgmt_driver);
+       /*
+        * We do not check for return value here, as it is assumed that
+        * platform_driver_register must not fail. The reason for this is that
+        * the (potential) hidma_mgmt_of_populate_channels calls above are not
+        * cleaned up if it does fail, and to do this work is quite
+        * complicated. In particular, various calls of of_address_to_resource,
+        * of_irq_to_resource, platform_device_register_full, of_dma_configure,
+        * and of_msi_configure which then call other functions and so on, must
+        * be cleaned up - this is not a trivial exercise.
+        *
+        * Currently, this module is not intended to be unloaded, and there is
+        * no module_exit function defined which does the needed cleanup. For
+        * this reason, we have to assume success here.
+        */
+       platform_driver_register(&hidma_mgmt_driver);
 
+       return 0;
 }
 module_init(hidma_mgmt_init);
 MODULE_LICENSE("GPL v2");
index 9fa4dfc6ebee6896cf1b49963862c2036119c5d1..f0d8f60acee10c36004ae3c241bb71d89705ae8c 100644 (file)
@@ -3083,7 +3083,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
        edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
 
        /* Check first whether TOP_MEM2 is enabled: */
-       rdmsrl(MSR_K8_SYSCFG, msr_val);
+       rdmsrl(MSR_AMD64_SYSCFG, msr_val);
        if (msr_val & BIT(21)) {
                rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
                edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
index ce0324be6c71df9580a8e299d3214b5ea2cf6dc9..4e9b627edfefaa819f958be55b67e188b257c1ed 100644 (file)
@@ -79,8 +79,6 @@ struct scmi_protocol_events {
 
 int scmi_notification_init(struct scmi_handle *handle);
 void scmi_notification_exit(struct scmi_handle *handle);
-
-struct scmi_protocol_handle;
 int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
                                  const struct scmi_protocol_handle *ph,
                                  const struct scmi_protocol_events *ee);
index d0dee37ad5228814e61142fe14dc9a992c6d2430..4ceba5ef789587f4e10ffb09dfa8c7892d7f82fe 100644 (file)
@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
 
        ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
                                sizeof(le_clk_id), &rate, sizeof(rate));
+       if (ret)
+               return 0;
 
-       return ret ? ret : le32_to_cpu(rate);
+       return le32_to_cpu(rate);
 }
 
 static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
index e15d484b6a5a7a5f5f1409f55d98131ca151610a..ea7ca74fc1730e021273dcdfc77c8def8f5c8ed6 100644 (file)
@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
        if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
                return 0;
 
-       n = 0;
-       len = CPER_REC_LEN - 1;
+       len = CPER_REC_LEN;
        dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
        if (bank && device)
                n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
                             "DIMM location: not present. DMI handle: 0x%.4x ",
                             mem->mem_dev_handle);
 
-       msg[n] = '\0';
        return n;
 }
 
index bb042ab7c2be6411bf4d8f3f40a68bdad80e2c56..e901f8564ca0c5741b3755f8333812fdbd69591e 100644 (file)
@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
 
+       if (!fdt)
+               return 0;
+
        for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
                node = fdt_path_offset(fdt, dt_params[i].path);
                if (node < 0)
index 4e81c6077188ede20f43800dc66ac28497142209..dd95f330fe6e173ef8bed69f2b59b7e2cbaddb57 100644 (file)
@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
                return 0;
 
        /* Skip any leading slashes */
-       while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+       while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
                i++;
 
        while (--result_len > 0 && i < cmdline_len) {
index 5737cb0fcd44efbbbe3c6b3e84b93ede544af6a3..0a9aba5f9ceff0bff6c4507382afe487378afcd1 100644 (file)
@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
                return false;
        }
 
-       if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
-               pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
-               return false;
-       }
-
        if (PAGE_SIZE > EFI_PAGE_SIZE &&
            (!PAGE_ALIGNED(in->phys_addr) ||
             !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
index a4d3239d25944dd772a7d1d75498331d7acce3d6..4ab3fcd9b9ba6e41a2aca13a0e747c475a9a87ff 100644 (file)
@@ -278,6 +278,7 @@ static const struct of_device_id cdns_of_ids[] = {
        { .compatible = "cdns,gpio-r1p02" },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, cdns_of_ids);
 
 static struct platform_driver cdns_gpio_driver = {
        .driver = {
index 1bd9e44df718484bd38318142c7a1bf7a1baa3bc..05974b760796b409a0310969cdb4da4e7cf943ef 100644 (file)
@@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
        return 0;
 }
 
-static int tegra186_irq_set_affinity(struct irq_data *data,
-                                    const struct cpumask *dest,
-                                    bool force)
-{
-       if (data->parent_data)
-               return irq_chip_set_affinity_parent(data, dest, force);
-
-       return -EINVAL;
-}
-
 static void tegra186_gpio_irq(struct irq_desc *desc)
 {
        struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
        gpio->intc.irq_unmask = tegra186_irq_unmask;
        gpio->intc.irq_set_type = tegra186_irq_set_type;
        gpio->intc.irq_set_wake = tegra186_irq_set_wake;
-       gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
 
        irq = &gpio->gpio.irq;
        irq->chip = &gpio->intc;
index 1cbce599085584aae09698725326b6ecb1fb0b65..97e6caedf1f33e12b24182f1e1dbca38d2414645 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/slab.h>
 #include <linux/of_device.h>
 
-#define WCD_PIN_MASK(p) BIT(p - 1)
+#define WCD_PIN_MASK(p) BIT(p)
 #define WCD_REG_DIR_CTL_OFFSET 0x42
 #define WCD_REG_VAL_CTL_OFFSET 0x43
 #define WCD934X_NPINS          5
index b411d3156e0b687c39244e82ee492d6dfa3d1c54..136557e7dd3ceb00931db0c141b53e4a60381fb6 100644 (file)
@@ -542,7 +542,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
 }
 
 /**
- * xgpio_of_probe - Probe method for the GPIO device.
+ * xgpio_probe - Probe method for the GPIO device.
  * @pdev: pointer to the platform device
  *
  * Return:
index 3ef22a3c104d910be96fab7370556f3ac09d3cc6..5b4111e4be3f4e34614ecee6310f6ed3e5b8744b 100644 (file)
@@ -1233,14 +1233,14 @@ static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
 void acpi_gpiochip_add(struct gpio_chip *chip)
 {
        struct acpi_gpio_chip *acpi_gpio;
-       acpi_handle handle;
+       struct acpi_device *adev;
        acpi_status status;
 
        if (!chip || !chip->parent)
                return;
 
-       handle = ACPI_HANDLE(chip->parent);
-       if (!handle)
+       adev = ACPI_COMPANION(chip->parent);
+       if (!adev)
                return;
 
        acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL);
@@ -1254,7 +1254,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
        INIT_LIST_HEAD(&acpi_gpio->events);
        INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
 
-       status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
+       status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio);
        if (ACPI_FAILURE(status)) {
                dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n");
                kfree(acpi_gpio);
@@ -1263,7 +1263,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 
        acpi_gpiochip_request_regions(acpi_gpio);
        acpi_gpiochip_scan_gpios(acpi_gpio);
-       acpi_walk_dep_device_list(handle);
+       acpi_dev_clear_dependencies(adev);
 }
 
 void acpi_gpiochip_remove(struct gpio_chip *chip)
index dc3a69296321b350cf8e954613e3f10c9c7e7c3e..264176a01e16ab3ca64b06885e892c42f67586c2 100644 (file)
@@ -1006,6 +1006,7 @@ struct amdgpu_device {
        struct amdgpu_df                df;
 
        struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
+       uint32_t                        harvest_ip_mask;
        int                             num_ip_blocks;
        struct mutex    mn_lock;
        DECLARE_HASHTABLE(mn_hash, 7);
index fad3b91f74f54972a4aee067bc921c5ec4b5befb..d39cff4a1fe3805c9b2f881d91e3004a6c4c8cef 100644 (file)
@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
                                mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 1:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
                                mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 2:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
-                               mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                               mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 3:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
-                               mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                               mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        }
 
@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
                        engine_id, queue_id);
        uint32_t i = 0, reg;
 #undef HQD_N_REGS
-#define HQD_N_REGS (19+6+7+10)
+#define HQD_N_REGS (19+6+7+12)
 
        *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
index 0350205c48974e0dfafd81c2f14b365ebbff7e91..6819fe5612d9e7a1a1f2b9a148b0990f5533ba68 100644 (file)
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
-       unsigned long ras_counter;
 
        if (!fpriv)
                return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-       /*query ue count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, false);
-       /*ras counter is monotonic increasing*/
-       if (ras_counter != ctx->ras_counter_ue) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-               ctx->ras_counter_ue = ras_counter;
-       }
-
-       /*query ce count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, true);
-       if (ras_counter != ctx->ras_counter_ce) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-               ctx->ras_counter_ce = ras_counter;
-       }
-
        mutex_unlock(&mgr->lock);
        return 0;
 }
index 7d3b546151475db126a30fe964e373e2ad723db4..57ec108b597208421696099d8545cf68d6eab7ba 100644 (file)
@@ -1683,6 +1683,19 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
        if (!ip_block_version)
                return -EINVAL;
 
+       switch (ip_block_version->type) {
+       case AMD_IP_BLOCK_TYPE_VCN:
+               if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
+                       return 0;
+               break;
+       case AMD_IP_BLOCK_TYPE_JPEG:
+               if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
+                       return 0;
+               break;
+       default:
+               break;
+       }
+
        DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
                  ip_block_version->funcs->name);
 
@@ -3105,13 +3118,14 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-       if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+       if (amdgpu_sriov_vf(adev) || 
+           adev->enable_virtual_display ||
+           (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
                return false;
 
        return amdgpu_device_asic_has_dc_support(adev->asic_type);
 }
 
-
 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 {
        struct amdgpu_device *adev =
@@ -3276,6 +3290,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->vm_manager.vm_pte_funcs = NULL;
        adev->vm_manager.vm_pte_num_scheds = 0;
        adev->gmc.gmc_funcs = NULL;
+       adev->harvest_ip_mask = 0x0;
        adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
@@ -4466,7 +4481,6 @@ out:
                        r = amdgpu_ib_ring_tests(tmp_adev);
                        if (r) {
                                dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
-                               r = amdgpu_device_ip_suspend(tmp_adev);
                                need_full_reset = true;
                                r = -EAGAIN;
                                goto end;
index b2dbcb4df0208c944fdd5d9eba485d7fadb8b031..e1b6f5891759957811c29572dc5815653e60e302 100644 (file)
@@ -373,6 +373,34 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
        return -EINVAL;
 }
 
+void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+{
+       struct binary_header *bhdr;
+       struct harvest_table *harvest_info;
+       int i;
+
+       bhdr = (struct binary_header *)adev->mman.discovery_bin;
+       harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
+                       le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
+
+       for (i = 0; i < 32; i++) {
+               if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
+                       break;
+
+               switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
+               case VCN_HWID:
+                       adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+                       adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+                       break;
+               case DMU_HWID:
+                       adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
index 8f6183801cb34efe41eaebab2265d40f1bfedc11..1b1ae21b10375262e315c378dc47d32b4f3d91f4 100644 (file)
@@ -29,6 +29,7 @@
 
 void amdgpu_discovery_fini(struct amdgpu_device *adev);
 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
+void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
                                     int *major, int *minor, int *revision);
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
index 8a1fb8b6606e525eb809d5fbd5fb4232b0509f3d..c13985fb35bed8a8388dfe8f237ab85bc1bb6c78 100644 (file)
@@ -1057,7 +1057,7 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
 
        return 0;
 err:
-       drm_err(dev, "Failed to init gem fb: %d\n", ret);
+       drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
        rfb->base.obj[0] = NULL;
        return ret;
 }
@@ -1094,7 +1094,7 @@ int amdgpu_display_gem_fb_verify_and_init(
 
        return 0;
 err:
-       drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+       drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
        rfb->base.obj[0] = NULL;
        return ret;
 }
index 4f10c452984068c08428f440ca9cc8675e23c798..09b0486475232ffbeec31d3fd70ff1332c6c168e 100644 (file)
@@ -288,10 +288,13 @@ out:
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
        struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+       int i;
 
        drm_fb_helper_unregister_fbi(&rfbdev->helper);
 
        if (rfb->base.obj[0]) {
+               for (i = 0; i < rfb->base.format->num_planes; i++)
+                       drm_gem_object_put(rfb->base.obj[0]);
                amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
                rfb->base.obj[0] = NULL;
                drm_framebuffer_unregister_private(&rfb->base);
index 8f4a8f8d814631f8923d9d0f4371c746b1647f2d..39b6c6bfab453330d9734d797896de498fc52419 100644 (file)
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
        unsigned char buff[34];
-       int addrptr = 0, size = 0;
+       int addrptr, size;
+       int len;
 
        if (!is_fru_eeprom_supported(adev))
                return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        /* If algo exists, it means that the i2c_adapter's initialized */
        if (!adev->pm.smu_i2c.algo) {
                DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-               return 0;
+               return -ENODEV;
        }
 
        /* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        /* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product name should only be 32 characters. Any more,
         * and something could be wrong. Cap it at 32 to be safe
         */
-       if (size > 32) {
+       if (len >= sizeof(adev->product_name)) {
                DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-               size = 32;
+               len = sizeof(adev->product_name) - 1;
        }
        /* Start at 2 due to buff using fields 0 and 1 for the address */
-       memcpy(adev->product_name, &buff[2], size);
-       adev->product_name[size] = '\0';
+       memcpy(adev->product_name, &buff[2], len);
+       adev->product_name[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->product_number)) {
                DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->product_number) - 1;
        }
-       memcpy(adev->product_number, &buff[2], size);
-       adev->product_number[size] = '\0';
+       memcpy(adev->product_number, &buff[2], len);
+       adev->product_number[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product version, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Serial number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->serial)) {
                DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->serial) - 1;
        }
-       memcpy(adev->serial, &buff[2], size);
-       adev->serial[size] = '\0';
+       memcpy(adev->serial, &buff[2], len);
+       adev->serial[len] = '\0';
 
        return 0;
 }
index 1345f7eba011bf49414b4d46f8123a1b36ec7126..f9434bc2f9b21963c06324eea0f79b7ba36d180f 100644 (file)
@@ -100,7 +100,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
                kfree(ubo->metadata);
        }
 
-       kfree(bo);
+       kvfree(bo);
 }
 
 /**
@@ -552,7 +552,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
        *bo_ptr = NULL;
-       bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+       bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
index 46a5328e00e0bbcf949527f593a297da2d88bd73..60aa99a39a743fa5d78376e62f5af65b229f5c7e 100644 (file)
@@ -76,6 +76,7 @@ struct psp_ring
        uint64_t                        ring_mem_mc_addr;
        void                            *ring_mem_handle;
        uint32_t                        ring_size;
+       uint32_t                        ring_wptr;
 };
 
 /* More registers may will be supported */
index 3bef0432cac2f7dba196c2e23c64f25fa2a93fea..d5cbc51c5eaa8313fb1b9da91e37966351e3ca52 100644 (file)
@@ -225,7 +225,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
        *addr += mm_cur->start & ~PAGE_MASK;
 
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-       num_bytes = num_pages * 8;
+       num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
                                     AMDGPU_IB_POOL_DELAYED, &job);
@@ -1210,6 +1210,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
        if (gtt && gtt->userptr) {
                amdgpu_ttm_tt_set_user_pages(ttm, NULL);
                kfree(ttm->sg);
+               ttm->sg = NULL;
                ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
                return;
        }
index 2408ed4c7d847c570355442f88cefb3bbbf4bd47..0597aeb5f0e8988aab8a0dac29aecacdae65f19a 100644 (file)
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid              0x2030
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX     0
 
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid               0x4ca5
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX      1
+
 #define GFX_RLCG_GC_WRITE_OLD  (0x8 << 28)
 #define GFX_RLCG_GC_WRITE      (0x0 << 28)
 #define GFX_RLCG_GC_READ       (0x1 << 28)
@@ -1395,9 +1398,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -1415,12 +1419,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };
 
 static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
@@ -1478,8 +1483,15 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
                       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
        scratch_reg3 = adev->rmmio +
                       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
-       spare_int = adev->rmmio +
-                   (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+       if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+               spare_int = adev->rmmio +
+                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+                            + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+       } else {
+               spare_int = adev->rmmio +
+                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+       }
 
        grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
        grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
@@ -7347,9 +7359,15 @@ static int gfx_v10_0_hw_fini(void *handle)
        if (amdgpu_sriov_vf(adev)) {
                gfx_v10_0_cp_gfx_enable(adev, false);
                /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
-               tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
-               tmp &= 0xffffff00;
-               WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+               if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+                       tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+                       tmp &= 0xffffff00;
+                       WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+               } else {
+                       tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+                       tmp &= 0xffffff00;
+                       WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+               }
 
                return 0;
        }
index a078a38c2ceeaaf6ff97a3559e22c9dc683c3c91..516467e962b727bb3f079d20536825de0886fc9c 100644 (file)
@@ -4943,7 +4943,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
        amdgpu_gfx_rlc_enter_safe_mode(adev);
 
        /* Enable 3D CGCG/CGLS */
-       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+       if (enable) {
                /* write cmd to clear cgcg/cgls ov */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
                /* unset CGCG override */
@@ -4955,8 +4955,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
                /* enable 3Dcgcg FSM(0x0000363f) */
                def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
 
-               data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
-                       RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+                       data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+                               RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+               else
+                       data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
+
                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
                        data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                                RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
index de5abceced0dd28ac92cacdf9a424a8388860de2..85967a5570cb41f5247e99fde958e65e3fa0122b 100644 (file)
@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
index 83531997aeba9056bf0e802decdd9f5e254c01a7..46096ad7f0d9119a5d0e75876a16913a90039a49 100644 (file)
@@ -187,19 +187,17 @@ static int jpeg_v2_5_hw_init(void *handle)
 static int jpeg_v2_5_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
                if (adev->jpeg.harvest_config & (1 << i))
                        continue;
 
-               ring = &adev->jpeg.inst[i].ring_dec;
                if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
                      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
                        jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
-               ring->sched.ready = false;
        }
 
        return 0;
index de5dfcfb385919985f983bf7a2c9b90da5fbbfa5..bd77794315bc6bea0e90b35c65237fb6e542170c 100644 (file)
@@ -159,15 +159,13 @@ static int jpeg_v3_0_hw_init(void *handle)
 static int jpeg_v3_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
 
-       ring = &adev->jpeg.inst->ring_dec;
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
-       ring->sched.ready = false;
-
        return 0;
 }
 
index d54af7f8801bf8f555a43d7d3622def657acb009..d290ca0b06da87dcf380a7965065ca7ae99d1914 100644 (file)
@@ -623,6 +623,16 @@ static const struct amdgpu_ip_block_version nv_common_ip_block =
        .funcs = &nv_common_ip_funcs,
 };
 
+static bool nv_is_headless_sku(struct pci_dev *pdev)
+{
+       if ((pdev->device == 0x731E &&
+           (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+           (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
+           (pdev->device == 0x7360 && pdev->revision == 0xC7))
+               return true;
+       return false;
+}
+
 static int nv_reg_base_init(struct amdgpu_device *adev)
 {
        int r;
@@ -635,6 +645,12 @@ static int nv_reg_base_init(struct amdgpu_device *adev)
                        goto legacy_init;
                }
 
+               amdgpu_discovery_harvest_ip(adev);
+               if (nv_is_headless_sku(adev->pdev)) {
+                       adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+                       adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+               }
+
                return 0;
        }
 
@@ -671,16 +687,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
        adev->virt.ops = &xgpu_nv_virt_ops;
 }
 
-static bool nv_is_headless_sku(struct pci_dev *pdev)
-{
-       if ((pdev->device == 0x731E &&
-           (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
-           (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
-           (pdev->device == 0x7360 && pdev->revision == 0xC7))
-               return true;
-       return false;
-}
-
 int nv_set_ip_blocks(struct amdgpu_device *adev)
 {
        int r;
@@ -728,8 +734,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-               if (!nv_is_headless_sku(adev->pdev))
-                       amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+               amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                if (adev->enable_mes)
                        amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
@@ -752,8 +757,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-               if (!nv_is_headless_sku(adev->pdev))
-                       amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+               amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                if (!amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                break;
@@ -777,7 +781,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                if (!amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
-
                if (adev->enable_mes)
                        amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
                break;
@@ -1149,6 +1152,11 @@ static int nv_common_early_init(void *handle)
                return -EINVAL;
        }
 
+       if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
+               adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
+                                   AMD_PG_SUPPORT_VCN_DPG |
+                                   AMD_PG_SUPPORT_JPEG);
+
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_setting(adev);
                xgpu_nv_mailbox_set_irq_funcs(adev);
index 589410c32d095a47e3f5993d4e468622c8305e9a..02bba1f3c42e0e8b3b4edeea40c62a4245e6c346 100644 (file)
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
        if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index f2e725f72d2f1644e1a786084df6782c4fe9aad3..908664a5774bb33f0f740fde92609a88f0793b71 100644 (file)
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
        return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
                /* send interrupt to PSP for SRIOV ring write pointer update */
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                        GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index 920fc6d4a1273b2470c9fbe071e10385dbe3183b..8859133ce37eb64acac0a4cb8416d1654e725386 100644 (file)
@@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
 
 static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 };
 
index b1ad9e52b2347e7c0d2e082a749b906acfe9bc4c..240596b25fe4e32cf5263a9b901e0dcb0c125f82 100644 (file)
@@ -497,11 +497,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
        }
-
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
-       sdma2->sched.ready = false;
-       sdma3->sched.ready = false;
 }
 
 /**
index d80e12b80c7e5d2f955c1e4be0f3b016692ab703..e65c286f93a6bbf80e73ea83cafa3b22f9ff470f 100644 (file)
@@ -302,6 +302,7 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
                        *codecs = &rv_video_codecs_decode;
                return 0;
        case CHIP_ARCTURUS:
+       case CHIP_ALDEBARAN:
        case CHIP_RENOIR:
                if (encode)
                        *codecs = &vega_video_codecs_encode;
@@ -1392,7 +1393,6 @@ static int soc15_common_early_init(void *handle)
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
-                               AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
@@ -1401,7 +1401,8 @@ static int soc15_common_early_init(void *handle)
                                AMD_CG_SUPPORT_MC_MGCG |
                                AMD_CG_SUPPORT_MC_LS |
                                AMD_CG_SUPPORT_SDMA_MGCG |
-                               AMD_CG_SUPPORT_SDMA_LS;
+                               AMD_CG_SUPPORT_SDMA_LS |
+                               AMD_CG_SUPPORT_VCN_MGCG;
 
                        adev->pg_flags = AMD_PG_SUPPORT_SDMA |
                                AMD_PG_SUPPORT_MMHUB |
@@ -1411,7 +1412,6 @@ static int soc15_common_early_init(void *handle)
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_RLC_LS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
-                               AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
index 2bab9c77952fd73163a6df839d441d0827b0de05..cf3803f8f075d3a24e3ebc61c442a7fd33b594aa 100644 (file)
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
index 51a773a37a354ee8d73a126e6326bf9fe15c65c5..27b1ced145d2c9d60f75d62caca8d0e6db97d212 100644 (file)
@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-               RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+               (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+                RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
                vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+       }
 
        return 0;
 }
@@ -1119,10 +1123,10 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
                UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
        SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
 
-       /* put VCPU into reset */
-       WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
-               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
-               ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+       /* stall UMC channel */
+       WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+               UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+               ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
        tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
                UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
@@ -1141,6 +1145,11 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
                UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
                ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
 
+       /* put VCPU into reset */
+       WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+               ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
        WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
 
        vcn_v1_0_enable_clock_gating(adev);
index 116b9643d5bab315fd890dad877eaf213dbe4c24..8af567c546dbc39b9f0ad533633e682d313f86de 100644 (file)
@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
            (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
index 948813d7caa024221679ded544247602bbf72b88..888b17d84691ceb7ba6894be98cdfc288c5745e1 100644 (file)
@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
index cf165ab5dd26de0ad9809f990dbcfc25fad319aa..3b23de996db22195163db66f43f2d13cacbc435b 100644 (file)
@@ -372,15 +372,14 @@ done:
 static int vcn_v3_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
-       int i, j;
+       int i;
+
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
 
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
 
-               ring = &adev->vcn.inst[i].ring_dec;
-
                if (!amdgpu_sriov_vf(adev)) {
                        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                                        (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
@@ -388,12 +387,6 @@ static int vcn_v3_0_hw_fini(void *handle)
                                vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
                        }
                }
-               ring->sched.ready = false;
-
-               for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-                       ring = &adev->vcn.inst[i].ring_enc[j];
-                       ring->sched.ready = false;
-               }
        }
 
        return 0;
index 389eff96fcf6c63a76db81a0f9a4592411141bd8..652cc1a0e450f3633cd964b822af7ab026d030e7 100644 (file)
@@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }
 
-       adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+       if (!adev->dm.dc->ctx->dmub_srv)
+               adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
 
        amdgpu_dm_irq_suspend(adev);
 
-
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
        return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        struct drm_display_mode saved_mode;
        struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
-       bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+       bool recalculate_timing = false;
+       bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing |= amdgpu_freesync_vid_mode &&
+               recalculate_timing = amdgpu_freesync_vid_mode &&
                                 is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        mode = *freesync_mode;
                } else {
                        decide_crtc_timing_for_drm_display_mode(
-                               &mode, preferred_mode,
-                               dm_state ? (dm_state->scaling != RMX_OFF) : false);
-               }
+                               &mode, preferred_mode, scale);
 
-               preferred_refresh = drm_mode_vrefresh(preferred_mode);
+                       preferred_refresh = drm_mode_vrefresh(preferred_mode);
+               }
        }
 
        if (recalculate_timing)
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        * If scaling is enabled and refresh rate didn't change
        * we copy the vic and polarities of the old timings
        */
-       if (!recalculate_timing || mode_refresh != preferred_refresh)
+       if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
                        requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
        if (cursor_scale_w != primary_scale_w ||
            cursor_scale_h != primary_scale_h) {
-               DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+               drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
                return -EINVAL;
        }
 
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
        int i;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane_state *primary_state, *overlay_state = NULL;
+       struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
 
        /* Check if primary plane is contained inside overlay */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
        if (!primary_state->crtc)
                return 0;
 
+       /* check if cursor plane is enabled */
+       cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+       if (IS_ERR(cursor_state))
+               return PTR_ERR(cursor_state);
+
+       if (drm_atomic_plane_disabling(plane->state, cursor_state))
+               return 0;
+
        /* Perform the bounds check to ensure the overlay plane covers the primary */
        if (primary_state->crtc_x < overlay_state->crtc_x ||
            primary_state->crtc_y < overlay_state->crtc_y ||
index 616f5b1ea3a881abe162cddf45173dd2ec81d068..666796a0067c35b6a287bf557447b2073f73ea62 100644 (file)
@@ -650,6 +650,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
 
        /* File created at /sys/class/drm/card0/device/hdcp_srm*/
        hdcp_work[0].attr = data_attr;
+       sysfs_bin_attr_init(&hdcp_work[0].attr);
 
        if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
                DRM_WARN("Failed to create device file hdcp_srm");
index f4374d83662aeb7432a2c8e9ade1ef3644a5094b..c1f5474c205a809af9c8f5691600edf5c0e0bea6 100644 (file)
@@ -1076,6 +1076,24 @@ static bool dc_link_detect_helper(struct dc_link *link,
                            dc_is_dvi_signal(link->connector_signal)) {
                                if (prev_sink)
                                        dc_sink_release(prev_sink);
+                               link_disconnect_sink(link);
+
+                               return false;
+                       }
+                       /*
+                        * Abort detection for DP connectors if we have
+                        * no EDID and connector is active converter
+                        * as there are no display downstream
+                        *
+                        */
+                       if (dc_is_dp_sst_signal(link->connector_signal) &&
+                               (link->dpcd_caps.dongle_type ==
+                                               DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+                               link->dpcd_caps.dongle_type ==
+                                               DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
+                               if (prev_sink)
+                                       dc_sink_release(prev_sink);
+                               link_disconnect_sink(link);
 
                                return false;
                        }
index 527e56c353cb77514957fbaec024f23315e98cbf..8357aa3c41d5a935dc64777a815f704c0c7035fd 100644 (file)
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
        voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
        dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-       if (voltage_supported && dummy_pstate_supported) {
+       if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
                context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
                goto restore_dml_state;
        }
index 4a5fa23d8e7b06911d13ed22c6e197d74df2b8f8..5fcc2e64305d574a1195a824bdd0ec879ff91fd8 100644 (file)
@@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
                        .fp16 = 16000
        },
 
+       /* 6:1 downscaling ratio: 1000/6 = 166.666 */
        .max_downscale_factor = {
-                       .argb8888 = 600,
-                       .nv12 = 600,
-                       .fp16 = 600
+                       .argb8888 = 167,
+                       .nv12 = 167,
+                       .fp16 = 167
        }
 };
 
index 5b54b7fc5105d05d75d4f04a1443923fbbbbc637..472696f949ac38aead373336669895598fffc807 100644 (file)
@@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
                        .fp16 = 16000
        },
 
+       /* 6:1 downscaling ratio: 1000/6 = 166.666 */
        .max_downscale_factor = {
-                       .argb8888 = 600,
-                       .nv12 = 600,
-                       .fp16 = 600
+                       .argb8888 = 167,
+                       .nv12 = 167,
+                       .fp16 = 167
        },
        64,
        64
index fc2dea243d1ba06b07cae3b418a90709a7be8049..a33f0365329b9dbd3c85c2aeb8954759d34fdc63 100644 (file)
@@ -284,10 +284,11 @@ static const struct dc_plane_cap plane_cap = {
                                .nv12 = 16000,
                                .fp16 = 16000
                },
+               /* 6:1 downscaling ratio: 1000/6 = 166.666 */
                .max_downscale_factor = {
-                               .argb8888 = 600,
-                               .nv12 = 600,
-                               .fp16 = 600
+                               .argb8888 = 167,
+                               .nv12 = 167,
+                               .fp16 = 167
                },
                16,
                16
index 43ed6291b2b89acea6e0f736043afde1fa1e2a68..9ab706cd07ff471f5898d188e595e188ad5008ca 100644 (file)
@@ -216,6 +216,12 @@ enum PP_FEATURE_MASK {
        PP_GFX_DCS_MASK = 0x80000,
 };
 
+enum amd_harvest_ip_mask {
+	AMD_HARVEST_IP_VCN_MASK = 0x1,
+	AMD_HARVEST_IP_JPEG_MASK = 0x2,
+	AMD_HARVEST_IP_DMU_MASK = 0x4,
+};
+
 enum DC_FEATURE_MASK {
        DC_FBC_MASK = 0x1,
        DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
index f5fe540cd5366b4cbf619b33207e08470f6239ba..27cf227167939176127a48fcc102d67a039189c6 100644 (file)
@@ -810,6 +810,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
                data->fine_grain_enabled = 1;
+               break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
        default:
                break;
index 26a5321e621bfea0259fd6f1a31fc1371da2a94c..15c0b8af376f843b6c828c9240fbb93ab5ec3591 100644 (file)
@@ -4817,70 +4817,70 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
        u32 reg;
        int ret;
 
-       table->initialState.levels[0].mclk.vDLL_CNTL =
+       table->initialState.level.mclk.vDLL_CNTL =
                cpu_to_be32(si_pi->clock_registers.dll_cntl);
-       table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+       table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
                cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
-       table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+       table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
                cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
-       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+       table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
                cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
-       table->initialState.levels[0].mclk.vMPLL_SS =
+       table->initialState.level.mclk.vMPLL_SS =
                cpu_to_be32(si_pi->clock_registers.mpll_ss1);
-       table->initialState.levels[0].mclk.vMPLL_SS2 =
+       table->initialState.level.mclk.vMPLL_SS2 =
                cpu_to_be32(si_pi->clock_registers.mpll_ss2);
 
-       table->initialState.levels[0].mclk.mclk_value =
+       table->initialState.level.mclk.mclk_value =
                cpu_to_be32(initial_state->performance_levels[0].mclk);
 
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
-       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+       table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
                cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
-       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
+       table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
                cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
 
-       table->initialState.levels[0].sclk.sclk_value =
+       table->initialState.level.sclk.sclk_value =
                cpu_to_be32(initial_state->performance_levels[0].sclk);
 
-       table->initialState.levels[0].arbRefreshState =
+       table->initialState.level.arbRefreshState =
                SISLANDS_INITIAL_STATE_ARB_INDEX;
 
-       table->initialState.levels[0].ACIndex = 0;
+       table->initialState.level.ACIndex = 0;
 
        ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
                                        initial_state->performance_levels[0].vddc,
-                                       &table->initialState.levels[0].vddc);
+                                       &table->initialState.level.vddc);
 
        if (!ret) {
                u16 std_vddc;
 
                ret = si_get_std_voltage_value(adev,
-                                              &table->initialState.levels[0].vddc,
+                                              &table->initialState.level.vddc,
                                               &std_vddc);
                if (!ret)
                        si_populate_std_voltage_value(adev, std_vddc,
-                                                     table->initialState.levels[0].vddc.index,
-                                                     &table->initialState.levels[0].std_vddc);
+                                                     table->initialState.level.vddc.index,
+                                                     &table->initialState.level.std_vddc);
        }
 
        if (eg_pi->vddci_control)
                si_populate_voltage_value(adev,
                                          &eg_pi->vddci_voltage_table,
                                          initial_state->performance_levels[0].vddci,
-                                         &table->initialState.levels[0].vddci);
+                                         &table->initialState.level.vddci);
 
        if (si_pi->vddc_phase_shed_control)
                si_populate_phase_shedding_value(adev,
@@ -4888,41 +4888,41 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
                                                 initial_state->performance_levels[0].vddc,
                                                 initial_state->performance_levels[0].sclk,
                                                 initial_state->performance_levels[0].mclk,
-                                                &table->initialState.levels[0].vddc);
+                                                &table->initialState.level.vddc);
 
-       si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+       si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
 
        reg = CG_R(0xffff) | CG_L(0);
-       table->initialState.levels[0].aT = cpu_to_be32(reg);
-       table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
-       table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+       table->initialState.level.aT = cpu_to_be32(reg);
+       table->initialState.level.bSP = cpu_to_be32(pi->dsp);
+       table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
 
        if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
-               table->initialState.levels[0].strobeMode =
+               table->initialState.level.strobeMode =
                        si_get_strobe_mode_settings(adev,
                                                    initial_state->performance_levels[0].mclk);
 
                if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
-                       table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+                       table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
                else
-                       table->initialState.levels[0].mcFlags =  0;
+                       table->initialState.level.mcFlags =  0;
        }
 
        table->initialState.levelCount = 1;
 
        table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
 
-       table->initialState.levels[0].dpm2.MaxPS = 0;
-       table->initialState.levels[0].dpm2.NearTDPDec = 0;
-       table->initialState.levels[0].dpm2.AboveSafeInc = 0;
-       table->initialState.levels[0].dpm2.BelowSafeInc = 0;
-       table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+       table->initialState.level.dpm2.MaxPS = 0;
+       table->initialState.level.dpm2.NearTDPDec = 0;
+       table->initialState.level.dpm2.AboveSafeInc = 0;
+       table->initialState.level.dpm2.BelowSafeInc = 0;
+       table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
 
        reg = MIN_POWER_MASK | MAX_POWER_MASK;
-       table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+       table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
 
        reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-       table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+       table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
 
        return 0;
 }
@@ -4953,18 +4953,18 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
 
        if (pi->acpi_vddc) {
                ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
-                                               pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+                                               pi->acpi_vddc, &table->ACPIState.level.vddc);
                if (!ret) {
                        u16 std_vddc;
 
                        ret = si_get_std_voltage_value(adev,
-                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+                                                      &table->ACPIState.level.vddc, &std_vddc);
                        if (!ret)
                                si_populate_std_voltage_value(adev, std_vddc,
-                                                             table->ACPIState.levels[0].vddc.index,
-                                                             &table->ACPIState.levels[0].std_vddc);
+                                                             table->ACPIState.level.vddc.index,
+                                                             &table->ACPIState.level.std_vddc);
                }
-               table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+               table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
 
                if (si_pi->vddc_phase_shed_control) {
                        si_populate_phase_shedding_value(adev,
@@ -4972,23 +4972,23 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
                                                         pi->acpi_vddc,
                                                         0,
                                                         0,
-                                                        &table->ACPIState.levels[0].vddc);
+                                                        &table->ACPIState.level.vddc);
                }
        } else {
                ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
-                                               pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+                                               pi->min_vddc_in_table, &table->ACPIState.level.vddc);
                if (!ret) {
                        u16 std_vddc;
 
                        ret = si_get_std_voltage_value(adev,
-                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+                                                      &table->ACPIState.level.vddc, &std_vddc);
 
                        if (!ret)
                                si_populate_std_voltage_value(adev, std_vddc,
-                                                             table->ACPIState.levels[0].vddc.index,
-                                                             &table->ACPIState.levels[0].std_vddc);
+                                                             table->ACPIState.level.vddc.index,
+                                                             &table->ACPIState.level.std_vddc);
                }
-               table->ACPIState.levels[0].gen2PCIE =
+               table->ACPIState.level.gen2PCIE =
                        (u8)amdgpu_get_pcie_gen_support(adev,
                                                        si_pi->sys_pcie_mask,
                                                        si_pi->boot_pcie_gen,
@@ -5000,14 +5000,14 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
                                                         pi->min_vddc_in_table,
                                                         0,
                                                         0,
-                                                        &table->ACPIState.levels[0].vddc);
+                                                        &table->ACPIState.level.vddc);
        }
 
        if (pi->acpi_vddc) {
                if (eg_pi->acpi_vddci)
                        si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
                                                  eg_pi->acpi_vddci,
-                                                 &table->ACPIState.levels[0].vddci);
+                                                 &table->ACPIState.level.vddci);
        }
 
        mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
@@ -5018,59 +5018,59 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
        spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
        spll_func_cntl_2 |= SCLK_MUX_SEL(4);
 
-       table->ACPIState.levels[0].mclk.vDLL_CNTL =
+       table->ACPIState.level.mclk.vDLL_CNTL =
                cpu_to_be32(dll_cntl);
-       table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+       table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
                cpu_to_be32(mclk_pwrmgt_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+       table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
                cpu_to_be32(mpll_ad_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+       table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
                cpu_to_be32(mpll_dq_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+       table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
                cpu_to_be32(mpll_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+       table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
                cpu_to_be32(mpll_func_cntl_1);
-       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+       table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
                cpu_to_be32(mpll_func_cntl_2);
-       table->ACPIState.levels[0].mclk.vMPLL_SS =
+       table->ACPIState.level.mclk.vMPLL_SS =
                cpu_to_be32(si_pi->clock_registers.mpll_ss1);
-       table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+       table->ACPIState.level.mclk.vMPLL_SS2 =
                cpu_to_be32(si_pi->clock_registers.mpll_ss2);
 
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
                cpu_to_be32(spll_func_cntl);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
                cpu_to_be32(spll_func_cntl_2);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
                cpu_to_be32(spll_func_cntl_3);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
                cpu_to_be32(spll_func_cntl_4);
 
-       table->ACPIState.levels[0].mclk.mclk_value = 0;
-       table->ACPIState.levels[0].sclk.sclk_value = 0;
+       table->ACPIState.level.mclk.mclk_value = 0;
+       table->ACPIState.level.sclk.sclk_value = 0;
 
-       si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+       si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd);
 
        if (eg_pi->dynamic_ac_timing)
-               table->ACPIState.levels[0].ACIndex = 0;
+               table->ACPIState.level.ACIndex = 0;
 
-       table->ACPIState.levels[0].dpm2.MaxPS = 0;
-       table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
-       table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
-       table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
-       table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+       table->ACPIState.level.dpm2.MaxPS = 0;
+       table->ACPIState.level.dpm2.NearTDPDec = 0;
+       table->ACPIState.level.dpm2.AboveSafeInc = 0;
+       table->ACPIState.level.dpm2.BelowSafeInc = 0;
+       table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
 
        reg = MIN_POWER_MASK | MAX_POWER_MASK;
-       table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+       table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
 
        reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-       table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+       table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
 
        return 0;
 }
 
 static int si_populate_ulv_state(struct amdgpu_device *adev,
-                                SISLANDS_SMC_SWSTATE *state)
+                                struct SISLANDS_SMC_SWSTATE_SINGLE *state)
 {
        struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
        struct si_power_info *si_pi = si_get_pi(adev);
@@ -5079,19 +5079,19 @@ static int si_populate_ulv_state(struct amdgpu_device *adev,
        int ret;
 
        ret = si_convert_power_level_to_smc(adev, &ulv->pl,
-                                           &state->levels[0]);
+                                           &state->level);
        if (!ret) {
                if (eg_pi->sclk_deep_sleep) {
                        if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
-                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+                               state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
                        else
-                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+                               state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
                }
                if (ulv->one_pcie_lane_in_ulv)
                        state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
-               state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
-               state->levels[0].ACIndex = 1;
-               state->levels[0].std_vddc = state->levels[0].vddc;
+               state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+               state->level.ACIndex = 1;
+               state->level.std_vddc = state->level.vddc;
                state->levelCount = 1;
 
                state->flags |= PPSMC_SWSTATE_FLAG_DC;
@@ -5190,7 +5190,9 @@ static int si_init_smc_table(struct amdgpu_device *adev)
        if (ret)
                return ret;
 
-       table->driverState = table->initialState;
+       table->driverState.flags = table->initialState.flags;
+       table->driverState.levelCount = table->initialState.levelCount;
+       table->driverState.levels[0] = table->initialState.level;
 
        ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
                                                     SISLANDS_INITIAL_STATE_ARB_INDEX);
@@ -5737,8 +5739,8 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
        if (ulv->supported && ulv->pl.vddc) {
                u32 address = si_pi->state_table_start +
                        offsetof(SISLANDS_SMC_STATETABLE, ULVState);
-               SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
-               u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+               struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
+               u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);
 
                memset(smc_state, 0, state_size);
 
index 0f7554052c9062a8ce88f5ad0750fde01227dd2b..c7dc117a688cbe3987d1073e5c76bfabb7a84866 100644 (file)
@@ -191,6 +191,14 @@ struct SISLANDS_SMC_SWSTATE
 
 typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
 
+struct SISLANDS_SMC_SWSTATE_SINGLE {
+       uint8_t                             flags;
+       uint8_t                             levelCount;
+       uint8_t                             padding2;
+       uint8_t                             padding3;
+       SISLANDS_SMC_HW_PERFORMANCE_LEVEL   level;
+};
+
 #define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
 #define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -208,19 +216,19 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
 
 struct SISLANDS_SMC_STATETABLE
 {
-    uint8_t                             thermalProtectType;
-    uint8_t                             systemFlags;
-    uint8_t                             maxVDDCIndexInPPTable;
-    uint8_t                             extraFlags;
-    uint32_t                            lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
-    SISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
-    SISLANDS_SMC_VOLTAGEMASKTABLE       phaseMaskTable;
-    PP_SIslands_DPM2Parameters          dpm2Params;
-    SISLANDS_SMC_SWSTATE                initialState;
-    SISLANDS_SMC_SWSTATE                ACPIState;
-    SISLANDS_SMC_SWSTATE                ULVState;
-    SISLANDS_SMC_SWSTATE                driverState;
-    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+       uint8_t                                 thermalProtectType;
+       uint8_t                                 systemFlags;
+       uint8_t                                 maxVDDCIndexInPPTable;
+       uint8_t                                 extraFlags;
+       uint32_t                                lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+       SISLANDS_SMC_VOLTAGEMASKTABLE           voltageMaskTable;
+       SISLANDS_SMC_VOLTAGEMASKTABLE           phaseMaskTable;
+       PP_SIslands_DPM2Parameters              dpm2Params;
+       struct SISLANDS_SMC_SWSTATE_SINGLE      initialState;
+       struct SISLANDS_SMC_SWSTATE_SINGLE      ACPIState;
+       struct SISLANDS_SMC_SWSTATE_SINGLE      ULVState;
+       SISLANDS_SMC_SWSTATE                    driverState;
+       SISLANDS_SMC_HW_PERFORMANCE_LEVEL       dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
 };
 
 typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
index ac13042672ea1094e1bc1b9b64e9a68d25ff7ac6..0eaf86b5e6986912606a23d61ad3b7d867f012e3 100644 (file)
@@ -2925,6 +2925,8 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
 
 static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *smc_pptable = table_context->driver_pptable;
        struct amdgpu_device *adev = smu->adev;
        uint32_t param = 0;
 
@@ -2932,6 +2934,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
        if (adev->asic_type == CHIP_NAVI12)
                return 0;
 
+       /*
+        * Skip the MGpuFanBoost setting for those ASICs
+        * which do not support it
+        */
+       if (!smc_pptable->MGpuFanBoostLimitRpm)
+               return 0;
+
        /* Workaround for WS SKU */
        if (adev->pdev->device == 0x7312 &&
            adev->pdev->revision == 0)
index d2fd44b903ca415274756d7d7bcd634c82950717..b124a5e40dd6a96f5273cc9a25276924cef557c4 100644 (file)
@@ -3027,6 +3027,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 
 static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *smc_pptable = table_context->driver_pptable;
+
+       /*
+        * Skip the MGpuFanBoost setting for those ASICs
+        * which do not support it
+        */
+       if (!smc_pptable->MGpuFanBoostLimitRpm)
+               return 0;
+
        return smu_cmn_send_smc_msg_with_param(smu,
                                               SMU_MSG_SetMGpuFanBoostLimitRpm,
                                               0,
index f2d46b7ac6f9f8f25ff27b2620a7a165c3d8eff8..232abbba36868b425321ae02cb8b83351f5b3c74 100644 (file)
@@ -314,9 +314,10 @@ int drm_master_open(struct drm_file *file_priv)
 void drm_master_release(struct drm_file *file_priv)
 {
        struct drm_device *dev = file_priv->minor->dev;
-       struct drm_master *master = file_priv->master;
+       struct drm_master *master;
 
        mutex_lock(&dev->master_mutex);
+       master = file_priv->master;
        if (file_priv->magic)
                idr_remove(&file_priv->master->magic_map, file_priv->magic);
 
index d273d1a8603a9592b22c7cc3fe23eecc2e0959c7..495a4767a44306299615b103e5dc20a798db915d 100644 (file)
@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
        struct drm_unique *u = data;
-       struct drm_master *master = file_priv->master;
+       struct drm_master *master;
 
-       mutex_lock(&master->dev->master_mutex);
+       mutex_lock(&dev->master_mutex);
+       master = file_priv->master;
        if (u->unique_len >= master->unique_len) {
                if (copy_to_user(u->unique, master->unique, master->unique_len)) {
-                       mutex_unlock(&master->dev->master_mutex);
+                       mutex_unlock(&dev->master_mutex);
                        return -EFAULT;
                }
        }
        u->unique_len = master->unique_len;
-       mutex_unlock(&master->dev->master_mutex);
+       mutex_unlock(&dev->master_mutex);
 
        return 0;
 }
index b9a4b7670a899a15b3352e06915d7f6d6f692496..197b97341cad26cf53ec8b447e8d417768bca216 100644 (file)
@@ -815,10 +815,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ctx->addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(ctx->addr)) {
-               dev_err(dev, "ioremap failed\n");
+       if (IS_ERR(ctx->addr))
                return PTR_ERR(ctx->addr);
-       }
 
        ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
        if (ret < 0)
index 44e402b7cdfb6e399a4ac3416778e1a6573a8441..2d2fe5ab26e70300af7546617f90132164f4af36 100644 (file)
@@ -1786,10 +1786,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->reg_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(dsi->reg_base)) {
-               dev_err(dev, "failed to remap io region\n");
+       if (IS_ERR(dsi->reg_base))
                return PTR_ERR(dsi->reg_base);
-       }
 
        dsi->phy = devm_phy_get(dev, "dsim");
        if (IS_ERR(dsi->phy)) {
index 49a2e0c539187dae79b5df8857d8c0255cb6dced..ae576122873e0330224a923353e194f6c2df2f94 100644 (file)
@@ -723,7 +723,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
 }
 
 /**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
  *
  * @ctx: local driver data
  * @win: window to protect registers for
index 69f57ca9c68d7372ba01ac846c43a9293dfbe770..1e1cb245fca778e71d0553cb15e66693f1f0f2c6 100644 (file)
@@ -20,7 +20,6 @@ config DRM_I915
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
-       select IO_MAPPING
        select SYNC_FILE
        select IOSF_MBI
        select CRC32
@@ -102,7 +101,6 @@ config DRM_I915_GVT
        bool "Enable Intel GVT-g graphics virtualization host support"
        depends on DRM_I915
        depends on 64BIT
-       depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
        default n
        help
          Choose this option if you want to enable Intel GVT-g graphics
index 6a2dee8cef1f16d530f9d6b3809385df91deda65..642c60f3d9b18cae1d323ba4bd6e469f5680247d 100644 (file)
@@ -1095,44 +1095,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
        return -EINVAL;
 }
 
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
-                                 struct intel_crtc_state *pipe_config,
-                                 const struct link_config_limits *limits)
-{
-       const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-       int bpp, clock, lane_count;
-       int mode_rate, link_clock, link_avail;
-
-       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
-               int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
-
-               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                  output_bpp);
-
-               for (lane_count = limits->min_lane_count;
-                    lane_count <= limits->max_lane_count;
-                    lane_count <<= 1) {
-                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
-                               link_clock = intel_dp->common_rates[clock];
-                               link_avail = intel_dp_max_data_rate(link_clock,
-                                                                   lane_count);
-
-                               if (mode_rate <= link_avail) {
-                                       pipe_config->lane_count = lane_count;
-                                       pipe_config->pipe_bpp = bpp;
-                                       pipe_config->port_clock = link_clock;
-
-                                       return 0;
-                               }
-                       }
-               }
-       }
-
-       return -EINVAL;
-}
-
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 {
        int i, num_bpc;
@@ -1382,22 +1344,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
            intel_dp_can_bigjoiner(intel_dp))
                pipe_config->bigjoiner = true;
 
-       if (intel_dp_is_edp(intel_dp))
-               /*
-                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
-                * section A.1: "It is recommended that the minimum number of
-                * lanes be used, using the minimum link rate allowed for that
-                * lane configuration."
-                *
-                * Note that we fall back to the max clock and lane count for eDP
-                * panels that fail with the fast optimal settings (see
-                * intel_dp->use_max_params), in which case the fast vs. wide
-                * choice doesn't matter.
-                */
-               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
-       else
-               /* Optimize for slow and wide. */
-               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+       /*
+        * Optimize for slow and wide for everything, because there are some
+        * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
+        */
+       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
        /* enable compression if the mode doesn't fit available BW */
        drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2160,7 +2111,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
         * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
         * -sink is HDMI2.1
         */
-       if (!(intel_dp->dpcd[2] & DP_PCON_SOURCE_CTL_MODE) ||
+       if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
            !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
            intel_dp->frl.is_trained)
                return;
index 02a003fd48fb22533f0025cd8b64ca16270def35..50cae0198a3d0baf7ba44c798f1d513f7c6da1ea 100644 (file)
@@ -128,49 +128,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
        return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
 }
 
-/**
- * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
- * @intel_dp: Intel DP struct
- *
- * Read the LTTPR common and DPRX capabilities and switch to non-transparent
- * link training mode if any is detected and read the PHY capabilities for all
- * detected LTTPRs. In case of an LTTPR detection error or if the number of
- * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
- * transparent mode link training mode.
- *
- * Returns:
- *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
- *       DPRX capabilities are read out.
- *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- *       detection failure and the transparent LT mode was set. The DPRX
- *       capabilities are read out.
- *   <0  Reading out the DPRX capabilities failed.
- */
-int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
 {
        int lttpr_count;
-       bool ret;
        int i;
 
-       ret = intel_dp_read_lttpr_common_caps(intel_dp);
-
-       /* The DPTX shall read the DPRX caps after LTTPR detection. */
-       if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
-               intel_dp_reset_lttpr_common_caps(intel_dp);
-               return -EIO;
-       }
-
-       if (!ret)
-               return 0;
-
-       /*
-        * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
-        * at least 1.4.
-        */
-       if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
-               intel_dp_reset_lttpr_common_caps(intel_dp);
+       if (!intel_dp_read_lttpr_common_caps(intel_dp))
                return 0;
-       }
 
        lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
        /*
@@ -211,6 +175,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 
        return lttpr_count;
 }
+
+/**
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+ * @intel_dp: Intel DP struct
+ *
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
+ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+ * transparent mode link training mode.
+ *
+ * Returns:
+ *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
+ *       DPRX capabilities are read out.
+ *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+ *       detection failure and the transparent LT mode was set. The DPRX
+ *       capabilities are read out.
+ *   <0  Reading out the DPRX capabilities failed.
+ */
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+{
+       int lttpr_count = intel_dp_init_lttpr(intel_dp);
+
+       /* The DPTX shall read the DPRX caps after LTTPR detection. */
+       if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+               intel_dp_reset_lttpr_common_caps(intel_dp);
+               return -EIO;
+       }
+
+       return lttpr_count;
+}
 EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
index e5dadde422f74eb04b35859a491284a3ccea3b1a..bbaf05515e88379a9d055511106da30cfe043a88 100644 (file)
@@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
                i830_overlay_clock_gating(dev_priv, true);
 }
 
-static void
+__i915_active_call static void
 intel_overlay_last_flip_retire(struct i915_active *active)
 {
        struct intel_overlay *overlay =
index 23f6b00e08e211436f7eece5c3ba25a0493d237a..8598a1c78a4c2659ffb8143fd1a8dde796c297ca 100644 (file)
@@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
        struct i915_ggtt_view view;
 
        if (i915_gem_object_is_tiled(obj))
-               chunk = roundup(chunk, tile_row_pages(obj));
+               chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
 
        view.type = I915_GGTT_VIEW_PARTIAL;
        view.partial.offset = rounddown(page_offset, chunk);
@@ -367,10 +367,11 @@ retry:
                goto err_unpin;
 
        /* Finally, remap it using the new GTT offset */
-       ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
-                       (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-                       min_t(u64, vma->size, area->vm_end - area->vm_start));
+       ret = remap_io_mapping(area,
+                              area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+                              min_t(u64, vma->size, area->vm_end - area->vm_start),
+                              &ggtt->iomap);
        if (ret)
                goto err_fence;
 
index aed8a37ccdc938ea4022b758d59c3a654f2e9bd9..7361971c177ddb6a7975342c184ca64f6355d91c 100644 (file)
@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                i915_gem_object_set_tiling_quirk(obj);
+               GEM_BUG_ON(!list_empty(&obj->mm.link));
+               atomic_inc(&obj->mm.shrink_pin);
                shrinkable = false;
        }
 
index de575fdb033f5acb9b3c4ccf47e45af3228e586e..21f08e53889c39a1b52d6f48dd8e4d52676b801c 100644 (file)
@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
        gen7_emit_pipeline_invalidate(&cmds);
        batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
        batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
-       batch_add(&cmds, 0xffff0000);
+       batch_add(&cmds, 0xffff0000 |
+                       ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+                        HIZ_RAW_STALL_OPT_DISABLE :
+                        0));
        batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
        batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
        gen7_emit_pipeline_invalidate(&cmds);
index 176c19633412fb88abcd1a2ab63b6c7dbca762a6..74bf6fc8461fe50e7c96670000970460fda2c341 100644 (file)
@@ -641,7 +641,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 
                err = pin_pt_dma(vm, pde->pt.base);
                if (err) {
-                       i915_gem_object_put(pde->pt.base);
                        free_pd(vm, pde);
                        return err;
                }
index e72b7a0dc316e4e9c785689d72f9eacd4a2e98df..8a322594210c47b62ed6228b2d0cc3489a76eea8 100644 (file)
@@ -653,8 +653,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
                 * banks of memory are paired and unswizzled on the
                 * uneven portion, so leave that as unknown.
                 */
-               if (intel_uncore_read(uncore, C0DRB3) ==
-                   intel_uncore_read(uncore, C1DRB3)) {
+               if (intel_uncore_read16(uncore, C0DRB3) ==
+                   intel_uncore_read16(uncore, C1DRB3)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                        swizzle_y = I915_BIT_6_SWIZZLE_9;
                }
index e7c2babcee8b7071714150e24594773ce85cfb07..cbac409f6c8a56b84442615f04ae46420106e993 100644 (file)
@@ -46,118 +46,6 @@ static const char * const supported_hypervisors[] = {
        [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
 };
 
-static struct intel_vgpu_type *
-intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
-{
-       if (WARN_ON(type_group_id >= gvt->num_types))
-               return NULL;
-       return &gvt->types[type_group_id];
-}
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
-                                       struct mdev_type_attribute *attr,
-                                       char *buf)
-{
-       struct intel_vgpu_type *type;
-       unsigned int num = 0;
-       void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
-       type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
-       if (!type)
-               num = 0;
-       else
-               num = type->avail_instance;
-
-       return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
-                              struct mdev_type_attribute *attr, char *buf)
-{
-       return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
-                               struct mdev_type_attribute *attr, char *buf)
-{
-       struct intel_vgpu_type *type;
-       void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
-       type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
-       if (!type)
-               return 0;
-
-       return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-                      "fence: %d\nresolution: %s\n"
-                      "weight: %d\n",
-                      BYTES_TO_MB(type->low_gm_size),
-                      BYTES_TO_MB(type->high_gm_size),
-                      type->fence, vgpu_edid_str(type->resolution),
-                      type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *gvt_type_attrs[] = {
-       &mdev_type_attr_available_instances.attr,
-       &mdev_type_attr_device_api.attr,
-       &mdev_type_attr_description.attr,
-       NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
-       [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
-{
-       *intel_vgpu_type_groups = gvt_vgpu_type_groups;
-       return true;
-}
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
-       int i, j;
-       struct intel_vgpu_type *type;
-       struct attribute_group *group;
-
-       for (i = 0; i < gvt->num_types; i++) {
-               type = &gvt->types[i];
-
-               group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
-               if (WARN_ON(!group))
-                       goto unwind;
-
-               group->name = type->name;
-               group->attrs = gvt_type_attrs;
-               gvt_vgpu_type_groups[i] = group;
-       }
-
-       return 0;
-
-unwind:
-       for (j = 0; j < i; j++) {
-               group = gvt_vgpu_type_groups[j];
-               kfree(group);
-       }
-
-       return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
-       int i;
-       struct attribute_group *group;
-
-       for (i = 0; i < gvt->num_types; i++) {
-               group = gvt_vgpu_type_groups[i];
-               gvt_vgpu_type_groups[i] = NULL;
-               kfree(group);
-       }
-}
-
 static const struct intel_gvt_ops intel_gvt_ops = {
        .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
        .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -169,8 +57,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .vgpu_reset = intel_gvt_reset_vgpu,
        .vgpu_activate = intel_gvt_activate_vgpu,
        .vgpu_deactivate = intel_gvt_deactivate_vgpu,
-       .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
-       .get_gvt_attrs = intel_get_gvt_attrs,
        .vgpu_query_plane = intel_vgpu_query_plane,
        .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
        .write_protect_handler = intel_vgpu_page_track_handler,
@@ -274,7 +160,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
                return;
 
        intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-       intel_gvt_cleanup_vgpu_type_groups(gvt);
        intel_gvt_clean_vgpu_types(gvt);
 
        intel_gvt_debugfs_clean(gvt);
@@ -363,12 +248,6 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
        if (ret)
                goto out_clean_thread;
 
-       ret = intel_gvt_init_vgpu_type_groups(gvt);
-       if (ret) {
-               gvt_err("failed to init vgpu type groups: %d\n", ret);
-               goto out_clean_types;
-       }
-
        vgpu = intel_gvt_create_idle_vgpu(gvt);
        if (IS_ERR(vgpu)) {
                ret = PTR_ERR(vgpu);
@@ -454,7 +333,8 @@ EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
 void
 intel_gvt_unregister_hypervisor(void)
 {
-       intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+       void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+       intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
        module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
index 88ab360fcb31a40eed612eb9046e215bd85d2e45..0c0615602343a563bc90e0ccc78aca48faa5b4b9 100644 (file)
@@ -574,9 +574,6 @@ struct intel_gvt_ops {
        void (*vgpu_reset)(struct intel_vgpu *);
        void (*vgpu_activate)(struct intel_vgpu *);
        void (*vgpu_deactivate)(struct intel_vgpu *);
-       struct intel_vgpu_type *(*gvt_find_vgpu_type)(
-               struct intel_gvt *gvt, unsigned int type_group_id);
-       bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
        int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
        int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
        int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
index b79da5124f831cc4f4ad843545b9b031d0643529..f33e3cbd0439d3a053db16dbca8dac2aecd8f770 100644 (file)
@@ -49,7 +49,7 @@ enum hypervisor_type {
 struct intel_gvt_mpt {
        enum hypervisor_type type;
        int (*host_init)(struct device *dev, void *gvt, const void *ops);
-       void (*host_exit)(struct device *dev);
+       void (*host_exit)(struct device *dev, void *gvt);
        int (*attach_vgpu)(void *vgpu, unsigned long *handle);
        void (*detach_vgpu)(void *vgpu);
        int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
index 65ff43cfc0f719edf91eaaec127d0359a4ca8f99..48b4d4cf805d11816511260b129fc68ecd448c8f 100644 (file)
@@ -144,6 +144,104 @@ static inline bool handle_valid(unsigned long handle)
        return !!(handle & ~0xff);
 }
 
+static ssize_t available_instances_show(struct mdev_type *mtype,
+                                       struct mdev_type_attribute *attr,
+                                       char *buf)
+{
+       struct intel_vgpu_type *type;
+       unsigned int num = 0;
+       struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+       type = &gvt->types[mtype_get_type_group_id(mtype)];
+       if (!type)
+               num = 0;
+       else
+               num = type->avail_instance;
+
+       return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct mdev_type *mtype,
+                              struct mdev_type_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct mdev_type *mtype,
+                               struct mdev_type_attribute *attr, char *buf)
+{
+       struct intel_vgpu_type *type;
+       struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+       type = &gvt->types[mtype_get_type_group_id(mtype)];
+       if (!type)
+               return 0;
+
+       return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+                      "fence: %d\nresolution: %s\n"
+                      "weight: %d\n",
+                      BYTES_TO_MB(type->low_gm_size),
+                      BYTES_TO_MB(type->high_gm_size),
+                      type->fence, vgpu_edid_str(type->resolution),
+                      type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+       &mdev_type_attr_available_instances.attr,
+       &mdev_type_attr_device_api.attr,
+       &mdev_type_attr_description.attr,
+       NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+       [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+       int i, j;
+       struct intel_vgpu_type *type;
+       struct attribute_group *group;
+
+       for (i = 0; i < gvt->num_types; i++) {
+               type = &gvt->types[i];
+
+               group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+               if (!group)
+                       goto unwind;
+
+               group->name = type->name;
+               group->attrs = gvt_type_attrs;
+               gvt_vgpu_type_groups[i] = group;
+       }
+
+       return 0;
+
+unwind:
+       for (j = 0; j < i; j++) {
+               group = gvt_vgpu_type_groups[j];
+               kfree(group);
+       }
+
+       return -ENOMEM;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+       int i;
+       struct attribute_group *group;
+
+       for (i = 0; i < gvt->num_types; i++) {
+               group = gvt_vgpu_type_groups[i];
+               gvt_vgpu_type_groups[i] = NULL;
+               kfree(group);
+       }
+}
+
 static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -694,14 +792,13 @@ static int intel_vgpu_create(struct mdev_device *mdev)
        struct intel_vgpu *vgpu = NULL;
        struct intel_vgpu_type *type;
        struct device *pdev;
-       void *gvt;
+       struct intel_gvt *gvt;
        int ret;
 
        pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;
 
-       type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
-                                                mdev_get_type_group_id(mdev));
+       type = &gvt->types[mdev_get_type_group_id(mdev)];
        if (!type) {
                ret = -EINVAL;
                goto out;
@@ -1667,19 +1764,26 @@ static struct mdev_parent_ops intel_vgpu_ops = {
 
 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
 {
-       struct attribute_group **kvm_vgpu_type_groups;
+       int ret;
+
+       ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
+       if (ret)
+               return ret;
 
        intel_gvt_ops = ops;
-       if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
-               return -EFAULT;
-       intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
+       intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
 
-       return mdev_register_device(dev, &intel_vgpu_ops);
+       ret = mdev_register_device(dev, &intel_vgpu_ops);
+       if (ret)
+               intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+
+       return ret;
 }
 
-static void kvmgt_host_exit(struct device *dev)
+static void kvmgt_host_exit(struct device *dev, void *gvt)
 {
        mdev_unregister_device(dev);
+       intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
 }
 
 static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
index 550a456e936fc44ee69bad487cf31d4b7596f974..e6c5a792a49a5ba50171611410c560d9be8d7f96 100644 (file)
@@ -63,13 +63,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
 /**
  * intel_gvt_hypervisor_host_exit - exit GVT-g host side
  */
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
 {
        /* optional to provide */
        if (!intel_gvt_host.mpt->host_exit)
                return;
 
-       intel_gvt_host.mpt->host_exit(dev);
+       intel_gvt_host.mpt->host_exit(dev, gvt);
 }
 
 /**
index cf9a3d384971fc70b16d3b6238237b4031cfd8ff..aa573b078ae75c83d9e81c60f54579a66c1dfe2a 100644 (file)
@@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
        return 0;
 }
 
-static void auto_retire(struct i915_active *ref)
+__i915_active_call static void
+auto_retire(struct i915_active *ref)
 {
        i915_active_put(ref);
 }
index 9ec9277539ec14e8e5cd5629b3e5909be600e8a7..69e43bf91a153254b559290a4a7afbe3bf373fe6 100644 (file)
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 
 /* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap);
 int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase);
index b23f58e94cfb702a0ce41608ef6cc7a2d2692773..b3cedd20f3653c17a446dc5c656ee35e711c3ba6 100644 (file)
@@ -999,12 +999,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                obj->mm.madv = args->madv;
 
        if (i915_gem_object_has_pages(obj)) {
-               struct list_head *list;
+               unsigned long flags;
 
-               if (i915_gem_object_is_shrinkable(obj)) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&i915->mm.obj_lock, flags);
+               spin_lock_irqsave(&i915->mm.obj_lock, flags);
+               if (!list_empty(&obj->mm.link)) {
+                       struct list_head *list;
 
                        if (obj->mm.madv != I915_MADV_WILLNEED)
                                list = &i915->mm.purge_list;
@@ -1012,8 +1011,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                                list = &i915->mm.shrink_list;
                        list_move_tail(&obj->mm.link, list);
 
-                       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
                }
+               spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
 
        /* if the object is no longer attached, discard its backing storage */
index 4c8cd08c672d2dd899ac87b78ea6c0f0ed93484b..666808cb3a32609f78858205933b43e85a252576 100644 (file)
 
 #include "i915_drv.h"
 
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+       struct mm_struct *mm;
+       unsigned long pfn;
+       pgprot_t prot;
+
+       struct sgt_iter sgt;
+       resource_size_t iobase;
+};
+
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       /* Special PTE are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+       r->pfn++;
+
+       return 0;
+}
 
 #define use_dma(io) ((io) != -1)
 
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+       if (use_dma(r->iobase))
+               return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+       else
+               return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       if (GEM_WARN_ON(!r->sgt.sgp))
+               return -EINVAL;
+
+       /* Special PTE are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte,
+                  pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+       r->pfn++; /* track insertions in case we need to unwind later */
+
+       r->sgt.curr += PAGE_SIZE;
+       if (r->sgt.curr >= r->sgt.max)
+               r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+       return 0;
+}
+
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ *  Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap)
+{
+       struct remap_pfn r;
+       int err;
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+       GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+       /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+       r.mm = vma->vm_mm;
+       r.pfn = pfn;
+       r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+                         (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+       err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+       if (unlikely(err)) {
+               zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
+}
+
 /**
  * remap_io_sg - remap an IO mapping to userspace
  * @vma: user vma to map to
@@ -46,7 +126,12 @@ int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase)
 {
-       unsigned long pfn, len, remapped = 0;
+       struct remap_pfn r = {
+               .mm = vma->vm_mm,
+               .prot = vma->vm_page_prot,
+               .sgt = __sgt_iter(sgl, use_dma(iobase)),
+               .iobase = iobase,
+       };
        int err;
 
        /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +140,11 @@ int remap_io_sg(struct vm_area_struct *vma,
        if (!use_dma(iobase))
                flush_cache_range(vma, addr, size);
 
-       do {
-               if (use_dma(iobase)) {
-                       if (!sg_dma_len(sgl))
-                               break;
-                       pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
-                       len = sg_dma_len(sgl);
-               } else {
-                       pfn = page_to_pfn(sg_page(sgl));
-                       len = sgl->length;
-               }
-
-               err = remap_pfn_range(vma, addr + remapped, pfn, len,
-                                     vma->vm_page_prot);
-               if (err)
-                       break;
-               remapped += len;
-       } while ((sgl = __sg_next(sgl)));
-
-       if (err)
-               zap_vma_ptes(vma, addr, remapped);
-       return err;
+       err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+       if (unlikely(err)) {
+               zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
 }
index ee8e753d98ce2a6b3c86ade241d3b1858b18597c..eae0abd614cbc2f2c0837db3c7e5e82a9ef53781 100644 (file)
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
 
        for (n = 0; n < smoke[0].ncontexts; n++) {
                smoke[0].contexts[n] = live_context(i915, file);
-               if (!smoke[0].contexts[n]) {
-                       ret = -ENOMEM;
+               if (IS_ERR(smoke[0].contexts[n])) {
+                       ret = PTR_ERR(smoke[0].contexts[n]);
                        goto out_contexts;
                }
        }
index b3fd3501c41279979dea06abf2de9eac392a5ea7..5275b2723293b7299f48cd11e48d8789db868c05 100644 (file)
@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
         * porches and sync.
         */
        /* (ps/s) / (pixels/s) = ps/pixels */
-       pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
+       pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
        dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
                pclk);
 
index 453d8b4c5763d9b65665ad8cca3ab0dfd0d8f4b2..07fcd12dca160d0791ebf1c5f78ef113e6870162 100644 (file)
@@ -485,11 +485,12 @@ static int meson_probe_remote(struct platform_device *pdev,
 static void meson_drv_shutdown(struct platform_device *pdev)
 {
        struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
-       struct drm_device *drm = priv->drm;
 
-       DRM_DEBUG_DRIVER("\n");
-       drm_kms_helper_poll_fini(drm);
-       drm_atomic_helper_shutdown(drm);
+       if (!priv)
+               return;
+
+       drm_kms_helper_poll_fini(priv->drm);
+       drm_atomic_helper_shutdown(priv->drm);
 }
 
 static int meson_drv_probe(struct platform_device *pdev)
index d553f62f4eeb8e4f6ec899d6f58c3b64be6bd547..f6c1b62b901e2705ea1cb812301282186253eea0 100644 (file)
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         * GPU registers so we need to add 0x1a800 to the register value on A630
         * to get the right value from PM4.
         */
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_start));
 
        /* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_end));
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_end));
 
        /* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        OUT_RING(ring, submit->seqno);
 
        trace_msm_gpu_submit_flush(submit,
-               gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
-                       REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+                       REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
 
        a6xx_flush(gpu, ring);
 }
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
        gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
 }
 
+/* For a615, a616, a618, A619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+       A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+       A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+       A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       const u32 *regs = a6xx_protect;
+       unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+       BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+       BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+       if (adreno_is_a650(adreno_gpu)) {
+               regs = a650_protect;
+               count = ARRAY_SIZE(a650_protect);
+               count_max = 48;
+       }
+
+       /*
+        * Enable access protection to privileged registers, fault on an access
+        * protect violation and select the last span to protect from the start
+        * address all the way to the end of the register address space
+        */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+       for (i = 0; i < count - 1; i++)
+               gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+       /* last CP_PROTECT to have "infinite" length on the last entry */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
                rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
        gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
        gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
-               uavflagprd_inv >> 4 | lower_bit << 1);
+               uavflagprd_inv << 4 | lower_bit << 1);
        gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
 }
 
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        }
 
        /* Protect registers from the CP */
-       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
-               A6XX_PROTECT_RDONLY(0x600, 0x51));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
-               A6XX_PROTECT_RDONLY(0xfc00, 0x3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
-               A6XX_PROTECT_RDONLY(0x0, 0x4f9));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
-               A6XX_PROTECT_RDONLY(0x501, 0xa));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
-               A6XX_PROTECT_RDONLY(0x511, 0x44));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
-               A6XX_PROTECT_RW(0xbe20, 0x11f3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
-                       A6XX_PROTECT_RDONLY(0x980, 0x4));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+       a6xx_set_cp_protect(gpu);
 
        /* Enable expanded apriv for targets that support it */
        if (gpu->hw_apriv) {
@@ -1153,10 +1226,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
 {
        struct device_node *phandle;
 
-       a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
-       if (IS_ERR(a6xx_gpu->llc_mmio))
-               return;
-
        /*
         * There is a different programming path for targets with an mmu500
         * attached, so detect if that is the case
@@ -1166,6 +1235,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
                of_device_is_compatible(phandle, "arm,mmu-500"));
        of_node_put(phandle);
 
+       if (a6xx_gpu->have_mmu500)
+               a6xx_gpu->llc_mmio = NULL;
+       else
+               a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+
        a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
        a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
 
@@ -1210,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
        if (ret)
                return ret;
 
-       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+       if (a6xx_gpu->shadow_bo)
                for (i = 0; i < gpu->nr_rings; i++)
                        a6xx_gpu->shadow[i] = 0;
 
index ce0610c5256f7ae16a0fc1ec3d9e20839ab4c728..bb544dfe57379d01165e8b8855b575d9cd242715 100644 (file)
@@ -44,7 +44,7 @@ struct a6xx_gpu {
  * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
  * registers starting at _reg.
  */
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
        ((1 << 31) | \
        (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
 
index 82a8673ab8daf958789bc8df7e257cd7378ea1ba..d7e4a39a904e2734f92c148480f70bc81460bdd7 100644 (file)
@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
        dp_audio_setup_acr(audio);
        dp_audio_safe_to_exit_level(audio);
        dp_audio_enable(audio, true);
+       dp_display_signal_audio_start(dp_display);
        dp_display->audio_enabled = true;
 
 end:
index 5a39da6e1eaf2779a2944a81490e22f8bc0374d2..1784e119269b7241ef65f697de6a286c6a66c1f2 100644 (file)
@@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
        return 0;
 }
 
+void dp_display_signal_audio_start(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       reinit_completion(&dp->audio_comp);
+}
+
 void dp_display_signal_audio_complete(struct msm_dp *dp_display)
 {
        struct dp_display_private *dp;
@@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
        mutex_lock(&dp->event_mutex);
 
        state = dp->hpd_state;
-       if (state == ST_CONNECT_PENDING) {
-               dp_display_enable(dp, 0);
+       if (state == ST_CONNECT_PENDING)
                dp->hpd_state = ST_CONNECTED;
-       }
 
        mutex_unlock(&dp->event_mutex);
 
@@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
        dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
 
        /* signal the disconnect event early to ensure proper teardown */
-       reinit_completion(&dp->audio_comp);
        dp_display_handle_plugged_change(g_dp_display, false);
 
        dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
@@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
        mutex_lock(&dp->event_mutex);
 
        state =  dp->hpd_state;
-       if (state == ST_DISCONNECT_PENDING) {
-               dp_display_disable(dp, 0);
+       if (state == ST_DISCONNECT_PENDING)
                dp->hpd_state = ST_DISCONNECTED;
-       }
 
        mutex_unlock(&dp->event_mutex);
 
@@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
        /* wait only if audio was enabled */
        if (dp_display->audio_enabled) {
                /* signal the disconnect event */
-               reinit_completion(&dp->audio_comp);
                dp_display_handle_plugged_change(dp_display, false);
                if (!wait_for_completion_timeout(&dp->audio_comp,
                                HZ * 5))
@@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
 
        status = dp_catalog_link_is_connected(dp->catalog);
 
-       if (status)
+       /*
+        * can not declared display is connected unless
+        * HDMI cable is plugged in and sink_count of
+        * dongle become 1
+        */
+       if (status && dp->link->sink_count)
                dp->dp_display.is_connected = true;
        else
                dp->dp_display.is_connected = false;
index 6092ba1ed85ed1bfde70059e682eae72fb9b8836..5173c89eedf7e189c2586cbd3c496a41a817a98f 100644 (file)
@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
 int dp_display_request_irq(struct msm_dp *dp_display);
 bool dp_display_check_video_test(struct msm_dp *dp_display);
 int dp_display_get_test_bpp(struct msm_dp *dp_display);
+void dp_display_signal_audio_start(struct msm_dp *dp_display);
 void dp_display_signal_audio_complete(struct msm_dp *dp_display);
 
 #endif /* _DP_DISPLAY_H_ */
index f0a2ddf96a4b95aaf51a92d2f7947f5769a32388..ff7f2ec4203001437bc1453bb8f52492d65be9c4 100644 (file)
@@ -843,7 +843,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
        if (pixel_clk_provider)
                *pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
 
-       return -EINVAL;
+       return 0;
 }
 
 void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
index 34bc93548fcfb7aba893cae6a19d25ac393252e1..657778889d359f2957f9993ccd0ad473035b0bd2 100644 (file)
@@ -432,6 +432,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
        pll_freq += div_u64(tmp64, multiplier);
 
        vco_rate = pll_freq;
+       pll_10nm->vco_current_rate = vco_rate;
 
        DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
            pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
index 582b1428f9715d60734d518bce09b9fdf69e573a..86e40a0d41a3b1d0623af7c3eae85b4a5d5e4d40 100644 (file)
@@ -405,6 +405,10 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
        if (!vco_name)
                return -ENOMEM;
 
+       parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+       if (!parent_name)
+               return -ENOMEM;
+
        clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
        if (!clk_name)
                return -ENOMEM;
index e76ce40a12abad143cd69f9661d1880ddccfe3c0..6f96fbac8282404e2b95916865074f974c4adce1 100644 (file)
@@ -460,6 +460,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
        pll_freq += div_u64(tmp64, multiplier);
 
        vco_rate = pll_freq;
+       pll_7nm->vco_current_rate = vco_rate;
 
        DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
            pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
index e1104d2454e2ee50cd09f31284ef0ed143a89e42..fe7d17cd35ecd7d8ea9e813de41a28a60717ce76 100644 (file)
@@ -42,7 +42,7 @@
  * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
  */
 #define MSM_VERSION_MAJOR      1
-#define MSM_VERSION_MINOR      6
+#define MSM_VERSION_MINOR      7
 #define MSM_VERSION_PATCHLEVEL 0
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
index b199942266a26502eb449e83010d01a9cbc41bf1..369d91e6361ecd2723f245f1bd515f0f7044498e 100644 (file)
@@ -190,13 +190,25 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
        }
 
        p = get_pages(obj);
+
+       if (!IS_ERR(p)) {
+               msm_obj->pin_count++;
+               update_inactive(msm_obj);
+       }
+
        msm_gem_unlock(obj);
        return p;
 }
 
 void msm_gem_put_pages(struct drm_gem_object *obj)
 {
-       /* when we start tracking the pin count, then do something here */
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       msm_gem_lock(obj);
+       msm_obj->pin_count--;
+       GEM_WARN_ON(msm_obj->pin_count < 0);
+       update_inactive(msm_obj);
+       msm_gem_unlock(obj);
 }
 
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
@@ -646,6 +658,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
                        ret = -ENOMEM;
                        goto fail;
                }
+
+               update_inactive(msm_obj);
        }
 
        return msm_obj->vaddr;
@@ -1227,6 +1241,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
                to_msm_bo(obj)->vram_node = &vma->node;
 
+               /* Call chain get_pages() -> update_inactive() tries to
+                * access msm_obj->mm_list, but it is not initialized yet.
+                * To avoid NULL pointer dereference error, initialize
+                * mm_list to be empty.
+                */
+               INIT_LIST_HEAD(&msm_obj->mm_list);
+
                msm_gem_lock(obj);
                pages = get_pages(obj);
                msm_gem_unlock(obj);
index a6480d2c81b2c290a3f5f03abb35250128495a86..03e2cc2a2ce152ad64904451fdf1d504332a3d40 100644 (file)
@@ -221,7 +221,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 /* imported/exported objects are not purgeable: */
 static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
 {
-       return msm_obj->base.dma_buf && msm_obj->base.import_attach;
+       return msm_obj->base.import_attach || msm_obj->pin_count;
 }
 
 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -271,7 +271,7 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
 
 static inline bool is_unevictable(struct msm_gem_object *msm_obj)
 {
-       return is_unpurgeable(msm_obj) || msm_obj->pin_count || msm_obj->vaddr;
+       return is_unpurgeable(msm_obj) || msm_obj->vaddr;
 }
 
 static inline void mark_evictable(struct msm_gem_object *msm_obj)
index dd5ef64937230890060733afdc8accb68af0b61d..769f666335ac49d2d7e6cf2a25e56da9d3615b34 100644 (file)
@@ -1687,102 +1687,102 @@ static int ni_populate_smc_initial_state(struct radeon_device *rdev,
        u32 reg;
        int ret;
 
-       table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
                cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
+       table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL_2 =
                cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
-       table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
                cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
+       table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 =
                cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
-       table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+       table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
                cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
-       table->initialState.levels[0].mclk.vDLL_CNTL =
+       table->initialState.level.mclk.vDLL_CNTL =
                cpu_to_be32(ni_pi->clock_registers.dll_cntl);
-       table->initialState.levels[0].mclk.vMPLL_SS =
+       table->initialState.level.mclk.vMPLL_SS =
                cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
-       table->initialState.levels[0].mclk.vMPLL_SS2 =
+       table->initialState.level.mclk.vMPLL_SS2 =
                cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
-       table->initialState.levels[0].mclk.mclk_value =
+       table->initialState.level.mclk.mclk_value =
                cpu_to_be32(initial_state->performance_levels[0].mclk);
 
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
                cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
                cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
                cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
                cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
-       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+       table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
                cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
-       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+       table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
                cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
-       table->initialState.levels[0].sclk.sclk_value =
+       table->initialState.level.sclk.sclk_value =
                cpu_to_be32(initial_state->performance_levels[0].sclk);
-       table->initialState.levels[0].arbRefreshState =
+       table->initialState.level.arbRefreshState =
                NISLANDS_INITIAL_STATE_ARB_INDEX;
 
-       table->initialState.levels[0].ACIndex = 0;
+       table->initialState.level.ACIndex = 0;
 
        ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
                                        initial_state->performance_levels[0].vddc,
-                                       &table->initialState.levels[0].vddc);
+                                       &table->initialState.level.vddc);
        if (!ret) {
                u16 std_vddc;
 
                ret = ni_get_std_voltage_value(rdev,
-                                              &table->initialState.levels[0].vddc,
+                                              &table->initialState.level.vddc,
                                               &std_vddc);
                if (!ret)
                        ni_populate_std_voltage_value(rdev, std_vddc,
-                                                     table->initialState.levels[0].vddc.index,
-                                                     &table->initialState.levels[0].std_vddc);
+                                                     table->initialState.level.vddc.index,
+                                                     &table->initialState.level.std_vddc);
        }
 
        if (eg_pi->vddci_control)
                ni_populate_voltage_value(rdev,
                                          &eg_pi->vddci_voltage_table,
                                          initial_state->performance_levels[0].vddci,
-                                         &table->initialState.levels[0].vddci);
+                                         &table->initialState.level.vddci);
 
-       ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+       ni_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
 
        reg = CG_R(0xffff) | CG_L(0);
-       table->initialState.levels[0].aT = cpu_to_be32(reg);
+       table->initialState.level.aT = cpu_to_be32(reg);
 
-       table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+       table->initialState.level.bSP = cpu_to_be32(pi->dsp);
 
        if (pi->boot_in_gen2)
-               table->initialState.levels[0].gen2PCIE = 1;
+               table->initialState.level.gen2PCIE = 1;
        else
-               table->initialState.levels[0].gen2PCIE = 0;
+               table->initialState.level.gen2PCIE = 0;
 
        if (pi->mem_gddr5) {
-               table->initialState.levels[0].strobeMode =
+               table->initialState.level.strobeMode =
                        cypress_get_strobe_mode_settings(rdev,
                                                         initial_state->performance_levels[0].mclk);
 
                if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
-                       table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
+                       table->initialState.level.mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
                else
-                       table->initialState.levels[0].mcFlags =  0;
+                       table->initialState.level.mcFlags =  0;
        }
 
        table->initialState.levelCount = 1;
 
        table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
 
-       table->initialState.levels[0].dpm2.MaxPS = 0;
-       table->initialState.levels[0].dpm2.NearTDPDec = 0;
-       table->initialState.levels[0].dpm2.AboveSafeInc = 0;
-       table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+       table->initialState.level.dpm2.MaxPS = 0;
+       table->initialState.level.dpm2.NearTDPDec = 0;
+       table->initialState.level.dpm2.AboveSafeInc = 0;
+       table->initialState.level.dpm2.BelowSafeInc = 0;
 
        reg = MIN_POWER_MASK | MAX_POWER_MASK;
-       table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+       table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
 
        reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-       table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+       table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
 
        return 0;
 }
@@ -1813,43 +1813,43 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
        if (pi->acpi_vddc) {
                ret = ni_populate_voltage_value(rdev,
                                                &eg_pi->vddc_voltage_table,
-                                               pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+                                               pi->acpi_vddc, &table->ACPIState.level.vddc);
                if (!ret) {
                        u16 std_vddc;
 
                        ret = ni_get_std_voltage_value(rdev,
-                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+                                                      &table->ACPIState.level.vddc, &std_vddc);
                        if (!ret)
                                ni_populate_std_voltage_value(rdev, std_vddc,
-                                                             table->ACPIState.levels[0].vddc.index,
-                                                             &table->ACPIState.levels[0].std_vddc);
+                                                             table->ACPIState.level.vddc.index,
+                                                             &table->ACPIState.level.std_vddc);
                }
 
                if (pi->pcie_gen2) {
                        if (pi->acpi_pcie_gen2)
-                               table->ACPIState.levels[0].gen2PCIE = 1;
+                               table->ACPIState.level.gen2PCIE = 1;
                        else
-                               table->ACPIState.levels[0].gen2PCIE = 0;
+                               table->ACPIState.level.gen2PCIE = 0;
                } else {
-                       table->ACPIState.levels[0].gen2PCIE = 0;
+                       table->ACPIState.level.gen2PCIE = 0;
                }
        } else {
                ret = ni_populate_voltage_value(rdev,
                                                &eg_pi->vddc_voltage_table,
                                                pi->min_vddc_in_table,
-                                               &table->ACPIState.levels[0].vddc);
+                                               &table->ACPIState.level.vddc);
                if (!ret) {
                        u16 std_vddc;
 
                        ret = ni_get_std_voltage_value(rdev,
-                                                      &table->ACPIState.levels[0].vddc,
+                                                      &table->ACPIState.level.vddc,
                                                       &std_vddc);
                        if (!ret)
                                ni_populate_std_voltage_value(rdev, std_vddc,
-                                                             table->ACPIState.levels[0].vddc.index,
-                                                             &table->ACPIState.levels[0].std_vddc);
+                                                             table->ACPIState.level.vddc.index,
+                                                             &table->ACPIState.level.std_vddc);
                }
-               table->ACPIState.levels[0].gen2PCIE = 0;
+               table->ACPIState.level.gen2PCIE = 0;
        }
 
        if (eg_pi->acpi_vddci) {
@@ -1857,7 +1857,7 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
                        ni_populate_voltage_value(rdev,
                                                  &eg_pi->vddci_voltage_table,
                                                  eg_pi->acpi_vddci,
-                                                 &table->ACPIState.levels[0].vddci);
+                                                 &table->ACPIState.level.vddci);
        }
 
 
@@ -1900,37 +1900,37 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
        spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
        spll_func_cntl_2 |= SCLK_MUX_SEL(4);
 
-       table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
-       table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
-       table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
-       table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
+       table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+       table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+       table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+       table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+       table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+       table->ACPIState.level.mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
 
-       table->ACPIState.levels[0].mclk.mclk_value = 0;
+       table->ACPIState.level.mclk.mclk_value = 0;
 
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
 
-       table->ACPIState.levels[0].sclk.sclk_value = 0;
+       table->ACPIState.level.sclk.sclk_value = 0;
 
-       ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+       ni_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
 
        if (eg_pi->dynamic_ac_timing)
-               table->ACPIState.levels[0].ACIndex = 1;
+               table->ACPIState.level.ACIndex = 1;
 
-       table->ACPIState.levels[0].dpm2.MaxPS = 0;
-       table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
-       table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
-       table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+       table->ACPIState.level.dpm2.MaxPS = 0;
+       table->ACPIState.level.dpm2.NearTDPDec = 0;
+       table->ACPIState.level.dpm2.AboveSafeInc = 0;
+       table->ACPIState.level.dpm2.BelowSafeInc = 0;
 
        reg = MIN_POWER_MASK | MAX_POWER_MASK;
-       table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+       table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
 
        reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-       table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+       table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
 
        return 0;
 }
@@ -1980,7 +1980,9 @@ static int ni_init_smc_table(struct radeon_device *rdev)
        if (ret)
                return ret;
 
-       table->driverState = table->initialState;
+       table->driverState.flags = table->initialState.flags;
+       table->driverState.levelCount = table->initialState.levelCount;
+       table->driverState.levels[0] = table->initialState.level;
 
        table->ULVState = table->initialState;
 
index 7395cb6b3cac6d9e3ae1bbb167509c6cf8e53bc6..42f3bab0f9ee689845765c44b849538e8d53d121 100644 (file)
@@ -143,6 +143,14 @@ struct NISLANDS_SMC_SWSTATE
 
 typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
 
+struct NISLANDS_SMC_SWSTATE_SINGLE {
+       uint8_t                             flags;
+       uint8_t                             levelCount;
+       uint8_t                             padding2;
+       uint8_t                             padding3;
+       NISLANDS_SMC_HW_PERFORMANCE_LEVEL   level;
+};
+
 #define NISLANDS_SMC_VOLTAGEMASK_VDDC  0
 #define NISLANDS_SMC_VOLTAGEMASK_MVDD  1
 #define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -160,19 +168,19 @@ typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
 
 struct NISLANDS_SMC_STATETABLE
 {
-    uint8_t                             thermalProtectType;
-    uint8_t                             systemFlags;
-    uint8_t                             maxVDDCIndexInPPTable;
-    uint8_t                             extraFlags;
-    uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
-    uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
-    NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
-    PP_NIslands_DPM2Parameters          dpm2Params;
-    NISLANDS_SMC_SWSTATE                initialState;
-    NISLANDS_SMC_SWSTATE                ACPIState;
-    NISLANDS_SMC_SWSTATE                ULVState;
-    NISLANDS_SMC_SWSTATE                driverState;
-    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+       uint8_t                             thermalProtectType;
+       uint8_t                             systemFlags;
+       uint8_t                             maxVDDCIndexInPPTable;
+       uint8_t                             extraFlags;
+       uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+       uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+       NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+       PP_NIslands_DPM2Parameters          dpm2Params;
+       struct NISLANDS_SMC_SWSTATE_SINGLE  initialState;
+       struct NISLANDS_SMC_SWSTATE_SINGLE  ACPIState;
+       struct NISLANDS_SMC_SWSTATE_SINGLE  ULVState;
+       NISLANDS_SMC_SWSTATE                driverState;
+       NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
 };
 
 typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
index 42281fce552e6ee66221b0646cd80eedddb9ca1a..56ed5634cebef7945c289ec6c078fe2527b42db3 100644 (file)
@@ -1549,6 +1549,7 @@ struct radeon_dpm {
        void                    *priv;
        u32                     new_active_crtcs;
        int                     new_active_crtc_count;
+       int                     high_pixelclock_count;
        u32                     current_active_crtcs;
        int                     current_active_crtc_count;
        bool single_display;
index 3808a753127bcb7c1eb861ea85bc85e09e0b7c12..04109a2a6fd760f10ac220bd3ff08f6b10afba3d 100644 (file)
@@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               rdev->gart.pages[p] = pagelist[i];
+               rdev->gart.pages[p] = pagelist ? pagelist[i] :
+                       rdev->dummy_page.page;
                page_base = dma_addr[i];
                for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                        page_entry = radeon_gart_get_page_entry(page_base, flags);
index 0c1950f4e146f158f00d2cc879c5bac3e07d0325..3861c0b98fcf38906ebff11c46ab062a0fba8563 100644 (file)
@@ -1767,6 +1767,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
        struct drm_device *ddev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
+       struct radeon_connector *radeon_connector;
 
        if (!rdev->pm.dpm_enabled)
                return;
@@ -1776,6 +1777,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
        /* update active crtc counts */
        rdev->pm.dpm.new_active_crtcs = 0;
        rdev->pm.dpm.new_active_crtc_count = 0;
+       rdev->pm.dpm.high_pixelclock_count = 0;
        if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc,
                                    &ddev->mode_config.crtc_list, head) {
@@ -1783,6 +1785,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
                        if (crtc->enabled) {
                                rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
                                rdev->pm.dpm.new_active_crtc_count++;
+                               if (!radeon_crtc->connector)
+                                       continue;
+
+                               radeon_connector = to_radeon_connector(radeon_crtc->connector);
+                               if (radeon_connector->pixelclock_for_modeset > 297000)
+                                       rdev->pm.dpm.high_pixelclock_count++;
                        }
                }
        }
index dfa9fdbe98da25fc2360769b583419405b7f962f..06bb24d7a9feea8521404abdbcde020294eca377 100644 (file)
@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;
 
-       memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+       memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
 
        size = radeon_bo_size(rdev->uvd.vcpu_bo);
        size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;
 
-       memset(ptr, 0, size);
+       memset_io((void __iomem *)ptr, 0, size);
 
        return 0;
 }
index 91860955180474ec23f5c0fcaf9ff9b0fc02b53c..3add39c1a6897b42dbf7f0064e5f4958ffe6496c 100644 (file)
@@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6605)) {
                        max_sclk = 75000;
                }
+
+               if (rdev->pm.dpm.high_pixelclock_count > 1)
+                       disable_sclk_switching = true;
        }
 
        if (rps->vce_active) {
@@ -4350,70 +4353,70 @@ static int si_populate_smc_initial_state(struct radeon_device *rdev,
        u32 reg;
        int ret;
 
-       table->initialState.levels[0].mclk.vDLL_CNTL =
+       table->initialState.level.mclk.vDLL_CNTL =
                cpu_to_be32(si_pi->clock_registers.dll_cntl);
-       table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+       table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
                cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
-       table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+       table->initialState.level.mclk.vMPLL_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
-       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+       table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
                cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
-       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+       table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
                cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
-       table->initialState.levels[0].mclk.vMPLL_SS =
+       table->initialState.level.mclk.vMPLL_SS =
                cpu_to_be32(si_pi->clock_registers.mpll_ss1);
-       table->initialState.levels[0].mclk.vMPLL_SS2 =
+       table->initialState.level.mclk.vMPLL_SS2 =
                cpu_to_be32(si_pi->clock_registers.mpll_ss2);
 
-       table->initialState.levels[0].mclk.mclk_value =
+       table->initialState.level.mclk.mclk_value =
                cpu_to_be32(initial_state->performance_levels[0].mclk);
 
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
-       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+       table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
                cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
-       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+       table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
                cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
-       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
+       table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
                cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
 
-       table->initialState.levels[0].sclk.sclk_value =
+       table->initialState.level.sclk.sclk_value =
                cpu_to_be32(initial_state->performance_levels[0].sclk);
 
-       table->initialState.levels[0].arbRefreshState =
+       table->initialState.level.arbRefreshState =
                SISLANDS_INITIAL_STATE_ARB_INDEX;
 
-       table->initialState.levels[0].ACIndex = 0;
+       table->initialState.level.ACIndex = 0;
 
        ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
                                        initial_state->performance_levels[0].vddc,
-                                       &table->initialState.levels[0].vddc);
+                                       &table->initialState.level.vddc);
 
        if (!ret) {
                u16 std_vddc;
 
                ret = si_get_std_voltage_value(rdev,
-                                              &table->initialState.levels[0].vddc,
+                                              &table->initialState.level.vddc,
                                               &std_vddc);
                if (!ret)
                        si_populate_std_voltage_value(rdev, std_vddc,
-                                                     table->initialState.levels[0].vddc.index,
-                                                     &table->initialState.levels[0].std_vddc);
+                                                     table->initialState.level.vddc.index,
+                                                     &table->initialState.level.std_vddc);
        }
 
        if (eg_pi->vddci_control)
                si_populate_voltage_value(rdev,
                                          &eg_pi->vddci_voltage_table,
                                          initial_state->performance_levels[0].vddci,
-                                         &table->initialState.levels[0].vddci);
+                                         &table->initialState.level.vddci);
 
        if (si_pi->vddc_phase_shed_control)
                si_populate_phase_shedding_value(rdev,
@@ -4421,43 +4424,43 @@ static int si_populate_smc_initial_state(struct radeon_device *rdev,
                                                 initial_state->performance_levels[0].vddc,
                                                 initial_state->performance_levels[0].sclk,
                                                 initial_state->performance_levels[0].mclk,
-                                                &table->initialState.levels[0].vddc);
+                                                &table->initialState.level.vddc);
 
-       si_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+       si_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
 
        reg = CG_R(0xffff) | CG_L(0);
-       table->initialState.levels[0].aT = cpu_to_be32(reg);
+       table->initialState.level.aT = cpu_to_be32(reg);
 
-       table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+       table->initialState.level.bSP = cpu_to_be32(pi->dsp);
 
-       table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+       table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
 
        if (pi->mem_gddr5) {
-               table->initialState.levels[0].strobeMode =
+               table->initialState.level.strobeMode =
                        si_get_strobe_mode_settings(rdev,
                                                    initial_state->performance_levels[0].mclk);
 
                if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
-                       table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+                       table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
                else
-                       table->initialState.levels[0].mcFlags =  0;
+                       table->initialState.level.mcFlags =  0;
        }
 
        table->initialState.levelCount = 1;
 
        table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
 
-       table->initialState.levels[0].dpm2.MaxPS = 0;
-       table->initialState.levels[0].dpm2.NearTDPDec = 0;
-       table->initialState.levels[0].dpm2.AboveSafeInc = 0;
-       table->initialState.levels[0].dpm2.BelowSafeInc = 0;
-       table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+       table->initialState.level.dpm2.MaxPS = 0;
+       table->initialState.level.dpm2.NearTDPDec = 0;
+       table->initialState.level.dpm2.AboveSafeInc = 0;
+       table->initialState.level.dpm2.BelowSafeInc = 0;
+       table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
 
        reg = MIN_POWER_MASK | MAX_POWER_MASK;
-       table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+       table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
 
        reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-       table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+       table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
 
        return 0;
 }
@@ -4488,18 +4491,18 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
 
        if (pi->acpi_vddc) {
                ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
-                                               pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+                                               pi->acpi_vddc, &table->ACPIState.level.vddc);
                if (!ret) {
                        u16 std_vddc;
 
                        ret = si_get_std_voltage_value(rdev,
-                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+                                                      &table->ACPIState.level.vddc, &std_vddc);
                        if (!ret)
                                si_populate_std_voltage_value(rdev, std_vddc,
-                                                             table->ACPIState.levels[0].vddc.index,
-                                                             &table->ACPIState.levels[0].std_vddc);
+                                                             table->ACPIState.level.vddc.index,
+                                                             &table->ACPIState.level.std_vddc);
                }
-               table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+               table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
 
                if (si_pi->vddc_phase_shed_control) {
                        si_populate_phase_shedding_value(rdev,
@@ -4507,23 +4510,23 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
                                                         pi->acpi_vddc,
                                                         0,
                                                         0,
-                                                        &table->ACPIState.levels[0].vddc);
+                                                        &table->ACPIState.level.vddc);
                }
        } else {
                ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
-                                               pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+                                               pi->min_vddc_in_table, &table->ACPIState.level.vddc);
                if (!ret) {
                        u16 std_vddc;
 
                        ret = si_get_std_voltage_value(rdev,
-                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+                                                      &table->ACPIState.level.vddc, &std_vddc);
 
                        if (!ret)
                                si_populate_std_voltage_value(rdev, std_vddc,
-                                                             table->ACPIState.levels[0].vddc.index,
-                                                             &table->ACPIState.levels[0].std_vddc);
+                                                             table->ACPIState.level.vddc.index,
+                                                             &table->ACPIState.level.std_vddc);
                }
-               table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
+               table->ACPIState.level.gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
                                                                                    si_pi->sys_pcie_mask,
                                                                                    si_pi->boot_pcie_gen,
                                                                                    RADEON_PCIE_GEN1);
@@ -4534,14 +4537,14 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
                                                         pi->min_vddc_in_table,
                                                         0,
                                                         0,
-                                                        &table->ACPIState.levels[0].vddc);
+                                                        &table->ACPIState.level.vddc);
        }
 
        if (pi->acpi_vddc) {
                if (eg_pi->acpi_vddci)
                        si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
                                                  eg_pi->acpi_vddci,
-                                                 &table->ACPIState.levels[0].vddci);
+                                                 &table->ACPIState.level.vddci);
        }
 
        mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
@@ -4552,59 +4555,59 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
        spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
        spll_func_cntl_2 |= SCLK_MUX_SEL(4);
 
-       table->ACPIState.levels[0].mclk.vDLL_CNTL =
+       table->ACPIState.level.mclk.vDLL_CNTL =
                cpu_to_be32(dll_cntl);
-       table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+       table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
                cpu_to_be32(mclk_pwrmgt_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+       table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
                cpu_to_be32(mpll_ad_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+       table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
                cpu_to_be32(mpll_dq_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+       table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
                cpu_to_be32(mpll_func_cntl);
-       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+       table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
                cpu_to_be32(mpll_func_cntl_1);
-       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+       table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
                cpu_to_be32(mpll_func_cntl_2);
-       table->ACPIState.levels[0].mclk.vMPLL_SS =
+       table->ACPIState.level.mclk.vMPLL_SS =
                cpu_to_be32(si_pi->clock_registers.mpll_ss1);
-       table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+       table->ACPIState.level.mclk.vMPLL_SS2 =
                cpu_to_be32(si_pi->clock_registers.mpll_ss2);
 
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
                cpu_to_be32(spll_func_cntl);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
                cpu_to_be32(spll_func_cntl_2);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
                cpu_to_be32(spll_func_cntl_3);
-       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+       table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
                cpu_to_be32(spll_func_cntl_4);
 
-       table->ACPIState.levels[0].mclk.mclk_value = 0;
-       table->ACPIState.levels[0].sclk.sclk_value = 0;
+       table->ACPIState.level.mclk.mclk_value = 0;
+       table->ACPIState.level.sclk.sclk_value = 0;
 
-       si_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+       si_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
 
        if (eg_pi->dynamic_ac_timing)
-               table->ACPIState.levels[0].ACIndex = 0;
+               table->ACPIState.level.ACIndex = 0;
 
-       table->ACPIState.levels[0].dpm2.MaxPS = 0;
-       table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
-       table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
-       table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
-       table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+       table->ACPIState.level.dpm2.MaxPS = 0;
+       table->ACPIState.level.dpm2.NearTDPDec = 0;
+       table->ACPIState.level.dpm2.AboveSafeInc = 0;
+       table->ACPIState.level.dpm2.BelowSafeInc = 0;
+       table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
 
        reg = MIN_POWER_MASK | MAX_POWER_MASK;
-       table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+       table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
 
        reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
-       table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+       table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
 
        return 0;
 }
 
 static int si_populate_ulv_state(struct radeon_device *rdev,
-                                SISLANDS_SMC_SWSTATE *state)
+                                struct SISLANDS_SMC_SWSTATE_SINGLE *state)
 {
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct si_power_info *si_pi = si_get_pi(rdev);
@@ -4613,19 +4616,19 @@ static int si_populate_ulv_state(struct radeon_device *rdev,
        int ret;
 
        ret = si_convert_power_level_to_smc(rdev, &ulv->pl,
-                                           &state->levels[0]);
+                                           &state->level);
        if (!ret) {
                if (eg_pi->sclk_deep_sleep) {
                        if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
-                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+                               state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
                        else
-                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+                               state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
                }
                if (ulv->one_pcie_lane_in_ulv)
                        state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
-               state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
-               state->levels[0].ACIndex = 1;
-               state->levels[0].std_vddc = state->levels[0].vddc;
+               state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+               state->level.ACIndex = 1;
+               state->level.std_vddc = state->level.vddc;
                state->levelCount = 1;
 
                state->flags |= PPSMC_SWSTATE_FLAG_DC;
@@ -4725,7 +4728,9 @@ static int si_init_smc_table(struct radeon_device *rdev)
        if (ret)
                return ret;
 
-       table->driverState = table->initialState;
+       table->driverState.flags = table->initialState.flags;
+       table->driverState.levelCount = table->initialState.levelCount;
+       table->driverState.levels[0] = table->initialState.level;
 
        ret = si_do_program_memory_timing_parameters(rdev, radeon_boot_state,
                                                     SISLANDS_INITIAL_STATE_ARB_INDEX);
@@ -5275,8 +5280,8 @@ static int si_upload_ulv_state(struct radeon_device *rdev)
        if (ulv->supported && ulv->pl.vddc) {
                u32 address = si_pi->state_table_start +
                        offsetof(SISLANDS_SMC_STATETABLE, ULVState);
-               SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
-               u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+               struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
+               u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);
 
                memset(smc_state, 0, state_size);
 
index fbd6589bdab92a7a464ec1d4cddb49e04628b3de..4ea1cb2e45a3ca98eaa1c544008a0c598a7d2e67 100644 (file)
@@ -191,6 +191,14 @@ struct SISLANDS_SMC_SWSTATE
 
 typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
 
+struct SISLANDS_SMC_SWSTATE_SINGLE {
+       uint8_t                             flags;
+       uint8_t                             levelCount;
+       uint8_t                             padding2;
+       uint8_t                             padding3;
+       SISLANDS_SMC_HW_PERFORMANCE_LEVEL   level;
+};
+
 #define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
 #define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
 #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -208,19 +216,19 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
 
 struct SISLANDS_SMC_STATETABLE
 {
-    uint8_t                             thermalProtectType;
-    uint8_t                             systemFlags;
-    uint8_t                             maxVDDCIndexInPPTable;
-    uint8_t                             extraFlags;
-    uint32_t                            lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
-    SISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
-    SISLANDS_SMC_VOLTAGEMASKTABLE       phaseMaskTable;
-    PP_SIslands_DPM2Parameters          dpm2Params;
-    SISLANDS_SMC_SWSTATE                initialState;
-    SISLANDS_SMC_SWSTATE                ACPIState;
-    SISLANDS_SMC_SWSTATE                ULVState;
-    SISLANDS_SMC_SWSTATE                driverState;
-    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+       uint8_t                                 thermalProtectType;
+       uint8_t                                 systemFlags;
+       uint8_t                                 maxVDDCIndexInPPTable;
+       uint8_t                                 extraFlags;
+       uint32_t                                lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+       SISLANDS_SMC_VOLTAGEMASKTABLE           voltageMaskTable;
+       SISLANDS_SMC_VOLTAGEMASKTABLE           phaseMaskTable;
+       PP_SIslands_DPM2Parameters              dpm2Params;
+       struct SISLANDS_SMC_SWSTATE_SINGLE      initialState;
+       struct SISLANDS_SMC_SWSTATE_SINGLE      ACPIState;
+       struct SISLANDS_SMC_SWSTATE_SINGLE      ULVState;
+       SISLANDS_SMC_SWSTATE                    driverState;
+       SISLANDS_SMC_HW_PERFORMANCE_LEVEL       dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
 };
 
 typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
index bbdfd5e26ec88dbf1e9f115f5e0e6f48a29c1841..f75fb157f2ff757da981031f438f80afda33d355 100644 (file)
@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                goto err_disable_clk_tmds;
        }
 
-       ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+       ret = sun8i_hdmi_phy_get(hdmi, phy_node);
        of_node_put(phy_node);
        if (ret) {
                dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 
 cleanup_encoder:
        drm_encoder_cleanup(encoder);
-       sun8i_hdmi_phy_remove(hdmi);
 err_disable_clk_tmds:
        clk_disable_unprepare(hdmi->clk_tmds);
 err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
        struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
 
        dw_hdmi_unbind(hdmi->hdmi);
-       sun8i_hdmi_phy_remove(hdmi);
        clk_disable_unprepare(hdmi->clk_tmds);
        reset_control_assert(hdmi->rst_ctrl);
        gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
                .of_match_table = sun8i_dw_hdmi_dt_ids,
        },
 };
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+       if (ret) {
+               platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+               return ret;
+       }
+
+       return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+       platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+       platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);
 
 MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
 MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
index d4b55af0592f840997e37f265db71a1e4c741ccc..74f6ed0e25709f6ae3bbfebb430ae89bf75534cb 100644 (file)
@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
        struct gpio_desc                *ddc_en;
 };
 
+extern struct platform_driver sun8i_hdmi_phy_driver;
+
 static inline struct sun8i_dw_hdmi *
 encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
 {
        return container_of(encoder, struct sun8i_dw_hdmi, encoder);
 }
 
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
 
 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
index 9994edf675096e6375a214df3fc9767d00e2552a..c9239708d398cdaa85ebffba7ca4a41ecba5a760 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/delay.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>
 
 #include "sun8i_dw_hdmi.h"
 
@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
        { /* sentinel */ }
 };
 
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+       struct platform_device *pdev = of_find_device_by_node(node);
+       struct sun8i_hdmi_phy *phy;
+
+       if (!pdev)
+               return -EPROBE_DEFER;
+
+       phy = platform_get_drvdata(pdev);
+       if (!phy)
+               return -EPROBE_DEFER;
+
+       hdmi->phy = phy;
+
+       put_device(&pdev->dev);
+
+       return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
-       struct device *dev = hdmi->dev;
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
        struct sun8i_hdmi_phy *phy;
        struct resource res;
        void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
                clk_prepare_enable(phy->clk_phy);
        }
 
-       hdmi->phy = phy;
+       platform_set_drvdata(pdev, phy);
 
        return 0;
 
@@ -728,9 +749,9 @@ err_put_clk_bus:
        return ret;
 }
 
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
 {
-       struct sun8i_hdmi_phy *phy = hdmi->phy;
+       struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
 
        clk_disable_unprepare(phy->clk_mod);
        clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
        clk_put(phy->clk_pll1);
        clk_put(phy->clk_mod);
        clk_put(phy->clk_bus);
+       return 0;
 }
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+       .probe  = sun8i_hdmi_phy_probe,
+       .remove = sun8i_hdmi_phy_remove,
+       .driver = {
+               .name = "sun8i-hdmi-phy",
+               .of_match_table = sun8i_hdmi_phy_of_table,
+       },
+};
index 87df251c1fcf5589cd96f335ffcaf8be21895e5d..0cb8680653483bbb5c49cdd390b0921c8fe7662a 100644 (file)
@@ -25,7 +25,7 @@
 #include "trace.h"
 
 /* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
 
 struct reset_control;
 
index 79bff8b48271a47071818131505b79e2a0be4b18..bfae8a02f55b82dfc4968c7d03b507994e49b1c1 100644 (file)
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
         * dGPU sector layout.
         */
        if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
-               base |= BIT(39);
+               base |= BIT_ULL(39);
 #endif
 
        tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
index 7b88261f57bb698bac7e38495a8843e6ac6b1658..0ea320c1092bd2e68308f546037d0af7375d6b0c 100644 (file)
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
 
                err = reset_control_assert(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to assert SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
        }
 
        err = clk_prepare_enable(sor->clk);
        if (err < 0) {
                dev_err(sor->dev, "failed to enable clock: %d\n", err);
-               return err;
+               goto rpm_put;
        }
 
        usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
                        dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
                                err);
                        clk_disable_unprepare(sor->clk);
-                       return err;
+                       goto rpm_put;
                }
 
                reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
        }
 
        return 0;
+
+rpm_put:
+       if (sor->rst)
+               pm_runtime_put(sor->dev);
+
+       return err;
 }
 
 static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
                if (!sor->aux)
                        return -EPROBE_DEFER;
 
-               if (get_device(&sor->aux->ddc.dev)) {
-                       if (try_module_get(sor->aux->ddc.owner))
-                               sor->output.ddc = &sor->aux->ddc;
-                       else
-                               put_device(&sor->aux->ddc.dev);
-               }
+               if (get_device(sor->aux->dev))
+                       sor->output.ddc = &sor->aux->ddc;
        }
 
        if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 
        err = tegra_sor_parse_dt(sor);
        if (err < 0)
-               return err;
+               goto put_aux;
 
        err = tegra_output_probe(&sor->output);
-       if (err < 0)
-               return dev_err_probe(&pdev->dev, err,
-                                    "failed to probe output\n");
+       if (err < 0) {
+               dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+               goto put_aux;
+       }
 
        if (sor->ops && sor->ops->probe) {
                err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sor);
        pm_runtime_enable(&pdev->dev);
 
-       INIT_LIST_HEAD(&sor->client.list);
+       host1x_client_init(&sor->client);
        sor->client.ops = &sor_client_ops;
        sor->client.dev = &pdev->dev;
 
-       err = host1x_client_register(&sor->client);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
-                       err);
-               goto rpm_disable;
-       }
-
        /*
         * On Tegra210 and earlier, provide our own implementation for the
         * pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
                                      sor->index);
                if (!name) {
                        err = -ENOMEM;
-                       goto unregister;
+                       goto uninit;
                }
 
                err = host1x_client_resume(&sor->client);
                if (err < 0) {
                        dev_err(sor->dev, "failed to resume: %d\n", err);
-                       goto unregister;
+                       goto uninit;
                }
 
                sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
                err = PTR_ERR(sor->clk_pad);
                dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
                        err);
-               goto unregister;
+               goto uninit;
+       }
+
+       err = __host1x_client_register(&sor->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               goto uninit;
        }
 
        return 0;
 
-unregister:
-       host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+       host1x_client_exit(&sor->client);
        pm_runtime_disable(&pdev->dev);
 remove:
+       if (sor->aux)
+               sor->output.ddc = NULL;
+
        tegra_output_remove(&sor->output);
+put_aux:
+       if (sor->aux)
+               put_device(sor->aux->dev);
+
        return err;
 }
 
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
 
+       if (sor->aux) {
+               put_device(sor->aux->dev);
+               sor->output.ddc = NULL;
+       }
+
        tegra_output_remove(&sor->output);
 
        return 0;
index cfd0b929239734b3a0dd998ebf70d9f1fd31fc2c..ebcffe794adb8c49230e8fe7e9a4cb770992600c 100644 (file)
@@ -1172,7 +1172,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
                return -EBUSY;
 
-       if (!ttm_bo_get_unless_zero(bo)) {
+       if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+           bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+           bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+           !ttm_bo_get_unless_zero(bo)) {
                if (locked)
                        dma_resv_unlock(bo->base.resv);
                return -EBUSY;
index 510e3e001dabe02c196c09d2cc10b0843daa9d7e..3d9c62b93e2990d941aba47fe3d8b80d1e6775d5 100644 (file)
@@ -143,14 +143,8 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 
                for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
                        list_for_each_entry(bo, &man->lru[j], lru) {
-                               uint32_t num_pages;
+                               uint32_t num_pages = PFN_UP(bo->base.size);
 
-                               if (!bo->ttm ||
-                                   bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
-                                   bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
-                                       continue;
-
-                               num_pages = bo->ttm->num_pages;
                                ret = ttm_bo_swapout(bo, ctx, gfp_flags);
                                /* ttm_bo_swapout has dropped the lru_lock */
                                if (!ret)
index bb5529a7a9c28bea238222189598579dea865597..948b3a58aad16dff28ad371b613bfb0cea312ff1 100644 (file)
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                if (!old_hvs_state->fifo_state[channel].in_use)
                        continue;
 
-               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
                if (ret)
                        drm_err(dev, "Timed out waiting for commit\n");
        }
index bd5b8eb58b180696b217f914d550af1419a2978d..090529d0d5dcda1995eb67a50ceba7f798abcc7a 100644 (file)
@@ -197,12 +197,6 @@ struct vc4_vec_connector {
        struct drm_encoder *encoder;
 };
 
-static inline struct vc4_vec_connector *
-to_vc4_vec_connector(struct drm_connector *connector)
-{
-       return container_of(connector, struct vc4_vec_connector, base);
-}
-
 enum vc4_vec_tv_mode_id {
        VC4_VEC_TV_MODE_NTSC,
        VC4_VEC_TV_MODE_NTSC_J,
index 46f69c532b6b7add70b9f9f485fb442f66483cf0..218e3718fd68c6ebc31663dba988b3755301f6c2 100644 (file)
@@ -735,6 +735,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 }
 EXPORT_SYMBOL(host1x_driver_unregister);
 
+/**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+       INIT_LIST_HEAD(&client->list);
+       __mutex_init(&client->lock, "host1x client lock", key);
+       client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+       mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
 /**
  * __host1x_client_register() - register a host1x client
  * @client: host1x client
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
 {
        struct host1x *host1x;
        int err;
 
-       INIT_LIST_HEAD(&client->list);
-       __mutex_init(&client->lock, "host1x client lock", key);
-       client->usecount = 0;
-
        mutex_lock(&devices_lock);
 
        list_for_each_entry(host1x, &devices, list) {
index 4bf263c2d61a4613d48add35fc96f42a1e66c02b..160554903ef96ad7230a9e5017ae2b8f4feb4e80 100644 (file)
@@ -93,11 +93,11 @@ menu "Special HID drivers"
        depends on HID
 
 config HID_A4TECH
-       tristate "A4 tech mice"
+       tristate "A4TECH mice"
        depends on HID
        default !EXPERT
        help
-       Support for A4 tech X5 and WOP-35 / Trust 450L mice.
+       Support for some A4TECH mice with two scroll wheels.
 
 config HID_ACCUTOUCH
        tristate "Accutouch touch device"
@@ -922,6 +922,21 @@ config HID_SAMSUNG
        help
        Support for Samsung InfraRed remote control or keyboards.
 
+config HID_SEMITEK
+       tristate "Semitek USB keyboards"
+       depends on HID
+       help
+       Support for Semitek USB keyboards that are not fully compliant
+       with the HID standard.
+
+       There are many variants, including:
+       - GK61, GK64, GK68, GK84, GK96, etc.
+       - SK61, SK64, SK68, SK84, SK96, etc.
+       - Dierya DK61/DK66
+       - Tronsmart TK09R
+       - Woo-dy
+       - X-Bows Nature/Knight
+
 config HID_SONY
        tristate "Sony PS2/3/4 accessories"
        depends on USB_HID
index 193431ec4db84c1bcbd5ce3ee77aee53ef1a8110..1ea1a7c0b20fe76eb0119b734dee84ee4e59408f 100644 (file)
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID_ROCCAT)    += hid-roccat.o hid-roccat-common.o \
 obj-$(CONFIG_HID_RMI)          += hid-rmi.o
 obj-$(CONFIG_HID_SAITEK)       += hid-saitek.o
 obj-$(CONFIG_HID_SAMSUNG)      += hid-samsung.o
+obj-$(CONFIG_HID_SEMITEK)      += hid-semitek.o
 obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
 obj-$(CONFIG_HID_SONY)         += hid-sony.o
 obj-$(CONFIG_HID_SPEEDLINK)    += hid-speedlink.o
index 2ab38b715347717142c9805a902869c25fc10cf6..3589d9945da1cb1b5b233ca4fedae504c8ef1d2b 100644 (file)
@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work)
        sensor_index = req_node->sensor_idx;
        report_id = req_node->report_id;
        node_type = req_node->report_type;
+       kfree(req_node);
 
        if (node_type == HID_FEATURE_REPORT) {
                report_size = get_feature_report(sensor_index, report_id,
@@ -142,7 +143,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
        int rc, i;
 
        dev = &privdata->pdev->dev;
-       cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+       cl_data = devm_kzalloc(dev, sizeof(*cl_data), GFP_KERNEL);
        if (!cl_data)
                return -ENOMEM;
 
@@ -175,12 +176,12 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                        rc = -EINVAL;
                        goto cleanup;
                }
-               cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL);
+               cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
                if (!cl_data->feature_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
                }
-               cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL);
+               cl_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
                if (!cl_data->input_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -189,7 +190,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                info.sensor_idx = cl_idx;
                info.dma_address = cl_data->sensor_dma_addr[i];
 
-               cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
+               cl_data->report_descr[i] =
+                       devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
                if (!cl_data->report_descr[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -214,11 +216,11 @@ cleanup:
                                          cl_data->sensor_virt_addr[i],
                                          cl_data->sensor_dma_addr[i]);
                }
-               kfree(cl_data->feature_report[i]);
-               kfree(cl_data->input_report[i]);
-               kfree(cl_data->report_descr[i]);
+               devm_kfree(dev, cl_data->feature_report[i]);
+               devm_kfree(dev, cl_data->input_report[i]);
+               devm_kfree(dev, cl_data->report_descr[i]);
        }
-       kfree(cl_data);
+       devm_kfree(dev, cl_data);
        return rc;
 }
 
@@ -241,6 +243,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
                                          cl_data->sensor_dma_addr[i]);
                }
        }
-       kfree(cl_data);
        return 0;
 }
index 4f989483aa03d88f0afdbbe10ed1daea39f23cdf..5ad1e7acd294e656068b6a3e2cc11a78b15c38ea 100644 (file)
@@ -162,9 +162,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
        int i;
 
        for (i = 0; i < cli_data->num_hid_devices; ++i) {
-               kfree(cli_data->feature_report[i]);
-               kfree(cli_data->input_report[i]);
-               kfree(cli_data->report_descr[i]);
                if (cli_data->hid_sensor_hubs[i]) {
                        kfree(cli_data->hid_sensor_hubs[i]->driver_data);
                        hid_destroy_device(cli_data->hid_sensor_hubs[i]);
index 3a8c4a5971f70d51e7c21f0ecf2c4417c102bd80..2cbc32dda7f74576a30249db05aa3660dae5035e 100644 (file)
@@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = {
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95),
+               .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { }
 };
 MODULE_DEVICE_TABLE(hid, a4_devices);
index 2ab22b92594182816883da368b7c3209b81aba56..fca8fc78a78a310449601c9ed4c7aec66477cd47 100644 (file)
@@ -79,10 +79,9 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_T100_KEYBOARD            BIT(6)
 #define QUIRK_T100CHI                  BIT(7)
 #define QUIRK_G752_KEYBOARD            BIT(8)
-#define QUIRK_T101HA_DOCK              BIT(9)
-#define QUIRK_T90CHI                   BIT(10)
-#define QUIRK_MEDION_E1239T            BIT(11)
-#define QUIRK_ROG_NKEY_KEYBOARD                BIT(12)
+#define QUIRK_T90CHI                   BIT(9)
+#define QUIRK_MEDION_E1239T            BIT(10)
+#define QUIRK_ROG_NKEY_KEYBOARD                BIT(11)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -335,7 +334,7 @@ static int asus_raw_event(struct hid_device *hdev,
        if (drvdata->quirks & QUIRK_MEDION_E1239T)
                return asus_e1239t_event(drvdata, data, size);
 
-       if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+       if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
                /*
                 * Skip these report ID, the device emits a continuous stream associated
                 * with the AURA mode it is in which looks like an 'echo'.
@@ -355,6 +354,16 @@ static int asus_raw_event(struct hid_device *hdev,
                                return -1;
                        }
                }
+               if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+                       /*
+                        * G713 and G733 send these codes on some keypresses, depending on
+                        * the key pressed it can trigger a shutdown event if not caught.
+                       */
+                       if(data[0] == 0x02 && data[1] == 0x30) {
+                               return -1;
+                       }
+               }
+
        }
 
        return 0;
@@ -1072,11 +1081,6 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
-       /* use hid-multitouch for T101HA touchpad */
-       if (id->driver_data & QUIRK_T101HA_DOCK &&
-           hdev->collection->usage == HID_GD_MOUSE)
-               return -ENODEV;
-
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "Asus hw start failed: %d\n", ret);
@@ -1230,8 +1234,6 @@ static const struct hid_device_id asus_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
-               USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
@@ -1239,6 +1241,12 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
                QUIRK_MEDION_E1239T },
+       /*
+        * Note bind to the HID_GROUP_GENERIC group, so that we only bind to the keyboard
+        * part, while letting hid-multitouch.c handle the touchpad.
+        */
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+               USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
index 0ae9f6df59d10726b56ba7e295e41edf5ef0dd92..0de2788b9814ca1941251c7836561bd37cf13ab4 100644 (file)
@@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
        case BUS_I2C:
                bus = "I2C";
                break;
+       case BUS_VIRTUAL:
+               bus = "VIRTUAL";
+               break;
        default:
                bus = "<UNKNOWN>";
        }
@@ -2588,7 +2591,6 @@ int hid_check_keys_pressed(struct hid_device *hid)
 
        return 0;
 }
-
 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
 
 static int __init hid_init(void)
index 59f8d716d78f5aad6508c8f0b6b94e885214f5ce..a311fb87b02a126009d9034edbf99df451f57b23 100644 (file)
@@ -930,6 +930,9 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_APPSELECT] = "AppSelect",
        [KEY_SCREENSAVER] = "ScreenSaver",
        [KEY_VOICECOMMAND] = "VoiceCommand",
+       [KEY_ASSISTANT] = "Assistant",
+       [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
+       [KEY_EMOJI_PICKER] = "EmojiPicker",
        [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
        [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
        [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
index a5751607ce24aedfa7722e21355bebf56b625c6a..f43a8406cb9a97e86e516b02535a6e80f6ad0d48 100644 (file)
@@ -201,7 +201,7 @@ struct ft260_i2c_write_request_report {
        u8 address;             /* 7-bit I2C address */
        u8 flag;                /* I2C transaction condition */
        u8 length;              /* data payload length */
-       u8 data[60];            /* data payload */
+       u8 data[FT260_WR_DATA_MAX]; /* data payload */
 } __packed;
 
 struct ft260_i2c_read_request_report {
@@ -249,7 +249,10 @@ static int ft260_hid_feature_report_get(struct hid_device *hdev,
 
        ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
                                 HID_REQ_GET_REPORT);
-       memcpy(data, buf, len);
+       if (likely(ret == len))
+               memcpy(data, buf, len);
+       else if (ret >= 0)
+               ret = -EIO;
        kfree(buf);
        return ret;
 }
@@ -298,7 +301,7 @@ static int ft260_xfer_status(struct ft260_device *dev)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
                                           (u8 *)&report, sizeof(report));
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                hid_err(hdev, "failed to retrieve status: %d\n", ret);
                return ret;
        }
@@ -429,6 +432,9 @@ static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
        struct ft260_i2c_write_request_report *rep =
                (struct ft260_i2c_write_request_report *)dev->write_buf;
 
+       if (data_len >= sizeof(rep->data))
+               return -EINVAL;
+
        rep->address = addr;
        rep->data[0] = cmd;
        rep->length = data_len + 1;
@@ -721,10 +727,9 @@ static int ft260_get_system_config(struct hid_device *hdev,
 
        ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
                                           (u8 *)cfg, len);
-       if (ret != len) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve system status\n");
-               if (ret >= 0)
-                       return -EIO;
+               return ret;
        }
        return 0;
 }
@@ -777,8 +782,8 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
 }
@@ -789,8 +794,8 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
 }
@@ -941,10 +946,8 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
                                           (u8 *)&version, sizeof(version));
-       if (ret != sizeof(version)) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve chip version\n");
-               if (ret >= 0)
-                       ret = -EIO;
                goto err_hid_close;
        }
 
index 898871c8c768e51d5c3d5bb54dd75aa1e42d5d59..29ccb0accfba81a7383aca22a261c2d5e5ab84ad 100644 (file)
@@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
        { }
 };
+MODULE_DEVICE_TABLE(hid, gt683r_led_id);
 
 static void gt683r_brightness_set(struct led_classdev *led_cdev,
                                enum led_brightness brightness)
index 84b8da3e7d09a71de6c782a5acce727224ba26e3..b84a0a11e05bf57601b09927c035fcd45d14e7aa 100644 (file)
@@ -26,6 +26,7 @@
 #define USB_DEVICE_ID_A4TECH_WCP32PU   0x0006
 #define USB_DEVICE_ID_A4TECH_X5_005D   0x000a
 #define USB_DEVICE_ID_A4TECH_RP_649    0x001a
+#define USB_DEVICE_ID_A4TECH_NB_95     0x022b
 
 #define USB_VENDOR_ID_AASHIMA          0x06d6
 #define USB_DEVICE_ID_AASHIMA_GAMEPAD  0x0025
 
 #define USB_VENDOR_ID_CORSAIR          0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K90      0x1b02
-
-#define USB_VENDOR_ID_CORSAIR           0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K70R      0x1b09
 #define USB_DEVICE_ID_CORSAIR_K95RGB    0x1b11
 #define USB_DEVICE_ID_CORSAIR_M65RGB    0x1b12
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 #define USB_DEVICE_ID_LENOVO_X1_TAB3   0x60b5
+#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E    0x600e
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D     0x608d
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019     0x6019
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E     0x602e
 #define USB_DEVICE_ID_SAITEK_X52       0x075c
 #define USB_DEVICE_ID_SAITEK_X52_2     0x0255
 #define USB_DEVICE_ID_SAITEK_X52_PRO   0x0762
+#define USB_DEVICE_ID_SAITEK_X65       0x0b6a
 
 #define USB_VENDOR_ID_SAMSUNG          0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD      0x0023
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2     0x0027
 
+#define USB_VENDOR_ID_SEMITEK  0x1ea7
+#define USB_DEVICE_ID_SEMITEK_KEYBOARD 0x0907
+
 #define USB_VENDOR_ID_SENNHEISER       0x1395
 #define USB_DEVICE_ID_SENNHEISER_BTD500USB     0x002c
 
 #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A      0x2819
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A      0x6e21
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
index 18f5e28d475cd52d5a411418e722f2936cad3f78..abbfa91e73e4307142e184969541f567b991f289 100644 (file)
@@ -964,6 +964,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
+
+               case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);    break;
+
                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
                case 0x0e2: map_key_clear(KEY_MUTE);            break;
                case 0x0e5: map_key_clear(KEY_BASSBOOST);       break;
index d598094dadd0c52db909bbd44b2097ad53207bab..fee4e54a3ce083535656e6558c2f7dcb1e24f9e7 100644 (file)
@@ -1263,6 +1263,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
        int status;
 
        long flags = (long) data[2];
+       *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
 
        if (flags & 0x80)
                switch (flags & 0x07) {
index 2bb473d8c424e0c190b05665e7581fe034b4ff42..8bcaee4ccae0325a2c114d1811a5653d81169147 100644 (file)
@@ -693,7 +693,7 @@ static int magicmouse_probe(struct hid_device *hdev,
        if (id->vendor == USB_VENDOR_ID_APPLE &&
            id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
            hdev->type != HID_TYPE_USBMOUSE)
-               return 0;
+               return -ENODEV;
 
        msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
        if (msc == NULL) {
@@ -779,7 +779,10 @@ err_stop_hw:
 static void magicmouse_remove(struct hid_device *hdev)
 {
        struct magicmouse_sc *msc = hid_get_drvdata(hdev);
-       cancel_delayed_work_sync(&msc->work);
+
+       if (msc)
+               cancel_delayed_work_sync(&msc->work);
+
        hid_hw_stop(hdev);
 }
 
index 9d9f3e1bd5f418bb61ccb50eabf6e0b1711b2bb2..2e4fb76c45f3d2fe24f9f73eb3244aee618b3088 100644 (file)
@@ -70,6 +70,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_WIN8_PTP_BUTTONS      BIT(18)
 #define MT_QUIRK_SEPARATE_APP_REPORT   BIT(19)
 #define MT_QUIRK_FORCE_MULTI_INPUT     BIT(20)
+#define MT_QUIRK_DISABLE_WAKEUP                BIT(21)
 
 #define MT_INPUTMODE_TOUCHSCREEN       0x02
 #define MT_INPUTMODE_TOUCHPAD          0x03
@@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
 #define MT_CLS_EXPORT_ALL_INPUTS               0x0013
 /* reserved                                    0x0014 */
 #define MT_CLS_WIN_8_FORCE_MULTI_INPUT         0x0015
+#define MT_CLS_WIN_8_DISABLE_WAKEUP            0x0016
 
 /* vendor specific classes */
 #define MT_CLS_3M                              0x0101
@@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = {
                        MT_QUIRK_WIN8_PTP_BUTTONS |
                        MT_QUIRK_FORCE_MULTI_INPUT,
                .export_all_inputs = true },
+       { .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               .quirks = MT_QUIRK_ALWAYS_VALID |
+                       MT_QUIRK_IGNORE_DUPLICATES |
+                       MT_QUIRK_HOVERING |
+                       MT_QUIRK_CONTACT_CNT_ACCURATE |
+                       MT_QUIRK_STICKY_FINGERS |
+                       MT_QUIRK_WIN8_PTP_BUTTONS |
+                       MT_QUIRK_DISABLE_WAKEUP,
+               .export_all_inputs = true },
 
        /*
         * vendor specific classes
@@ -604,9 +615,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
                if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
                        continue;
 
-               for (n = 0; n < field->report_count; n++) {
-                       if (field->usage[n].hid == HID_DG_CONTACTID)
-                               rdata->is_mt_collection = true;
+               if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+                       for (n = 0; n < field->report_count; n++) {
+                               if (field->usage[n].hid == HID_DG_CONTACTID) {
+                                       rdata->is_mt_collection = true;
+                                       break;
+                               }
+                       }
                }
        }
 
@@ -759,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        return 1;
                case HID_DG_CONFIDENCE:
                        if ((cls->name == MT_CLS_WIN_8 ||
-                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
+                            cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
                                (field->application == HID_DG_TOUCHPAD ||
                                 field->application == HID_DG_TOUCHSCREEN))
                                app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1576,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
                /* we do not set suffix = "Touchscreen" */
                hi->input->name = hdev->name;
                break;
-       case HID_DG_STYLUS:
-               /* force BTN_STYLUS to allow tablet matching in udev */
-               __set_bit(BTN_STYLUS, hi->input->keybit);
-               break;
        case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
                suffix = "Custom Media Keys";
                break;
+       case HID_DG_STYLUS:
+               /* force BTN_STYLUS to allow tablet matching in udev */
+               __set_bit(BTN_STYLUS, hi->input->keybit);
+               fallthrough;
        case HID_DG_PEN:
                suffix = "Stylus";
                break;
@@ -1749,8 +1765,14 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 #ifdef CONFIG_PM
 static int mt_suspend(struct hid_device *hdev, pm_message_t state)
 {
+       struct mt_device *td = hid_get_drvdata(hdev);
+
        /* High latency is desirable for power savings during S3/S0ix */
-       mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+       if (td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP)
+               mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+       else
+               mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+
        return 0;
 }
 
@@ -1809,6 +1831,12 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
                        USB_DEVICE_ID_ANTON_TOUCH_PAD) },
 
+       /* Asus T101HA */
+       { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+                          USB_VENDOR_ID_ASUSTEK,
+                          USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
+
        /* Asus T304UA */
        { .driver_data = MT_CLS_ASUS,
                HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
index 3dd6f15f2a67f080f223c97d5eb7c822212a20ed..51b39bda9a9d2d1a9aa0cc6a0e2d3d8a61e5c247 100644 (file)
@@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
@@ -158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
@@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
new file mode 100644 (file)
index 0000000..ba6607d
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  HID driver for Semitek keyboards
+ *
+ *  Copyright (c) 2021 Benjamin Moody
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+                                  unsigned int *rsize)
+{
+       /* In the report descriptor for interface 2, fix the incorrect
+          description of report ID 0x04 (the report contains a
+          bitmask, not an array of keycodes.) */
+       if (*rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) {
+               hid_info(hdev, "fixing up Semitek report descriptor\n");
+               rdesc[0x84] = 0x02;
+       }
+       return rdesc;
+}
+
+static const struct hid_device_id semitek_devices[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_SEMITEK, USB_DEVICE_ID_SEMITEK_KEYBOARD) },
+       { }
+};
+MODULE_DEVICE_TABLE(hid, semitek_devices);
+
+static struct hid_driver semitek_driver = {
+       .name = "semitek",
+       .id_table = semitek_devices,
+       .report_fixup = semitek_report_fixup,
+};
+module_hid_driver(semitek_driver);
+
+MODULE_LICENSE("GPL");
index 2e6662173a79c0b6f2b6a82496248c5227fce7cc..32c2306e240d605944b1d0d2fd957c897f1d407a 100644 (file)
@@ -387,7 +387,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
        struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
        int index, field_index, usage;
        char name[HID_CUSTOM_NAME_LENGTH];
-       int value;
+       int value, ret;
 
        if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
                   name) == 3) {
@@ -403,8 +403,10 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
 
                report_id = sensor_inst->fields[field_index].attribute.
                                                                report_id;
-               sensor_hub_set_feature(sensor_inst->hsdev, report_id,
-                                      index, sizeof(value), &value);
+               ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+                                            index, sizeof(value), &value);
+               if (ret)
+                       return ret;
        } else
                return -EINVAL;
 
index 95cf88f3bafb9c6c00d849529aeed5c0995abd31..6abd3e2a9094c248ebbf0057124b003f4e96e7de 100644 (file)
@@ -209,16 +209,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        buffer_size = buffer_size / sizeof(__s32);
        if (buffer_size) {
                for (i = 0; i < buffer_size; ++i) {
-                       hid_set_field(report->field[field_index], i,
-                                     (__force __s32)cpu_to_le32(*buf32));
+                       ret = hid_set_field(report->field[field_index], i,
+                                           (__force __s32)cpu_to_le32(*buf32));
+                       if (ret)
+                               goto done_proc;
+
                        ++buf32;
                }
        }
        if (remaining_bytes) {
                value = 0;
                memcpy(&value, (u8 *)buf32, remaining_bytes);
-               hid_set_field(report->field[field_index], i,
-                             (__force __s32)cpu_to_le32(value));
+               ret = hid_set_field(report->field[field_index], i,
+                                   (__force __s32)cpu_to_le32(value));
+               if (ret)
+                       goto done_proc;
        }
        hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
        hid_hw_wait(hsdev->hdev);
index 2e452c6e8ef402216159a28eb80b8a842fec371d..f643b1cb112d591dbf2f9f0bfdf232760a75698c 100644 (file)
@@ -312,7 +312,7 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
        }
 
        tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
-       if (!tm_wheel->model_request) {
+       if (!tm_wheel->change_request) {
                ret = -ENOMEM;
                goto error5;
        }
index 9993133989a58c032d1318b6d3356462dd788f66..46474612e73c66fbf924fef484ac3c2f31953e36 100644 (file)
@@ -45,6 +45,7 @@
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME          BIT(5)
 #define I2C_HID_QUIRK_BAD_INPUT_SIZE           BIT(6)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET    BIT(7)
 
 
 /* flags */
@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_RESET_ON_RESUME },
        { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
                I2C_HID_QUIRK_BAD_INPUT_SIZE },
+       /*
+        * Sending the wakeup after reset actually break ELAN touchscreen controller
+        */
+       { USB_VENDOR_ID_ELAN, HID_ANY_ID,
+                I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
        { 0, 0 }
 };
 
@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        }
 
        /* At least some SIS devices need this after reset */
-       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
+               ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 
 out_unlock:
        mutex_unlock(&ihid->reset_lock);
@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
        hid->product = le16_to_cpu(ihid->hdesc.wProductID);
 
-       snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
-                client->name, hid->vendor, hid->product);
+       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+                client->name, (u16)hid->vendor, (u16)hid->product);
        strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
 
        ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
index 21b87e4003afcacfeed0bf49cbef4a19dff61c94..07e3cbc86bef1076796a905938cc9b27e57714ad 100644 (file)
@@ -28,6 +28,8 @@
 #define EHL_Ax_DEVICE_ID       0x4BB3
 #define TGL_LP_DEVICE_ID       0xA0FC
 #define TGL_H_DEVICE_ID                0x43FC
+#define ADL_S_DEVICE_ID                0x7AF8
+#define ADL_P_DEVICE_ID                0x51FC
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 06081cf9b85a22f84c94add1f0f57139a87d7fe8..a6d5173ac0030035a8b33ed1acc1b19f4ee719d6 100644 (file)
@@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index 7b27ec3922322bdbfbff77b2ba1c9df13c8d8531..5571e74abe91b803398ddf19a11136dd70b21781 100644 (file)
@@ -168,9 +168,9 @@ int surface_hid_device_add(struct surface_hid_device *shid)
 
        shid->hid->dev.parent = shid->dev;
        shid->hid->bus = BUS_HOST;
-       shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
-       shid->hid->product = cpu_to_le16(shid->attrs.product);
-       shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
+       shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
+       shid->hid->product = get_unaligned_le16(&shid->attrs.product);
+       shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
        shid->hid->country = shid->hid_desc.country_code;
 
        snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
index 86257ce6d61982256648ef36fc511f63205cb519..4e9077363c962c0e5425d698887ada7096cf4731 100644 (file)
@@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
        raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
        dir = usbhid->ctrl[usbhid->ctrltail].dir;
 
-       len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       len = hid_report_len(report);
        if (dir == USB_DIR_OUT) {
                usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
                usbhid->urbctrl->transfer_buffer_length = len;
index ea126c50acc3bb9c22ae158146bc268cc2df7684..3b4ee21cd811198c7391b4855c500b67db10205d 100644 (file)
@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
 
        if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
            pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+               error = -EPERM;
                hid_notice(hid,
                           "device does not support device managed pool\n");
                goto fail;
index 5677263bcf0dea99d8cb0e2850c7e334d8da6bf5..483cd757abd339a402ff6bd3ccf53dabd4b7eda1 100644 (file)
@@ -485,7 +485,7 @@ static int adm9240_in_write(struct device *dev, u32 attr, int channel, long val)
                reg = ADM9240_REG_IN_MIN(channel);
                break;
        case hwmon_in_max:
-               reg = ADM9240_REG_IN(channel);
+               reg = ADM9240_REG_IN_MAX(channel);
                break;
        default:
                return -EOPNOTSUPP;
index 3a5807e4a2efb8d5d084a3083e9b8c328d0be9b5..731d5117f9f10879506a9f123f46e4f1b7513de2 100644 (file)
@@ -355,7 +355,7 @@ static umode_t corsairpsu_hwmon_power_is_visible(const struct corsairpsu_data *p
                return 0444;
        default:
                return 0;
-       };
+       }
 }
 
 static umode_t corsairpsu_hwmon_in_is_visible(const struct corsairpsu_data *priv, u32 attr,
@@ -376,7 +376,7 @@ static umode_t corsairpsu_hwmon_in_is_visible(const struct corsairpsu_data *priv
                break;
        default:
                break;
-       };
+       }
 
        return res;
 }
@@ -771,6 +771,16 @@ static int corsairpsu_raw_event(struct hid_device *hdev, struct hid_report *repo
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int corsairpsu_resume(struct hid_device *hdev)
+{
+       struct corsairpsu_data *priv = hid_get_drvdata(hdev);
+
+       /* some PSUs turn off the microcontroller during standby, so a reinit is required */
+       return corsairpsu_init(priv);
+}
+#endif
+
 static const struct hid_device_id corsairpsu_idtable[] = {
        { HID_USB_DEVICE(0x1b1c, 0x1c03) }, /* Corsair HX550i */
        { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
@@ -793,6 +803,10 @@ static struct hid_driver corsairpsu_driver = {
        .probe          = corsairpsu_probe,
        .remove         = corsairpsu_remove,
        .raw_event      = corsairpsu_raw_event,
+#ifdef CONFIG_PM
+       .resume         = corsairpsu_resume,
+       .reset_resume   = corsairpsu_resume,
+#endif
 };
 module_hid_driver(corsairpsu_driver);
 
index 2970892bed829026ff99ba001f12d4b0a012fe66..f2221ca0aa7be03d5959782dbf0f9b4024fdd67d 100644 (file)
@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
-       if (disallow_fan_support && index >= 8)
+       if (disallow_fan_support && index >= 20)
                return 0;
        if (disallow_fan_type_call &&
-           (index == 9 || index == 12 || index == 15))
+           (index == 21 || index == 25 || index == 28))
                return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
index ac4adb44b224d783d44f51b2ecc2a4bffc7ab8c2..97ab491d2922cf0c81611a50ac3f09b221f0f944 100644 (file)
@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
        struct device *dev = &client->dev;
        struct device *hwmon_dev;
        struct lm80_data *data;
-       int rv;
 
        data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
        if (!data)
@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
        lm80_init_client(client);
 
        /* A few vars need to be filled upon startup */
-       rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
-       if (rv < 0)
-               return rv;
-       data->fan[f_min][0] = rv;
-       rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
-       if (rv < 0)
-               return rv;
-       data->fan[f_min][1] = rv;
+       data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+       data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
 
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                           data, lm80_groups);
index 4382105bf14205d5658401464b839b3fa40c0952..2a4bed0ab226bdfd76d80b8ebe1a43bfb8a5e71d 100644 (file)
@@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
 
        fwnode_for_each_available_child_node(fwnode, child) {
                ret = fwnode_property_read_u32(child, "reg", &addr);
-               if (ret < 0)
+               if (ret < 0) {
+                       fwnode_handle_put(child);
                        return ret;
+               }
 
-               if (addr > 1)
+               if (addr > 1) {
+                       fwnode_handle_put(child);
                        return -EINVAL;
+               }
 
                ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
                if (!ret)
index f1ac153d0b568a94c771af555569f9517bab248e..967532afb1c01caeed05039a023d50473bf4bf32 100644 (file)
@@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
                return rc;
 
        /* limit the maximum rate of polling the OCC */
-       if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
+       if (time_after(jiffies, occ->next_update)) {
                rc = occ_poll(occ);
-               occ->last_update = jiffies;
+               occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
        } else {
                rc = occ->last_error;
        }
@@ -1165,6 +1165,7 @@ int occ_setup(struct occ *occ, const char *name)
                return rc;
        }
 
+       occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
        occ_parse_poll_response(occ);
 
        rc = occ_setup_sensor_attrs(occ);
index 67e6968b8978ee720fd0356203746754977dc728..e6df719770e81298d026df5050b962f8054e7871 100644 (file)
@@ -99,7 +99,7 @@ struct occ {
        u8 poll_cmd_data;               /* to perform OCC poll command */
        int (*send_cmd)(struct occ *occ, u8 *cmd);
 
-       unsigned long last_update;
+       unsigned long next_update;
        struct mutex lock;              /* lock OCC access */
 
        struct device *hwmon;
index b177987286ae07f1d973c31e850cbccf750ea2bf..aec294cc72d1f249743cf8549184eb309ae67b83 100644 (file)
@@ -37,6 +37,8 @@ struct fsp3y_data {
        struct pmbus_driver_info info;
        int chip;
        int page;
+
+       bool vout_linear_11;
 };
 
 #define to_fsp3y_data(x) container_of(x, struct fsp3y_data, info)
@@ -57,7 +59,7 @@ static int page_log_to_page_real(int page_log, enum chips chip)
                case YH5151E_PAGE_12V_LOG:
                        return YH5151E_PAGE_12V_REAL;
                case YH5151E_PAGE_5V_LOG:
-                       return YH5151E_PAGE_5V_LOG;
+                       return YH5151E_PAGE_5V_REAL;
                case YH5151E_PAGE_3V3_LOG:
                        return YH5151E_PAGE_3V3_REAL;
                }
@@ -103,8 +105,16 @@ static int set_page(struct i2c_client *client, int page_log)
 
 static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
 {
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       struct fsp3y_data *data = to_fsp3y_data(info);
        int rv;
 
+       /*
+        * Inject an exponent for non-compliant YH5151-E.
+        */
+       if (data->vout_linear_11 && reg == PMBUS_VOUT_MODE)
+               return 0x1A;
+
        rv = set_page(client, page);
        if (rv < 0)
                return rv;
@@ -114,6 +124,8 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
 
 static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase, int reg)
 {
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       struct fsp3y_data *data = to_fsp3y_data(info);
        int rv;
 
        /*
@@ -144,7 +156,17 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase,
        if (rv < 0)
                return rv;
 
-       return i2c_smbus_read_word_data(client, reg);
+       rv = i2c_smbus_read_word_data(client, reg);
+       if (rv < 0)
+               return rv;
+
+       /*
+        * Handle YH-5151E non-compliant linear11 vout voltage.
+        */
+       if (data->vout_linear_11 && reg == PMBUS_READ_VOUT)
+               rv = sign_extend32(rv, 10) & 0xffff;
+
+       return rv;
 }
 
 static struct pmbus_driver_info fsp3y_info[] = {
@@ -233,6 +255,25 @@ static int fsp3y_probe(struct i2c_client *client)
 
        data->info = fsp3y_info[data->chip];
 
+       /*
+        * YH-5151E sometimes reports vout in linear11 and sometimes in
+        * linear16. This depends on the exact individual piece of hardware. One
+        * YH-5151E can use linear16 and another might use linear11 instead.
+        *
+        * The format can be recognized by reading VOUT_MODE - if it doesn't
+        * report a valid exponent, then vout uses linear11. Otherwise, the
+        * device is compliant and uses linear16.
+        */
+       data->vout_linear_11 = false;
+       if (data->chip == yh5151e) {
+               rv = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+               if (rv < 0)
+                       return rv;
+
+               if (rv == 0xFF)
+                       data->vout_linear_11 = true;
+       }
+
        return pmbus_do_probe(client, &data->info);
 }
 
index 40597a9e799f5d754861f13af8e9003515e694ae..1a8caff1ac5f6b03cd7f17d26a69a8074de14c92 100644 (file)
@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
                info->read_word_data = raa_dmpvr2_read_word_data;
                break;
        case raa_dmpvr2_2rail_nontc:
-               info->func[0] &= ~PMBUS_HAVE_TEMP;
-               info->func[1] &= ~PMBUS_HAVE_TEMP;
+               info->func[0] &= ~PMBUS_HAVE_TEMP3;
+               info->func[1] &= ~PMBUS_HAVE_TEMP3;
                fallthrough;
        case raa_dmpvr2_2rail:
                info->pages = 2;
index b6e8b20466f15207bc4b7262b27da53a37753043..fa298b4265a1c6807d074549d684040b11a628c0 100644 (file)
@@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client)
                dev_err(&client->dev, "Failed to read Manufacturer ID\n");
                return ret;
        }
-       if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+       if (ret != 6 || strncmp(buf, "DELTA", 5)) {
                buf[ret] = '\0';
                dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
                return -ENODEV;
index 25aac40f2764aec4b3223b46d6d3919773b8b4e7..919877970ae3b1c342b7a37ce7da2ffac1c45204 100644 (file)
@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
 
        scpi_scale_reading(&value, sensor);
 
+       /*
+        * Temperature sensor values are treated as signed values based on
+        * observation even though that is not explicitly specified, and
+        * because an unsigned u64 temperature does not really make practical
+        * sense especially when the temperature is below zero degrees Celsius.
+        */
+       if (sensor->info.class == TEMPERATURE)
+               return sprintf(buf, "%lld\n", (s64)value);
+
        return sprintf(buf, "%llu\n", value);
 }
 
index c2484f15298b09514b065c1897bfffaf0fabbf62..8bd6435c13e82fb1fa0c4fc2f5c117291bbd9cf9 100644 (file)
 #define POWER_ENABLE                   0x19
 #define TPS23861_NUM_PORTS             4
 
+#define TPS23861_GENERAL_MASK_1                0x17
+#define TPS23861_CURRENT_SHUNT_MASK    BIT(0)
+
 #define TEMPERATURE_LSB                        652 /* 0.652 degrees Celsius */
 #define VOLTAGE_LSB                    3662 /* 3.662 mV */
 #define SHUNT_RESISTOR_DEFAULT         255000 /* 255 mOhm */
-#define CURRENT_LSB_255                        62260 /* 62.260 uA */
-#define CURRENT_LSB_250                        61039 /* 61.039 uA */
+#define CURRENT_LSB_250                        62260 /* 62.260 uA */
+#define CURRENT_LSB_255                        61039 /* 61.039 uA */
 #define RESISTANCE_LSB                 110966 /* 11.0966 Ohm*/
 #define RESISTANCE_LSB_LOW             157216 /* 15.7216 Ohm*/
 
@@ -117,6 +120,7 @@ struct tps23861_data {
 static struct regmap_config tps23861_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .max_register = 0x6f,
 };
 
 static int tps23861_read_temp(struct tps23861_data *data, long *val)
@@ -560,6 +564,15 @@ static int tps23861_probe(struct i2c_client *client)
        else
                data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
 
+       if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
+               regmap_clear_bits(data->regmap,
+                                 TPS23861_GENERAL_MASK_1,
+                                 TPS23861_CURRENT_SHUNT_MASK);
+       else
+               regmap_set_bits(data->regmap,
+                               TPS23861_GENERAL_MASK_1,
+                               TPS23861_CURRENT_SHUNT_MASK);
+
        hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
                                                         data, &tps23861_chip_info,
                                                         NULL);
index 281a65d9b44bba75c4b542b98b5c2bd4d3c3b692..10acece9d7b9383eca6a3b67e190f194a1cb0d1b 100644 (file)
@@ -647,7 +647,7 @@ config I2C_HIGHLANDER
 
 config I2C_HISI
        tristate "HiSilicon I2C controller"
-       depends on ARM64 || COMPILE_TEST
+       depends on (ARM64 && ACPI) || COMPILE_TEST
        help
          Say Y here if you want to have Hisilicon I2C controller support
          available on the Kunpeng Server.
index 4d12e3da12f0dc658a680f458a7d2f171789efd8..55a9e93fbfeb56a55d36795e3bca21c101a12f03 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  *     i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge
  *
  *     Copyright (C) 2004 Patrick Mochel
index 7d62cbda6e06c6aac7dcdee8800f72fdba4cd8a9..354cf7e45c4a095182fa679cd7139abeeaf287c8 100644 (file)
@@ -55,7 +55,7 @@
 #define ALTR_I2C_XFER_TIMEOUT  (msecs_to_jiffies(250))
 
 /**
- * altr_i2c_dev - I2C device context
+ * struct altr_i2c_dev - I2C device context
  * @base: pointer to register struct
  * @msg: pointer to current message
  * @msg_len: number of bytes transferred in msg
@@ -172,7 +172,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
        altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
 }
 
-/**
+/*
  * altr_i2c_transfer - On the last byte to be transmitted, send
  * a Stop bit on the last byte.
  */
@@ -185,7 +185,7 @@ static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data)
                writel(data, idev->base + ALTR_I2C_TFR_CMD);
 }
 
-/**
+/*
  * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of
  * transfer. Send a Stop bit on the last byte.
  */
@@ -201,9 +201,8 @@ static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev)
        }
 }
 
-/**
+/*
  * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
- * @return: Number of bytes left to transfer.
  */
 static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev)
 {
index c1bbc4caeb5c948489339a923b37718d657d90c0..66aafa7d11234c5d185e3aa02ce34e327ae0cf27 100644 (file)
@@ -144,7 +144,7 @@ enum cdns_i2c_mode {
 };
 
 /**
- * enum cdns_i2c_slave_mode - Slave state when I2C is operating in slave mode
+ * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode
  *
  * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle
  * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master
index 13be1d678c399cd40bdaadaa8ea17c7c7f5aea94..9b08bb5df38d29d48cd2881a212b7967bb3d2280 100644 (file)
@@ -165,7 +165,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 }
 
 /**
- * i2c_dw_init() - Initialize the designware I2C master hardware
+ * i2c_dw_init_master() - Initialize the designware I2C master hardware
  * @dev: device private data
  *
  * This functions configures and enables the I2C master.
index 843b31a0f752bc023471c702eda32f5572627154..321b2770feabc04c91c292e106261df0fdbb3ed9 100644 (file)
@@ -148,7 +148,7 @@ struct i2c_algo_pch_data {
 
 /**
 * struct adapter_info - This structure holds the adapter information for the
-                       PCH i2c controller
+ *                     PCH i2c controller
  * @pch_data:          stores a list of i2c_algo_pch_data
  * @pch_i2c_suspended: specifies whether the system is suspended or not
  *                     perhaps with more lines and words.
@@ -358,6 +358,7 @@ static void pch_i2c_repstart(struct i2c_algo_pch_data *adap)
 /**
  * pch_i2c_writebytes() - write data to I2C bus in normal mode
  * @i2c_adap:  Pointer to the struct i2c_adapter.
+ * @msgs:      Pointer to the i2c message structure.
  * @last:      specifies whether last message or not.
  *             In the case of compound mode it will be 1 for last message,
  *             otherwise 0.
index 99d446763530ea4028d449048d3a9e3f3486aeea..f9e1c2ceaac052496b61338b762d56262498d5a7 100644 (file)
@@ -395,11 +395,9 @@ static int i801_check_post(struct i801_priv *priv, int status)
                dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
                /* try to stop the current command */
                dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
-               outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
-                      SMBHSTCNT(priv));
+               outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
                usleep_range(1000, 2000);
-               outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
-                      SMBHSTCNT(priv));
+               outb_p(0, SMBHSTCNT(priv));
 
                /* Check if it worked */
                status = inb_p(SMBHSTSTS(priv));
index c8c422e9dda4339344b6ceab728c30b0758b1497..5dae7cab7260559242a24d60005be2511d5a7743 100644 (file)
@@ -123,7 +123,6 @@ static int icy_probe(struct zorro_dev *z,
 {
        struct icy_i2c *i2c;
        struct i2c_algo_pcf_data *algo_data;
-       struct fwnode_handle *new_fwnode;
        struct i2c_board_info ltc2990_info = {
                .type           = "ltc2990",
                .swnode         = &icy_ltc2990_node,
index 30d9e89a3db20a9f83dc120beb504de93b1ffc1a..dcca9c2396db169c570f5a33c29eee3d89904bb9 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/fsl_devices.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
@@ -45,6 +46,7 @@
 #define CCR_MTX  0x10
 #define CCR_TXAK 0x08
 #define CCR_RSTA 0x04
+#define CCR_RSVD 0x02
 
 #define CSR_MCF  0x80
 #define CSR_MAAS 0x40
@@ -97,7 +99,7 @@ struct mpc_i2c {
        u32 block;
        int rc;
        int expect_rxack;
-
+       bool has_errata_A004447;
 };
 
 struct mpc_i2c_divider {
@@ -136,6 +138,75 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c)
        }
 }
 
+static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask)
+{
+       void __iomem *addr = i2c->base + MPC_I2C_SR;
+       u8 val;
+
+       return readb_poll_timeout(addr, val, val & mask, 0, 100);
+}
+
+/*
+ * Workaround for Erratum A004447. From the P2040CE Rev Q
+ *
+ * 1.  Set up the frequency divider and sampling rate.
+ * 2.  I2CCR - a0h
+ * 3.  Poll for I2CSR[MBB] to get set.
+ * 4.  If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to
+ *     step 5. If MAL is not set, then go to step 13.
+ * 5.  I2CCR - 00h
+ * 6.  I2CCR - 22h
+ * 7.  I2CCR - a2h
+ * 8.  Poll for I2CSR[MBB] to get set.
+ * 9.  Issue read to I2CDR.
+ * 10. Poll for I2CSR[MIF] to be set.
+ * 11. I2CCR - 82h
+ * 12. Workaround complete. Skip the next steps.
+ * 13. Issue read to I2CDR.
+ * 14. Poll for I2CSR[MIF] to be set.
+ * 15. I2CCR - 80h
+ */
+static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c)
+{
+       int ret;
+       u32 val;
+
+       writeccr(i2c, CCR_MEN | CCR_MSTA);
+       ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+       if (ret) {
+               dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+               return;
+       }
+
+       val = readb(i2c->base + MPC_I2C_SR);
+
+       if (val & CSR_MAL) {
+               writeccr(i2c, 0x00);
+               writeccr(i2c, CCR_MSTA | CCR_RSVD);
+               writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD);
+               ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+               if (ret) {
+                       dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+                       return;
+               }
+               val = readb(i2c->base + MPC_I2C_DR);
+               ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+               if (ret) {
+                       dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+                       return;
+               }
+               writeccr(i2c, CCR_MEN | CCR_RSVD);
+       } else {
+               val = readb(i2c->base + MPC_I2C_DR);
+               ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+               if (ret) {
+                       dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+                       return;
+               }
+               writeccr(i2c, CCR_MEN);
+       }
+}
+
 #if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
 static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
        {20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
@@ -670,7 +741,10 @@ static int fsl_i2c_bus_recovery(struct i2c_adapter *adap)
 {
        struct mpc_i2c *i2c = i2c_get_adapdata(adap);
 
-       mpc_i2c_fixup(i2c);
+       if (i2c->has_errata_A004447)
+               mpc_i2c_fixup_A004447(i2c);
+       else
+               mpc_i2c_fixup(i2c);
 
        return 0;
 }
@@ -767,6 +841,9 @@ static int fsl_i2c_probe(struct platform_device *op)
        }
        dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
 
+       if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447"))
+               i2c->has_errata_A004447 = true;
+
        i2c->adap = mpc_ops;
        scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
                  "MPC adapter (%s)", of_node_full_name(op->dev.of_node));
index 5ddfa4e56ee232006cf73397c2be6096f3501372..4e9fb6b44436ad5ae19d2e3c8bfbe8b0ee02bfd3 100644 (file)
@@ -479,6 +479,11 @@ static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
 static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
+       u16 intr_stat_reg;
+
+       mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
+       intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+       mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT);
 
        if (i2c->dev_comp->apdma_sync) {
                writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
index dc77e1c4e80f9d8b6741a1aeb57f921fa4aa608c..a2d12a5b1c34c1265cbe9bc6c0215bef621d5bd0 100644 (file)
@@ -159,7 +159,7 @@ struct i2c_nmk_client {
  * @clk_freq: clock frequency for the operation mode
  * @tft: Tx FIFO Threshold in bytes
  * @rft: Rx FIFO Threshold in bytes
- * @timeout Slave response timeout (ms)
+ * @timeout: Slave response timeout (ms)
  * @sm: speed mode
  * @stop: stop condition.
  * @xfer_complete: acknowledge completion for a I2C message.
index 273222e38056edda2d89aea155cbef9c1232ba97..a0af027db04c11b98fef28be555ceb7804d92eb6 100644 (file)
@@ -250,7 +250,7 @@ static irqreturn_t ocores_isr(int irq, void *dev_id)
 }
 
 /**
- * Process timeout event
+ * ocores_process_timeout() - Process timeout event
  * @i2c: ocores I2C device instance
  */
 static void ocores_process_timeout(struct ocores_i2c *i2c)
@@ -264,7 +264,7 @@ static void ocores_process_timeout(struct ocores_i2c *i2c)
 }
 
 /**
- * Wait until something change in a given register
+ * ocores_wait() - Wait until something change in a given register
  * @i2c: ocores I2C device instance
  * @reg: register to query
  * @mask: bitmask to apply on register value
@@ -296,7 +296,7 @@ static int ocores_wait(struct ocores_i2c *i2c,
 }
 
 /**
- * Wait until is possible to process some data
+ * ocores_poll_wait() - Wait until is possible to process some data
  * @i2c: ocores I2C device instance
  *
  * Used when the device is in polling mode (interrupts disabled).
@@ -334,7 +334,7 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
 }
 
 /**
- * It handles an IRQ-less transfer
+ * ocores_process_polling() - It handles an IRQ-less transfer
  * @i2c: ocores I2C device instance
  *
  * Even if IRQ are disabled, the I2C OpenCore IP behavior is exactly the same
index 8c4ec7f13f5ab26b9fe3b9b08ca3ccae2c8cb961..50f21cdbe90d3cbfae78f65238908a604a64e7c5 100644 (file)
@@ -138,7 +138,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
 /**
  * i2c_pnx_start - start a device
  * @slave_addr:                slave address
- * @adap:              pointer to adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Generate a START signal in the desired mode.
  */
@@ -194,7 +194,7 @@ static int i2c_pnx_start(unsigned char slave_addr,
 
 /**
  * i2c_pnx_stop - stop a device
- * @adap:              pointer to I2C adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Generate a STOP signal to terminate the master transaction.
  */
@@ -223,7 +223,7 @@ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data)
 
 /**
  * i2c_pnx_master_xmit - transmit data to slave
- * @adap:              pointer to I2C adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Sends one byte of data to the slave
  */
@@ -293,7 +293,7 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
 
 /**
  * i2c_pnx_master_rcv - receive data from slave
- * @adap:              pointer to I2C adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Reads one byte data from the slave
  */
index 214b4c913a139ac872f356e0f5d10d4653f5d556..6d635a7c104ce93ad6041b90a761b9702c0604fe 100644 (file)
@@ -100,7 +100,7 @@ static const struct geni_i2c_err_log gi2c_log[] = {
        [GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
        [NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
        [GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
-       [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unepxected start/stop"},
+       [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
        [ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
        [GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
        [GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void geni_i2c_shutdown(struct platform_device *pdev)
+{
+       struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+       /* Make client i2c transfers start failing */
+       i2c_mark_adapter_suspended(&gi2c->adap);
+}
+
 static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
 {
        int ret;
@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
 {
        struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
 
+       i2c_mark_adapter_suspended(&gi2c->adap);
+
        if (!gi2c->suspended) {
                geni_i2c_runtime_suspend(dev);
                pm_runtime_disable(dev);
@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
        return 0;
 }
 
+static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
+{
+       struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+       i2c_mark_adapter_resumed(&gi2c->adap);
+       return 0;
+}
+
 static const struct dev_pm_ops geni_i2c_pm_ops = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
        SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
                                                                        NULL)
 };
@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
 static struct platform_driver geni_i2c_driver = {
        .probe  = geni_i2c_probe,
        .remove = geni_i2c_remove,
+       .shutdown = geni_i2c_shutdown,
        .driver = {
                .name = "geni_i2c",
                .pm = &geni_i2c_pm_ops,
index ab928613afba44a9785de522dae97d865eb63468..4d82761e1585eaf0cc02f102860b08d366ab5216 100644 (file)
@@ -480,7 +480,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
                                         * forces us to send a new START
                                         * when we change direction
                                         */
+                                       dev_dbg(i2c->dev,
+                                               "missing START before write->read\n");
                                        s3c24xx_i2c_stop(i2c, -EINVAL);
+                                       break;
                                }
 
                                goto retry_write;
index 3ae6ca21a02c6b9f08d737d56ead76b6dca550de..2d2e630fd438712f5de581d2319456b23d74de26 100644 (file)
@@ -807,7 +807,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
 static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
        { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
        { .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
-       { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
+       { .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config },
        { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
        { .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config },
        { .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config },
index faa81a95551fe98d230a2ee063b72298ad2b59c7..88482316d22a050a982a8024f046cf02d7378393 100644 (file)
@@ -524,7 +524,7 @@ static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev)
 }
 
 /**
- * st_i2c_handle_write() - Handle FIFO enmpty interrupt in case of read
+ * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read
  * @i2c_dev: Controller's private data
  */
 static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
@@ -558,7 +558,7 @@ static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
 }
 
 /**
- * st_i2c_isr() - Interrupt routine
+ * st_i2c_isr_thread() - Interrupt routine
  * @irq: interrupt number
  * @data: Controller's private data
  */
index 4933fc8ce3fd1ce0d8300005352def6b06b880a9..eebce7ecef25b7c887e3f654cbcc0266d2a59066 100644 (file)
@@ -313,7 +313,7 @@ static int stm32f4_i2c_wait_free_bus(struct stm32f4_i2c_dev *i2c_dev)
 }
 
 /**
- * stm32f4_i2c_write_ byte() - Write a byte in the data register
+ * stm32f4_i2c_write_byte() - Write a byte in the data register
  * @i2c_dev: Controller's private data
  * @byte: Data to write in the register
  */
index 3680d608698b10dfa14835347d889f14c81420f0..ec0c7cad424019067306142ce2192696a53d4cec 100644 (file)
@@ -65,7 +65,7 @@ static void tegra_bpmp_xlate_flags(u16 flags, u16 *out)
                *out |= SERIALI2C_RECV_LEN;
 }
 
-/**
+/*
  * The serialized I2C format is simply the following:
  * [addr little-endian][flags little-endian][len little-endian][data if write]
  * [addr little-endian][flags little-endian][len little-endian][data if write]
@@ -109,7 +109,7 @@ static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
        request->xfer.data_size = pos;
 }
 
-/**
+/*
  * The data in the BPMP -> CPU direction is composed of sequential blocks for
  * those messages that have I2C_M_RD. So, for example, if you have:
  *
index 8ceaa88dd78fbe968218c6e3edae288d38e82c7c..6f0aa0ed3241ef2762d8bcc8f5169bc4e812085e 100644 (file)
@@ -259,8 +259,8 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
  */
 void i2c_acpi_register_devices(struct i2c_adapter *adap)
 {
+       struct acpi_device *adev;
        acpi_status status;
-       acpi_handle handle;
 
        if (!has_acpi_companion(&adap->dev))
                return;
@@ -275,11 +275,11 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
        if (!adap->dev.parent)
                return;
 
-       handle = ACPI_HANDLE(adap->dev.parent);
-       if (!handle)
+       adev = ACPI_COMPANION(adap->dev.parent);
+       if (!adev)
                return;
 
-       acpi_walk_dep_device_list(handle);
+       acpi_dev_clear_dependencies(adev);
 }
 
 static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
index 6dc88902c189fbb015b3a136c0a2a79498b780a9..1c78657631f4f838196aa093640bd218b880e1c6 100644 (file)
@@ -34,7 +34,7 @@ struct i2c_arbitrator_data {
 };
 
 
-/**
+/*
  * i2c_arbitrator_select - claim the I2C bus
  *
  * Use the GPIO-based signalling protocol; return -EBUSY if we fail.
@@ -77,7 +77,7 @@ static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan)
        return -EBUSY;
 }
 
-/**
+/*
  * i2c_arbitrator_deselect - release the I2C bus
  *
  * Release the I2C bus using the GPIO-based signalling protocol.
index cceda3cecbcf47b29a398bc2c1e91571915607e3..8b1723635cce5a667df421a98c6d331403b5bb0d 100644 (file)
@@ -229,7 +229,6 @@ config DMARD10
 config HID_SENSOR_ACCEL_3D
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID Accelerometers 3D"
index 9d3952b4674f56a83e0f65eff65903570457a26a..a27db78ea13ee6f3b24bf345efa4e4f662a70365 100644 (file)
@@ -771,6 +771,13 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
                if (ret)
                        goto err;
 
+               if (channel >= indio_dev->num_channels) {
+                       dev_err(indio_dev->dev.parent,
+                               "Channel index >= number of channels\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+
                ret = of_property_read_u32_array(child, "diff-channels",
                                                 ain, 2);
                if (ret)
@@ -850,6 +857,11 @@ static int ad7124_setup(struct ad7124_state *st)
        return ret;
 }
 
+static void ad7124_reg_disable(void *r)
+{
+       regulator_disable(r);
+}
+
 static int ad7124_probe(struct spi_device *spi)
 {
        const struct ad7124_chip_info *info;
@@ -895,17 +907,20 @@ static int ad7124_probe(struct spi_device *spi)
                ret = regulator_enable(st->vref[i]);
                if (ret)
                        return ret;
+
+               ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
+                                              st->vref[i]);
+               if (ret)
+                       return ret;
        }
 
        st->mclk = devm_clk_get(&spi->dev, "mclk");
-       if (IS_ERR(st->mclk)) {
-               ret = PTR_ERR(st->mclk);
-               goto error_regulator_disable;
-       }
+       if (IS_ERR(st->mclk))
+               return PTR_ERR(st->mclk);
 
        ret = clk_prepare_enable(st->mclk);
        if (ret < 0)
-               goto error_regulator_disable;
+               return ret;
 
        ret = ad7124_soft_reset(st);
        if (ret < 0)
@@ -935,11 +950,6 @@ error_remove_trigger:
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
 error_clk_disable_unprepare:
        clk_disable_unprepare(st->mclk);
-error_regulator_disable:
-       for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
-               if (!IS_ERR_OR_NULL(st->vref[i]))
-                       regulator_disable(st->vref[i]);
-       }
 
        return ret;
 }
@@ -948,17 +958,11 @@ static int ad7124_remove(struct spi_device *spi)
 {
        struct iio_dev *indio_dev = spi_get_drvdata(spi);
        struct ad7124_state *st = iio_priv(indio_dev);
-       int i;
 
        iio_device_unregister(indio_dev);
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
        clk_disable_unprepare(st->mclk);
 
-       for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
-               if (!IS_ERR_OR_NULL(st->vref[i]))
-                       regulator_disable(st->vref[i]);
-       }
-
        return 0;
 }
 
index 2ed580521d815a0b869a63275fa9ab197f5da9e2..1141cc13a1249829ee9e83fc9718ff5a8976f1df 100644 (file)
@@ -912,7 +912,7 @@ static int ad7192_probe(struct spi_device *spi)
 {
        struct ad7192_state *st;
        struct iio_dev *indio_dev;
-       int ret, voltage_uv = 0;
+       int ret;
 
        if (!spi->irq) {
                dev_err(&spi->dev, "no IRQ?\n");
@@ -949,15 +949,12 @@ static int ad7192_probe(struct spi_device *spi)
                goto error_disable_avdd;
        }
 
-       voltage_uv = regulator_get_voltage(st->avdd);
-
-       if (voltage_uv > 0) {
-               st->int_vref_mv = voltage_uv / 1000;
-       } else {
-               ret = voltage_uv;
+       ret = regulator_get_voltage(st->avdd);
+       if (ret < 0) {
                dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
                goto error_disable_avdd;
        }
+       st->int_vref_mv = ret / 1000;
 
        spi_set_drvdata(spi, indio_dev);
        st->chip_info = of_device_get_match_data(&spi->dev);
@@ -1014,7 +1011,9 @@ static int ad7192_probe(struct spi_device *spi)
        return 0;
 
 error_disable_clk:
-       clk_disable_unprepare(st->mclk);
+       if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+           st->clock_sel == AD7192_CLK_EXT_MCLK2)
+               clk_disable_unprepare(st->mclk);
 error_remove_trigger:
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
 error_disable_dvdd:
@@ -1031,7 +1030,9 @@ static int ad7192_remove(struct spi_device *spi)
        struct ad7192_state *st = iio_priv(indio_dev);
 
        iio_device_unregister(indio_dev);
-       clk_disable_unprepare(st->mclk);
+       if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+           st->clock_sel == AD7192_CLK_EXT_MCLK2)
+               clk_disable_unprepare(st->mclk);
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
 
        regulator_disable(st->dvdd);
index c945f1349623f7ad7f58069710ed02565f022af5..60f21fed6dcbe1a8c69336c0dbe3b3452d2be923 100644 (file)
@@ -167,6 +167,10 @@ struct ad7768_state {
         * transfer buffers to live in their own cache lines.
         */
        union {
+               struct {
+                       __be32 chan;
+                       s64 timestamp;
+               } scan;
                __be32 d32;
                u8 d8[2];
        } data ____cacheline_aligned;
@@ -469,11 +473,11 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
 
        mutex_lock(&st->lock);
 
-       ret = spi_read(st->spi, &st->data.d32, 3);
+       ret = spi_read(st->spi, &st->data.scan.chan, 3);
        if (ret < 0)
                goto err_unlock;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32,
+       iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
                                           iio_get_time_ns(indio_dev));
 
        iio_trigger_notify_done(indio_dev->trig);
index 5e980a06258e64753b8f0b8320c75bb4c847e92c..440ef4c7be074c1c4b9f1d680147fd47703dd944 100644 (file)
@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
        id &= AD7793_ID_MASK;
 
        if (id != st->chip_info->id) {
+               ret = -ENODEV;
                dev_err(&st->sd.spi->dev, "device ID query failed\n");
                goto out;
        }
index 9a649745cd0a81c2744722c9c6dba0b0118efef6..069b561ee76896fb3a4b24fb63c6c70f88f7c9ed 100644 (file)
@@ -59,8 +59,10 @@ struct ad7923_state {
        /*
         * DMA (thus cache coherency maintenance) requires the
         * transfer buffers to live in their own cache lines.
        * Ensure rx_buf can be directly used in iio_push_to_buffers_with_timestamp
+        * Length = 8 channels + 4 extra for 8 byte timestamp
         */
-       __be16                          rx_buf[4] ____cacheline_aligned;
+       __be16                          rx_buf[12] ____cacheline_aligned;
        __be16                          tx_buf[4];
 };
 
index 24d4925673363e09ec9b0f848a55cb22ba2f31d8..2a3dd3b907beeabbbd44419ea8023ef8b1e24075 100644 (file)
@@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
        tristate "Common module (trigger) for all HID Sensor IIO drivers"
        depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
        select IIO_TRIGGER
+       select IIO_TRIGGERED_BUFFER
        help
          Say yes here to build trigger support for HID sensors.
          Triggers will be send if all requested attributes were read.
index 7ab2ccf908639744c876f828015799529733e9a2..8107f7bbbe3c5885ddcdd5f65664a16b1278d653 100644 (file)
@@ -524,23 +524,29 @@ static int ad5770r_channel_config(struct ad5770r_state *st)
        device_for_each_child_node(&st->spi->dev, child) {
                ret = fwnode_property_read_u32(child, "num", &num);
                if (ret)
-                       return ret;
-               if (num >= AD5770R_MAX_CHANNELS)
-                       return -EINVAL;
+                       goto err_child_out;
+               if (num >= AD5770R_MAX_CHANNELS) {
+                       ret = -EINVAL;
+                       goto err_child_out;
+               }
 
                ret = fwnode_property_read_u32_array(child,
                                                     "adi,range-microamp",
                                                     tmp, 2);
                if (ret)
-                       return ret;
+                       goto err_child_out;
 
                min = tmp[0] / 1000;
                max = tmp[1] / 1000;
                ret = ad5770r_store_output_range(st, min, max, num);
                if (ret)
-                       return ret;
+                       goto err_child_out;
        }
 
+       return 0;
+
+err_child_out:
+       fwnode_handle_put(child);
        return ret;
 }
 
index 5824f2edf97581711dd8ccd0499dc7df9f1ee5fb..20b5ac7ab66af9faec2de0ecd01c48081ce656b7 100644 (file)
@@ -111,7 +111,6 @@ config FXAS21002C_SPI
 config HID_SENSOR_GYRO_3D
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID Gyroscope 3D"
index 1a20c6b88e7db5cf3b28665ab25ab17c9c270882..645461c70454777f0745f128107c3e25f64bfc0a 100644 (file)
@@ -399,6 +399,7 @@ static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val)
        ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp);
        if (ret < 0) {
                dev_err(dev, "failed to read temp: %d\n", ret);
+               fxas21002c_pm_put(data);
                goto data_unlock;
        }
 
@@ -432,6 +433,7 @@ static int fxas21002c_axis_get(struct fxas21002c_data *data,
                               &axis_be, sizeof(axis_be));
        if (ret < 0) {
                dev_err(dev, "failed to read axis: %d: %d\n", index, ret);
+               fxas21002c_pm_put(data);
                goto data_unlock;
        }
 
index ac90be03332af86ba35687a76938e6684e8e268e..f17a9351953523e6070e88855e8bfa4e6696f9c7 100644 (file)
@@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_OFFSET:
                switch (chan->type) {
                case IIO_TEMP:
-                       /* The temperature scaling is (x+23000)/280 Celsius */
+                       /*
+                        * The temperature scaling is (x+23000)/280 Celsius
+                        * for the "best fit straight line" temperature range
+                        * of -30C..85C.  The 23000 includes room temperature
+                        * offset of +35C, 280 is the precision scale and x is
+                        * the 16-bit signed integer reported by hardware.
+                        *
+                        * Temperature value itself represents temperature of
+                        * the sensor die.
+                        */
                        *val = 23000;
                        return IIO_VAL_INT;
                default:
@@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
                                goto out_read_raw_unlock;
                        }
 
-                       *val = be16_to_cpu(raw_val);
+                       *val = (s16)be16_to_cpu(raw_val);
                        ret = IIO_VAL_INT;
 
                        goto out_read_raw_unlock;
index 6549fcf6db698680b321e3bedcf43f7f13ca684f..2de5494e7c22585aa52f016dc18e30c4f8107f37 100644 (file)
@@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
        tristate "HID Environmental humidity sensor"
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        help
index d92c58a94fe4f9081f0b47f0d8ed97e37e5d821b..59efb36db2c7ce485c8ae7186b5665d7b744dfdc 100644 (file)
@@ -1778,7 +1778,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        if (!indio_dev->info)
                goto out_unlock;
 
-       ret = -EINVAL;
        list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
                ret = h->ioctl(indio_dev, filp, cmd, arg);
                if (ret != IIO_IOCTL_UNHANDLED)
@@ -1786,7 +1785,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        }
 
        if (ret == IIO_IOCTL_UNHANDLED)
-               ret = -EINVAL;
+               ret = -ENODEV;
 
 out_unlock:
        mutex_unlock(&indio_dev->info_exist_lock);
@@ -1926,9 +1925,6 @@ EXPORT_SYMBOL(__iio_device_register);
  **/
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
-       struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
-       struct iio_ioctl_handler *h, *t;
-
        cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
 
        mutex_lock(&indio_dev->info_exist_lock);
@@ -1939,9 +1935,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
 
        indio_dev->info = NULL;
 
-       list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
-               list_del(&h->entry);
-
        iio_device_wakeup_eventset(indio_dev);
        iio_buffer_wakeup_poll(indio_dev);
 
index 33ad4dd0b5c7b5dfba060984cedef3673874bfbf..917f9becf9c75d0e8ca6938aaf3cf705d6165568 100644 (file)
@@ -256,7 +256,6 @@ config ISL29125
 config HID_SENSOR_ALS
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID ALS"
@@ -270,7 +269,6 @@ config HID_SENSOR_ALS
 config HID_SENSOR_PROX
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID PROX"
index d048ae257c5198759e576ed2053abfa5207128d9..f960be7d40016c72de11ff0e71a0b5e04f0ecfc9 100644 (file)
@@ -582,7 +582,7 @@ static int gp2ap002_probe(struct i2c_client *client,
                                        "gp2ap002", indio_dev);
        if (ret) {
                dev_err(dev, "unable to request IRQ\n");
-               goto out_disable_vio;
+               goto out_put_pm;
        }
        gp2ap002->irq = client->irq;
 
@@ -612,8 +612,9 @@ static int gp2ap002_probe(struct i2c_client *client,
 
        return 0;
 
-out_disable_pm:
+out_put_pm:
        pm_runtime_put_noidle(dev);
+out_disable_pm:
        pm_runtime_disable(dev);
 out_disable_vio:
        regulator_disable(gp2ap002->vio);
index 0f787bfc88fc48f262b584e10f26b498bd8a9e76..c9d8f07a6fcdd9e38785ef5b9bf96cb91f184194 100644 (file)
@@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
                return lux_val;
        }
 
+       /* Avoid division by zero of lux_value later on */
+       if (lux_val == 0) {
+               dev_err(&chip->client->dev,
+                       "%s: lux_val of 0 will produce out of range trim_value\n",
+                       __func__);
+               return -ENODATA;
+       }
+
        gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
                        * chip->als_settings.als_gain_trim) / lux_val);
        if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
index 5d4ffd66032e94570140fb5c285f219a12ac182a..74ad5701c6c29b0147fea32a935f2d3d4a0d1f6e 100644 (file)
@@ -95,7 +95,6 @@ config MAG3110
 config HID_SENSOR_MAGNETOMETER_3D
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID Magenetometer 3D"
index a505583cc2fdacfaf5086c268c4d98e0a9ce2d1c..396cbbb867f4cbb6236d3848940f5bfb684b22c5 100644 (file)
@@ -9,7 +9,6 @@ menu "Inclinometer sensors"
 config HID_SENSOR_INCLINOMETER_3D
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID Inclinometer 3D"
@@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
 config HID_SENSOR_DEVICE_ROTATION
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID Device Rotation"
index 689b978db4f950d15d03e019fcdfc445d60d6a0c..fc0d3cfca4186bd9a32130dfb6bbad6b275679f0 100644 (file)
@@ -79,7 +79,6 @@ config DPS310
 config HID_SENSOR_PRESS
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        tristate "HID PRESS"
index c685f10b5ae48f6fbad9b8b4451a91741a41c230..cc206bfa09c78d720f7496f94cd37c20e585de19 100644 (file)
@@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
        ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
        if (ret < 0) {
                dev_err(&client->dev, "cannot send start measurement command");
+               pm_runtime_put_noidle(&client->dev);
                return ret;
        }
 
index f1f2a1499c9e23eeec7491e1eb5c748001848435..4df60082c1fa8c81ae521f5e2c7c11a3c95bfaad 100644 (file)
@@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
        tristate "HID Environmental temperature sensor"
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
-       select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
        select HID_SENSOR_IIO_TRIGGER
        help
index 2b9ffc21cbc4ad4a3fbbd096f610c96284b8474c..ab148a696c0cebe9995a95cf2d998c049cfbcecf 100644 (file)
@@ -473,6 +473,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
        list_del(&id_priv->list);
        cma_dev_put(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
+       id_priv->id.device = NULL;
        if (id_priv->id.route.addr.dev_addr.sgid_attr) {
                rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
                id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
@@ -1860,6 +1861,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                }
                cma_leave_mc_groups(id_priv);
+               rdma_restrack_del(&id_priv->res);
                cma_release_dev(id_priv);
        }
 
@@ -1873,7 +1875,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
        kfree(id_priv->id.route.path_rec);
 
        put_net(id_priv->id.route.addr.dev_addr.net);
-       rdma_restrack_del(&id_priv->res);
        kfree(id_priv);
 }
 
@@ -3774,7 +3775,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
        }
 
        id_priv->backlog = backlog;
-       if (id->device) {
+       if (id_priv->cma_dev) {
                if (rdma_cap_ib_cm(id->device, 1)) {
                        ret = cma_ib_listen(id_priv);
                        if (ret)
index d5e15a8c870d1240dddd17253c40d3fcf24814c1..64e4be1cbec7c7041cac73f0284555f2d5109c4b 100644 (file)
@@ -3248,6 +3248,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
                goto err_free_attr;
        }
 
+       if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
+               err = -EINVAL;
+               goto err_uobj;
+       }
+
        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
        if (!qp) {
                err = -EINVAL;
index 9ec6971056fa853db1dad103986de213d5544047..049684880ae03df75a47c4bc9d49f8ca78ed6dd8 100644 (file)
@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
                return ret;
 
        uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
-       if (!uapi_object)
-               return -EINVAL;
+       if (IS_ERR(uapi_object))
+               return PTR_ERR(uapi_object);
 
        handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
                                        out_len, &total);
@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
        if (ret)
                return ret;
 
+       if (!user_entry_size)
+               return -EINVAL;
+
        max_entries = uverbs_attr_ptr_get_array_size(
                attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
                user_entry_size);
index 22898d97ecbdac5f2457d5a9b508628719b139ba..230a6ae0ab5a0cc796e6c6969a74a8c4ce0fece2 100644 (file)
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
        props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
 
-       if (!mlx4_is_slave(dev->dev))
-               err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
                resp.response_length += sizeof(resp.hca_core_clock_offset);
-               if (!err && !mlx4_is_slave(dev->dev)) {
+               if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
                        resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
                        resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
                }
@@ -1702,9 +1699,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
-       if (!rdma_is_port_valid(qp->device, flow_attr->port))
-               return ERR_PTR(-EINVAL);
-
        if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
                return ERR_PTR(-EOPNOTSUPP);
 
index eb92cefffd7771a2950aaeb13ba124b557c4bc3d..9ce01f729673918eef2a53697800f12a55ece8ef 100644 (file)
@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
        ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
-                            struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
 {
        int i;
        void *cqe;
        struct mlx5_cqe64 *cqe64;
 
        for (i = 0; i < buf->nent; i++) {
-               cqe = get_cqe(cq, i);
+               cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
                cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
                cqe64->op_own = MLX5_CQE_INVALID << 4;
        }
@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (err)
                goto err_db;
 
-       init_cq_frag_buf(cq, &cq->buf);
+       init_cq_frag_buf(&cq->buf);
 
        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (err)
                goto ex;
 
-       init_cq_frag_buf(cq, cq->resize_buf);
+       init_cq_frag_buf(cq->resize_buf);
 
        return 0;
 
index a0b677accd96516cc748084fb630606711ea7e93..eb9b0a2707f800a33719e4f5ab80c80cf715343a 100644 (file)
@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
        case UVERBS_OBJECT_QP:
        {
                struct mlx5_ib_qp *qp = to_mqp(uobj->object);
-               enum ib_qp_type qp_type = qp->ibqp.qp_type;
 
-               if (qp_type == IB_QPT_RAW_PACKET ||
+               if (qp->type == IB_QPT_RAW_PACKET ||
                    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
                        struct mlx5_ib_raw_packet_qp *raw_packet_qp =
                                                         &qp->raw_packet_qp;
@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
                                               sq->tisn) == obj_id);
                }
 
-               if (qp_type == MLX5_IB_QPT_DCT)
+               if (qp->type == MLX5_IB_QPT_DCT)
                        return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
                                              qp->dct.mdct.mqp.qpn) == obj_id;
-
                return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
                                      qp->ibqp.qp_num) == obj_id;
        }
index 094bf85589db501596cc58571af6e155d950fc62..001d766cf291c711361fd690deb6565dc0bfa893 100644 (file)
@@ -217,6 +217,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
        if (err)
                return err;
 
+       if (op >= BITS_PER_TYPE(u32))
+               return -EOPNOTSUPP;
+
        if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
                return -EOPNOTSUPP;
 
index 61475b571531271f22bccc852d1bf6051898386f..7af4df7a6823787e07250fe11fa89bc637bda00a 100644 (file)
@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
        struct ib_umem         *umem;
        unsigned long           user_virt;
        int                     refcnt;
+       struct mm_struct        *mm;
 };
 
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
        mutex_lock(&context->db_page_mutex);
 
        list_for_each_entry(page, &context->db_page_list, list)
-               if (page->user_virt == (virt & PAGE_MASK))
+               if ((current->mm == page->mm) &&
+                   (page->user_virt == (virt & PAGE_MASK)))
                        goto found;
 
        page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
                kfree(page);
                goto out;
        }
+       mmgrab(current->mm);
+       page->mm = current->mm;
 
        list_add(&page->list, &context->db_page_list);
 
@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
 
        if (!--db->u.user_page->refcnt) {
                list_del(&db->u.user_page->list);
+               mmdrop(db->u.user_page->mm);
                ib_umem_release(db->u.user_page->umem);
                kfree(db->u.user_page);
        }
index 2fc6a60c4e775e16b6e2164aaada2e9fdb9357e4..18ee2f29382502249a19ed6146a83b4120d7c615 100644 (file)
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                goto free_ucmd;
        }
 
-       if (flow_attr->port > dev->num_ports ||
-           (flow_attr->flags &
-            ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
+       if (flow_attr->flags &
+           ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
                err = -EINVAL;
                goto free_ucmd;
        }
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
        if (err)
                goto end;
 
+       if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+           mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+               err = -EINVAL;
+               goto end;
+       }
+
        uobj->object = obj;
        obj->mdev = dev->mdev;
        atomic_set(&obj->usecnt, 0);
index 6d1dd09a4388177be9a0fedae2c89d026ace1be4..644d5d0ac54428728a3d05c21b3af9c9ca17f543 100644 (file)
@@ -4419,6 +4419,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
 
                if (bound) {
                        rdma_roce_rescan_device(&dev->ib_dev);
+                       mpi->ibdev->ib_active = true;
                        break;
                }
        }
index 4388afeff2512be8513779ef4126ecb74be9ab43..425423dfac7245b74195c244257d3bd83b837041 100644 (file)
@@ -743,10 +743,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
                ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
                           MLX5_IB_UMR_OCTOWORD;
                ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
-               if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+               if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
                    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
                    mlx5_ib_can_load_pas_with_umr(dev, 0))
-                       ent->limit = dev->mdev->profile->mr_cache[i].limit;
+                       ent->limit = dev->mdev->profile.mr_cache[i].limit;
                else
                        ent->limit = 0;
                spin_lock_irq(&ent->lock);
@@ -1940,8 +1940,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
                mlx5r_deref_wait_odp_mkey(&mr->mmkey);
 
        if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
-               xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
-                          NULL, GFP_KERNEL);
+               xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+                          mr->sig, NULL, GFP_KERNEL);
 
                if (mr->mtt_mr) {
                        rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
index 2af26737d32dc80b12f117c069f3e826969bc012..a6712e373eed633cdddf0a993f57e45e521a6f06 100644 (file)
@@ -346,13 +346,15 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
        ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
                        &wqe->dma, payload_addr(pkt),
                        payload_size(pkt), to_mr_obj, NULL);
-       if (ret)
+       if (ret) {
+               wqe->status = IB_WC_LOC_PROT_ERR;
                return COMPST_ERROR;
+       }
 
        if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
                return COMPST_COMP_ACK;
-       else
-               return COMPST_UPDATE_COMP;
+
+       return COMPST_UPDATE_COMP;
 }
 
 static inline enum comp_state do_atomic(struct rxe_qp *qp,
@@ -366,10 +368,12 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
        ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
                        &wqe->dma, &atomic_orig,
                        sizeof(u64), to_mr_obj, NULL);
-       if (ret)
+       if (ret) {
+               wqe->status = IB_WC_LOC_PROT_ERR;
                return COMPST_ERROR;
-       else
-               return COMPST_COMP_ACK;
+       }
+
+       return COMPST_COMP_ACK;
 }
 
 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
index 34ae957a315ca91ebcf240244d3b639b540fb416..b0f350d674fdbf79d0ab487afc04ec49087de382 100644 (file)
@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
+               qp->sq.queue = NULL;
                return err;
        }
 
@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
+                       qp->rq.queue = NULL;
                        return err;
                }
        }
@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 err2:
        rxe_queue_cleanup(qp->sq.queue);
 err1:
+       qp->pd = NULL;
+       qp->rcq = NULL;
+       qp->scq = NULL;
+       qp->srq = NULL;
+
        if (srq)
                rxe_drop_ref(srq);
        rxe_drop_ref(scq);
index d2313efb26db833e4307683a3a8ca910254de635..3f175f220a2299bc74e025b6fbcf73729357f5a3 100644 (file)
@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
        struct siw_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct siw_ucontext,
                                          base_ucontext);
-       struct siw_cq *scq = NULL, *rcq = NULL;
        unsigned long flags;
        int num_sqe, num_rqe, rv = 0;
        size_t length;
@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
                rv = -EINVAL;
                goto err_out;
        }
-       scq = to_siw_cq(attrs->send_cq);
-       rcq = to_siw_cq(attrs->recv_cq);
 
-       if (!scq || (!rcq && !attrs->srq)) {
+       if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
                siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
                rv = -EINVAL;
                goto err_out;
@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
        else {
                /* Zero sized SQ is not supported */
                rv = -EINVAL;
-               goto err_out;
+               goto err_out_xa;
        }
        if (num_rqe)
                num_rqe = roundup_pow_of_two(num_rqe);
@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
                }
        }
        qp->pd = pd;
-       qp->scq = scq;
-       qp->rcq = rcq;
+       qp->scq = to_siw_cq(attrs->send_cq);
+       qp->rcq = to_siw_cq(attrs->recv_cq);
 
        if (attrs->srq) {
                /*
index d5a90a66b45cfed4fe13fbda0e0274c149719cbb..5b05cf3837da1e55197481bd6c82d542c5c416b1 100644 (file)
@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
 
 static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
        .kind           = "ipoib",
+       .netns_refund   = true,
        .maxtype        = IFLA_IPOIB_MAX,
        .policy         = ipoib_policy,
        .priv_size      = sizeof(struct ipoib_dev_priv),
index d1591a28b74388cbfb5e9df0bbaae65727425139..8f385f9c2dd382e18f3ce17ae2a66b4f83c8e68b 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  */
 
 #include <asm/div64.h>
@@ -205,6 +205,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
        }
        mutex_unlock(&bcm_voter_lock);
 
+       of_node_put(node);
        return voter;
 }
 EXPORT_SYMBOL_GPL(of_bcm_voter_get);
@@ -362,6 +363,7 @@ static const struct of_device_id bcm_voter_of_match[] = {
        { .compatible = "qcom,bcm-voter" },
        { }
 };
+MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
 
 static struct platform_driver qcom_icc_bcm_voter_driver = {
        .probe = qcom_icc_bcm_voter_probe,
index 80e8e1916dd17c9c3ef2b46841103830d2ff95b6..3ac42bbdefc634bed6c31eaf38efdc4142fd3184 100644 (file)
@@ -884,7 +884,7 @@ static inline u64 build_inv_address(u64 address, size_t size)
                 * The msb-bit must be clear on the address. Just set all the
                 * lower bits.
                 */
-               address |= 1ull << (msb_diff - 1);
+               address |= (1ull << msb_diff) - 1;
        }
 
        /* Clear bits 11:0 */
@@ -1714,6 +1714,8 @@ static void amd_iommu_probe_finalize(struct device *dev)
        domain = iommu_get_domain_for_dev(dev);
        if (domain->type == IOMMU_DOMAIN_DMA)
                iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+       else
+               set_dma_ops(dev, NULL);
 }
 
 static void amd_iommu_release_device(struct device *dev)
index 1757ac1e1623e9aef3583d7a8ae819fd2baa968c..84057cb9596cb2a62acfeecc52839e198aee50d0 100644 (file)
@@ -1142,7 +1142,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 
                err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
                if (err)
-                       goto err_unmap;
+                       goto err_sysfs;
        }
 
        drhd->iommu = iommu;
@@ -1150,6 +1150,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 
        return 0;
 
+err_sysfs:
+       iommu_device_sysfs_remove(&iommu->iommu);
 err_unmap:
        unmap_iommu(iommu);
 error_free_seq_id:
index 708f430af1c4403662245c08530dbab79c83f078..be35284a201600a9bf924204e735361bf75cf515 100644 (file)
@@ -2525,9 +2525,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
                                    struct device *dev,
                                    u32 pasid)
 {
-       int flags = PASID_FLAG_SUPERVISOR_MODE;
        struct dma_pte *pgd = domain->pgd;
        int agaw, level;
+       int flags = 0;
 
        /*
         * Skip top levels of page tables for iommu which has
@@ -2543,7 +2543,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
        if (level != 4 && level != 5)
                return -EINVAL;
 
-       flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+       if (pasid != PASID_RID2PASID)
+               flags |= PASID_FLAG_SUPERVISOR_MODE;
+       if (level == 5)
+               flags |= PASID_FLAG_FL5LP;
 
        if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
                flags |= PASID_FLAG_PAGE_SNOOP;
@@ -4606,6 +4609,8 @@ static int auxiliary_link_device(struct dmar_domain *domain,
 
        if (!sinfo) {
                sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+               if (!sinfo)
+                       return -ENOMEM;
                sinfo->domain = domain;
                sinfo->pdev = dev;
                list_add(&sinfo->link_phys, &info->subdevices);
index 72646bafc52f418bb39fe60800d05f1b992cb40d..72dc84821dad2de15f56c0ea26c222afb7b338a5 100644 (file)
@@ -699,7 +699,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
         * Since it is a second level only translation setup, we should
         * set SRE bit as well (addresses are expected to be GPAs).
         */
-       pasid_set_sre(pte);
+       if (pasid != PASID_RID2PASID)
+               pasid_set_sre(pte);
        pasid_set_present(pte);
        pasid_flush_caches(iommu, pte, pasid, did);
 
index 7c02481a81b4e9ebd6ed3cd1c3e271b22e0d35d2..c6e5ee4d9cef83e81b0896d0811c673d3be09b89 100644 (file)
@@ -1136,6 +1136,7 @@ static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
        { 0 },
 };
+MODULE_DEVICE_TABLE(virtio, id_table);
 
 static struct virtio_driver virtio_iommu_drv = {
        .driver.name            = KBUILD_MODNAME,
index b90e825df7e1470f125fab3760b025473b01e9b9..62543a4eccc086c989a6045d331c60d85b8903a7 100644 (file)
@@ -596,7 +596,7 @@ config IRQ_IDT3243X
 config APPLE_AIC
        bool "Apple Interrupt Controller (AIC)"
        depends on ARM64
-       default ARCH_APPLE
+       depends on ARCH_APPLE || COMPILE_TEST
        help
          Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
          such as the M1.
index 91adf771f1859737157af823d231669e99b7f97b..090bc3f4f7d883d226d25d5e7e45e2da9fff1bf1 100644 (file)
@@ -359,10 +359,8 @@ static int mvebu_icu_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        icu->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(icu->base)) {
-               dev_err(&pdev->dev, "Failed to map icu base address.\n");
+       if (IS_ERR(icu->base))
                return PTR_ERR(icu->base);
-       }
 
        /*
         * Legacy bindings: ICU is one node with one MSI parent: force manually
index 18832ccc8ff8751d2562f56e53466b6b5e5b84fb..3a7b7a7f20ca7a4223888bab5ae6392f9b3b45d4 100644 (file)
@@ -384,10 +384,8 @@ static int mvebu_sei_probe(struct platform_device *pdev)
 
        sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        sei->base = devm_ioremap_resource(sei->dev, sei->res);
-       if (IS_ERR(sei->base)) {
-               dev_err(sei->dev, "Failed to remap SEI resource\n");
+       if (IS_ERR(sei->base))
                return PTR_ERR(sei->base);
-       }
 
        /* Retrieve the SEI capabilities with the interrupt ranges */
        sei->caps = of_device_get_match_data(&pdev->dev);
index b9db90c4aa566f186622fe05b50c07b8bbc5e986..4704f2ee5797a1de2e13a1442b0618f6438eb817 100644 (file)
@@ -892,10 +892,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host_data->base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(host_data->base)) {
-               dev_err(dev, "Unable to map registers\n");
+       if (IS_ERR(host_data->base))
                return PTR_ERR(host_data->base);
-       }
 
        for (i = 0; i < drv_data->bank_nr; i++)
                stm32_exti_chip_init(host_data, i, np);
index 70061991915a5833acd33d6354ba08e60177ee20..cd5642cef01fd667b2bba54262aec4e5de5017ea 100644 (file)
@@ -46,7 +46,7 @@ static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
 static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
 static int  hfcsusb_setup_bch(struct bchannel *bch, int protocol);
 static void deactivate_bchannel(struct bchannel *bch);
-static void hfcsusb_ph_info(struct hfcsusb *hw);
+static int  hfcsusb_ph_info(struct hfcsusb *hw);
 
 /* start next background transfer for control channel */
 static void
@@ -241,7 +241,7 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
  * send full D/B channel status information
  * as MPH_INFORMATION_IND
  */
-static void
+static int
 hfcsusb_ph_info(struct hfcsusb *hw)
 {
        struct ph_info *phi;
@@ -250,7 +250,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
 
        phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
        if (!phi)
-               return;
+               return -ENOMEM;
 
        phi->dch.ch.protocol = hw->protocol;
        phi->dch.ch.Flags = dch->Flags;
@@ -263,6 +263,8 @@ hfcsusb_ph_info(struct hfcsusb *hw)
        _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
                    struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
        kfree(phi);
+
+       return 0;
 }
 
 /*
@@ -347,8 +349,7 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
                        ret = l1_event(dch->l1, hh->prim);
                break;
        case MPH_INFORMATION_REQ:
-               hfcsusb_ph_info(hw);
-               ret = 0;
+               ret = hfcsusb_ph_info(hw);
                break;
        }
 
@@ -403,8 +404,7 @@ hfc_l1callback(struct dchannel *dch, u_int cmd)
                               hw->name, __func__, cmd);
                return -1;
        }
-       hfcsusb_ph_info(hw);
-       return 0;
+       return hfcsusb_ph_info(hw);
 }
 
 static int
@@ -746,8 +746,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol)
                        handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
                                   LED_B2_OFF);
        }
-       hfcsusb_ph_info(hw);
-       return 0;
+       return hfcsusb_ph_info(hw);
 }
 
 static void
index a16c7a2a7f3d0d22bf03713a58640db143440a25..88d592bafdb0279635e2ac9ae57798b715282614 100644 (file)
@@ -630,17 +630,19 @@ static void
 release_io(struct inf_hw *hw)
 {
        if (hw->cfg.mode) {
-               if (hw->cfg.p) {
+               if (hw->cfg.mode == AM_MEMIO) {
                        release_mem_region(hw->cfg.start, hw->cfg.size);
-                       iounmap(hw->cfg.p);
+                       if (hw->cfg.p)
+                               iounmap(hw->cfg.p);
                } else
                        release_region(hw->cfg.start, hw->cfg.size);
                hw->cfg.mode = AM_NONE;
        }
        if (hw->addr.mode) {
-               if (hw->addr.p) {
+               if (hw->addr.mode == AM_MEMIO) {
                        release_mem_region(hw->addr.start, hw->addr.size);
-                       iounmap(hw->addr.p);
+                       if (hw->addr.p)
+                               iounmap(hw->addr.p);
                } else
                        release_region(hw->addr.start, hw->addr.size);
                hw->addr.mode = AM_NONE;
@@ -670,9 +672,12 @@ setup_io(struct inf_hw *hw)
                                (ulong)hw->cfg.start, (ulong)hw->cfg.size);
                        return err;
                }
-               if (hw->ci->cfg_mode == AM_MEMIO)
-                       hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
                hw->cfg.mode = hw->ci->cfg_mode;
+               if (hw->ci->cfg_mode == AM_MEMIO) {
+                       hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
+                       if (!hw->cfg.p)
+                               return -ENOMEM;
+               }
                if (debug & DEBUG_HW)
                        pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
                                  hw->name, (ulong)hw->cfg.start,
@@ -697,12 +702,12 @@ setup_io(struct inf_hw *hw)
                                (ulong)hw->addr.start, (ulong)hw->addr.size);
                        return err;
                }
+               hw->addr.mode = hw->ci->addr_mode;
                if (hw->ci->addr_mode == AM_MEMIO) {
                        hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
-                       if (unlikely(!hw->addr.p))
+                       if (!hw->addr.p)
                                return -ENOMEM;
                }
-               hw->addr.mode = hw->ci->addr_mode;
                if (debug & DEBUG_HW)
                        pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
                                  hw->name, (ulong)hw->addr.start,
index ee925b58bbcea7ab0a411069cc5b394f3b276165..2a1ddd47a0968da8db09c9800b5219547ca5e43f 100644 (file)
@@ -1100,7 +1100,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                card->typ = NETJET_S_TJ300;
 
        card->base = pci_resource_start(pdev, 0);
-       card->irq = pdev->irq;
        pci_set_drvdata(pdev, card);
        err = setup_instance(card);
        if (err)
index fc433e63b1dc0319966fb52493f9c6d99d218368..b1590cb4a188783b0595d9c713357ac8cf12fccb 100644 (file)
@@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
        usleep_range(3000, 6000);
        ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
        if (ret)
-               return ret;
+               goto out;
        status &= LP5523_ENG_STATUS_MASK;
 
        if (status != LP5523_ENG_STATUS_MASK) {
index 0a4551e165abf9e06112574a7ff06d3e38fdf4e7..5fc989a6d452888f2645d00b12cf5c84cfed4ba3 100644 (file)
@@ -364,7 +364,6 @@ struct cached_dev {
 
        /* The rest of this all shows up in sysfs */
        unsigned int            sequential_cutoff;
-       unsigned int            readahead;
 
        unsigned int            io_disable:1;
        unsigned int            verify:1;
index 29c231758293e2bb99bbc2026b076813a06dbf27..6d1de889baeb1c2caeb813a29f58dc6ec84de131 100644 (file)
@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned int sectors)
 {
        int ret = MAP_CONTINUE;
-       unsigned int reada = 0;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct bio *miss, *cache_bio;
+       unsigned int size_limit;
 
        s->cache_missed = 1;
 
@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                goto out_submit;
        }
 
-       if (!(bio->bi_opf & REQ_RAHEAD) &&
-           !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
-           s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
-               reada = min_t(sector_t, dc->readahead >> 9,
-                             get_capacity(bio->bi_bdev->bd_disk) -
-                             bio_end_sector(bio));
-
-       s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+       /* Limitation for valid replace key size and cache_bio bvecs number */
+       size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+                          (1 << KEY_SIZE_BITS) - 1);
+       s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
 
        s->iop.replace_key = KEY(s->iop.inode,
                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
        s->iop.replace = true;
 
-       miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+       miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+                             &s->d->bio_split);
 
        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;
@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                goto out_put;
 
-       if (reada)
-               bch_mark_cache_readahead(s->iop.c, s->d);
-
        s->cache_miss   = miss;
        s->iop.bio      = cache_bio;
        bio_get(cache_bio);
index 503aafe188dce4b6b1ca4a9361ec0a3f637967f6..4c7ee5fedb9dcb47033a55301d4aba9f04df6408 100644 (file)
@@ -46,7 +46,6 @@ read_attribute(cache_misses);
 read_attribute(cache_bypass_hits);
 read_attribute(cache_bypass_misses);
 read_attribute(cache_hit_ratio);
-read_attribute(cache_readaheads);
 read_attribute(cache_miss_collisions);
 read_attribute(bypassed);
 
@@ -64,7 +63,6 @@ SHOW(bch_stats)
                    DIV_SAFE(var(cache_hits) * 100,
                             var(cache_hits) + var(cache_misses)));
 
-       var_print(cache_readaheads);
        var_print(cache_miss_collisions);
        sysfs_hprint(bypassed,  var(sectors_bypassed) << 9);
 #undef var
@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
        &sysfs_cache_bypass_hits,
        &sysfs_cache_bypass_misses,
        &sysfs_cache_hit_ratio,
-       &sysfs_cache_readaheads,
        &sysfs_cache_miss_collisions,
        &sysfs_bypassed,
        NULL
@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
        acc->total.cache_misses = 0;
        acc->total.cache_bypass_hits = 0;
        acc->total.cache_bypass_misses = 0;
-       acc->total.cache_readaheads = 0;
        acc->total.cache_miss_collisions = 0;
        acc->total.sectors_bypassed = 0;
 }
@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
                scale_stat(&stats->cache_misses);
                scale_stat(&stats->cache_bypass_hits);
                scale_stat(&stats->cache_bypass_misses);
-               scale_stat(&stats->cache_readaheads);
                scale_stat(&stats->cache_miss_collisions);
                scale_stat(&stats->sectors_bypassed);
        }
@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
        move_stat(cache_misses);
        move_stat(cache_bypass_hits);
        move_stat(cache_bypass_misses);
-       move_stat(cache_readaheads);
        move_stat(cache_miss_collisions);
        move_stat(sectors_bypassed);
 
@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
        mark_cache_stats(&c->accounting.collector, hit, bypass);
 }
 
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
-{
-       struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
-       atomic_inc(&dc->accounting.collector.cache_readaheads);
-       atomic_inc(&c->accounting.collector.cache_readaheads);
-}
-
 void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
 {
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
index abfaabf7e7fcf8fee33c8370074551bf5fc91768..ca4f435f7216a7231cf9e1dd4ee8cae56ea93841 100644 (file)
@@ -7,7 +7,6 @@ struct cache_stat_collector {
        atomic_t cache_misses;
        atomic_t cache_bypass_hits;
        atomic_t cache_bypass_misses;
-       atomic_t cache_readaheads;
        atomic_t cache_miss_collisions;
        atomic_t sectors_bypassed;
 };
index cc89f3156d1aadb0953f249d82bb2a4cde976230..05ac1d6fbbf3541fdd7dfba61bf5dbafe0a18c44 100644 (file)
@@ -137,7 +137,6 @@ rw_attribute(io_disable);
 rw_attribute(discard);
 rw_attribute(running);
 rw_attribute(label);
-rw_attribute(readahead);
 rw_attribute(errors);
 rw_attribute(io_error_limit);
 rw_attribute(io_error_halflife);
@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
        var_printf(partial_stripes_expensive,   "%u");
 
        var_hprint(sequential_cutoff);
-       var_hprint(readahead);
 
        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);
@@ -365,7 +363,6 @@ STORE(__cached_dev)
        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
-       d_strtoi_h(readahead);
 
        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);
@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
-       &sysfs_readahead,
 #ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
index 781942aeddd153f4ec3591194be9a143712783a6..20f2510db1f673ba9104222b85163df57ca7c7ad 100644 (file)
@@ -66,14 +66,14 @@ struct superblock {
        __u8 magic[8];
        __u8 version;
        __u8 log2_interleave_sectors;
-       __u16 integrity_tag_size;
-       __u32 journal_sections;
-       __u64 provided_data_sectors;    /* userspace uses this value */
-       __u32 flags;
+       __le16 integrity_tag_size;
+       __le32 journal_sections;
+       __le64 provided_data_sectors;   /* userspace uses this value */
+       __le32 flags;
        __u8 log2_sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;
        __u8 pad[2];
-       __u64 recalc_sector;
+       __le64 recalc_sector;
        __u8 pad2[8];
        __u8 salt[SALT_SIZE];
 };
@@ -86,16 +86,16 @@ struct superblock {
 
 #define        JOURNAL_ENTRY_ROUNDUP           8
 
-typedef __u64 commit_id_t;
+typedef __le64 commit_id_t;
 #define JOURNAL_MAC_PER_SECTOR         8
 
 struct journal_entry {
        union {
                struct {
-                       __u32 sector_lo;
-                       __u32 sector_hi;
+                       __le32 sector_lo;
+                       __le32 sector_hi;
                } s;
-               __u64 sector;
+               __le64 sector;
        } u;
        commit_id_t last_bytes[];
        /* __u8 tag[0]; */
@@ -806,7 +806,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
        }
 
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
-               uint64_t section_le;
+               __le64 section_le;
 
                r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
                if (unlikely(r < 0)) {
@@ -1640,7 +1640,7 @@ static void integrity_end_io(struct bio *bio)
 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
                                      const char *data, char *result)
 {
-       __u64 sector_le = cpu_to_le64(sector);
+       __le64 sector_le = cpu_to_le64(sector);
        SHASH_DESC_ON_STACK(req, ic->internal_hash);
        int r;
        unsigned digest_size;
@@ -2689,30 +2689,26 @@ next_chunk:
        if (unlikely(dm_integrity_failed(ic)))
                goto err;
 
-       if (!ic->discard) {
-               io_req.bi_op = REQ_OP_READ;
-               io_req.bi_op_flags = 0;
-               io_req.mem.type = DM_IO_VMA;
-               io_req.mem.ptr.addr = ic->recalc_buffer;
-               io_req.notify.fn = NULL;
-               io_req.client = ic->io;
-               io_loc.bdev = ic->dev->bdev;
-               io_loc.sector = get_data_sector(ic, area, offset);
-               io_loc.count = n_sectors;
+       io_req.bi_op = REQ_OP_READ;
+       io_req.bi_op_flags = 0;
+       io_req.mem.type = DM_IO_VMA;
+       io_req.mem.ptr.addr = ic->recalc_buffer;
+       io_req.notify.fn = NULL;
+       io_req.client = ic->io;
+       io_loc.bdev = ic->dev->bdev;
+       io_loc.sector = get_data_sector(ic, area, offset);
+       io_loc.count = n_sectors;
 
-               r = dm_io(&io_req, 1, &io_loc, NULL);
-               if (unlikely(r)) {
-                       dm_integrity_io_error(ic, "reading data", r);
-                       goto err;
-               }
+       r = dm_io(&io_req, 1, &io_loc, NULL);
+       if (unlikely(r)) {
+               dm_integrity_io_error(ic, "reading data", r);
+               goto err;
+       }
 
-               t = ic->recalc_tags;
-               for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
-                       integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
-                       t += ic->tag_size;
-               }
-       } else {
-               t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+       t = ic->recalc_tags;
+       for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+               integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+               t += ic->tag_size;
        }
 
        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
@@ -3826,7 +3822,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                        for (i = 0; i < ic->journal_sections; i++) {
                                struct scatterlist sg;
                                struct skcipher_request *section_req;
-                               __u32 section_le = cpu_to_le32(i);
+                               __le32 section_le = cpu_to_le32(i);
 
                                memset(crypt_iv, 0x00, ivsize);
                                memset(crypt_data, 0x00, crypt_len);
@@ -4368,13 +4364,11 @@ try_smaller_buffer:
                        goto bad;
                }
                INIT_WORK(&ic->recalc_work, integrity_recalc);
-               if (!ic->discard) {
-                       ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
-                       if (!ic->recalc_buffer) {
-                               ti->error = "Cannot allocate buffer for recalculating";
-                               r = -ENOMEM;
-                               goto bad;
-                       }
+               ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+               if (!ic->recalc_buffer) {
+                       ti->error = "Cannot allocate buffer for recalculating";
+                       r = -ENOMEM;
+                       goto bad;
                }
                ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
                                                 ic->tag_size, GFP_KERNEL);
@@ -4383,9 +4377,6 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
-               if (ic->discard)
-                       memset(ic->recalc_tags, DISCARD_FILLER,
-                              (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
        } else {
                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
                        ti->error = "Recalculate can only be specified with internal_hash";
@@ -4579,7 +4570,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 
 static struct target_type integrity_target = {
        .name                   = "integrity",
-       .version                = {1, 9, 0},
+       .version                = {1, 10, 0},
        .module                 = THIS_MODULE,
        .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
        .ctr                    = dm_integrity_ctr,
index a2acb014c13ae45b7394545a57645d729bdbc764..751ec5ea1dbb5fcfaf23a28ad171a13b0f44ee92 100644 (file)
@@ -855,7 +855,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
 static uint32_t __minimum_chunk_size(struct origin *o)
 {
        struct dm_snapshot *snap;
-       unsigned chunk_size = 0;
+       unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
 
        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
@@ -1409,6 +1409,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
+               r = -EINVAL;
                goto bad_read_metadata;
        }
 
index 29385dc470d5d9509c41a127ea0e4478017a6cac..db61a1f43ae91e3328c9b8bfc1bb83fd0a0012da 100644 (file)
@@ -15,7 +15,7 @@
 #define DM_VERITY_VERIFY_ERR(s) DM_VERITY_ROOT_HASH_VERIFICATION " " s
 
 static bool require_signatures;
-module_param(require_signatures, bool, false);
+module_param(require_signatures, bool, 0444);
 MODULE_PARM_DESC(require_signatures,
                "Verify the roothash of dm-verity hash tree");
 
index 841e1c1aa5e63aa51677794f0cf578fc9635b65d..7d4ff8a5c55e2f2196c88f77401444a6c8141448 100644 (file)
@@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
        unsigned int chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
 
-       WARN_ON_ONCE(bio->bi_bdev->bd_partno);
-
        chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
        return  chunk_sectors >=
                ((sector & (chunk_sectors - 1)) + bio_sectors);
index 655db8272268d56ef882393923515f8577bc3962..9767159aeb9b2f80ec5d307bb7ae74ea3ef5f2be 100644 (file)
@@ -281,7 +281,7 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
 
        // read status reg in order to clear pending irqs
        err = sp8870_readreg(state, 0x200);
-       if (err)
+       if (err < 0)
                return err;
 
        // system controller start
index 83bd9a412a560751a5d7e3418f07606df603030b..1e3b68a8743af1bcd8aaeeaf008d1ed8ded927ed 100644 (file)
@@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
 {
        struct rcar_drif_sdr *sdr = video_drvdata(file);
 
-       memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
        f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
        f->fmt.sdr.buffersize = sdr->fmt->buffersize;
 
index a4f7431486f317326cf4074e33a892f1f9ad017f..d93d384286c165856c2f76ab7e7a4cd632003b99 100644 (file)
@@ -1424,7 +1424,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
 {
        struct sd *sd = (struct sd *) gspca_dev;
        struct cam *cam;
-       int ret;
 
        sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
        reset_camera_params(gspca_dev);
@@ -1436,10 +1435,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
        cam->cam_mode = mode;
        cam->nmodes = ARRAY_SIZE(mode);
 
-       ret = goto_low_power(gspca_dev);
-       if (ret)
-               gspca_err(gspca_dev, "Cannot go to low power mode: %d\n",
-                         ret);
+       goto_low_power(gspca_dev);
        /* Check the firmware version. */
        sd->params.version.firmwareVersion = 0;
        get_version_information(gspca_dev);
index bfa3b381d8a26ee672715648a94d7a77b85e0d6c..bf1af6ed9131e914dd4f0debb37728dcae28052e 100644 (file)
@@ -195,7 +195,7 @@ static const struct v4l2_ctrl_config mt9m111_greenbal_cfg = {
 int mt9m111_probe(struct sd *sd)
 {
        u8 data[2] = {0x00, 0x00};
-       int i, rc = 0;
+       int i, err;
        struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
 
        if (force_sensor) {
@@ -213,18 +213,18 @@ int mt9m111_probe(struct sd *sd)
        /* Do the preinit */
        for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
                if (preinit_mt9m111[i][0] == BRIDGE) {
-                       rc |= m5602_write_bridge(sd,
-                               preinit_mt9m111[i][1],
-                               preinit_mt9m111[i][2]);
+                       err = m5602_write_bridge(sd,
+                                       preinit_mt9m111[i][1],
+                                       preinit_mt9m111[i][2]);
                } else {
                        data[0] = preinit_mt9m111[i][2];
                        data[1] = preinit_mt9m111[i][3];
-                       rc |= m5602_write_sensor(sd,
-                               preinit_mt9m111[i][1], data, 2);
+                       err = m5602_write_sensor(sd,
+                                       preinit_mt9m111[i][1], data, 2);
                }
+               if (err < 0)
+                       return err;
        }
-       if (rc < 0)
-               return rc;
 
        if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
                return -ENODEV;
index d680b777f097fd092a77e578ca272b8494516edb..8fd99ceee4b67e29133cc388ae37eb7ba2b6e25a 100644 (file)
@@ -154,8 +154,8 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
 
 int po1030_probe(struct sd *sd)
 {
-       int rc = 0;
        u8 dev_id_h = 0, i;
+       int err;
        struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
 
        if (force_sensor) {
@@ -174,14 +174,14 @@ int po1030_probe(struct sd *sd)
        for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
                u8 data = preinit_po1030[i][2];
                if (preinit_po1030[i][0] == SENSOR)
-                       rc |= m5602_write_sensor(sd,
-                               preinit_po1030[i][1], &data, 1);
+                       err = m5602_write_sensor(sd, preinit_po1030[i][1],
+                                                &data, 1);
                else
-                       rc |= m5602_write_bridge(sd, preinit_po1030[i][1],
-                                               data);
+                       err = m5602_write_bridge(sd, preinit_po1030[i][1],
+                                                data);
+               if (err < 0)
+                       return err;
        }
-       if (rc < 0)
-               return rc;
 
        if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
                return -ENODEV;
index a07674ed0596512461b562fdcbc3947a7c3ead6b..4c5621b17a6fb4f6ebffe2a6152fee807b34e37e 100644 (file)
@@ -468,6 +468,7 @@ static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
        pcr->ic_version = rtl8411_get_ic_version(pcr);
index 39a6a7ecc32e9815b416b26db8c8fed9a5ce525e..29f5414072bf107b68ecf643e9aadc882922c710 100644 (file)
@@ -255,6 +255,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 8200af22b529efaa8a285c7d516425d2a6e6960d..4bcfbc9afbac18325f4afd8b52f0137e4cb823fd 100644 (file)
@@ -358,6 +358,7 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
 
@@ -483,6 +484,7 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
 
        rts5227_init_params(pcr);
        pcr->ops = &rts522a_pcr_ops;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
        pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
 
index 781a86def59a90bb27536d2865866640488d0275..ffc128278613b872daab41939e0752d56fd2da44 100644 (file)
@@ -718,6 +718,7 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 89e6f124ca5ca068e94219db9f2e58901ae7adcc..c748eaf1ec1f980a92e6b6f654fda950250ce220 100644 (file)
@@ -246,6 +246,7 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
 
index b2676e7f50271d50e5d4fb16fd95db3f1344bd87..53f3a1f45c4a758a485555d21d5eec6f0dd8fe1a 100644 (file)
@@ -566,6 +566,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
@@ -729,6 +730,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
 void rts524a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
@@ -845,6 +847,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
 void rts525a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
index 080a7d67a8e1a605a3b408518ea7367cb37f22bd..9b42b20a3e5ae97771aed280b88abcafa61569d4 100644 (file)
@@ -628,6 +628,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 6c64dade8e1af58d207429323f96cd6fb4f04d7f..1fd4e0e507302c83d0994be04730293c7abec0e5 100644 (file)
@@ -783,6 +783,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = 0x00;
        pcr->sd30_drive_sel_3v3 = 0x00;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 273311184669af4d062402c38342904da8e3bf6c..baf83594a01d338f16be66cc1f95cbd8cc2c1f48 100644 (file)
@@ -85,12 +85,18 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
        if (pcr->aspm_enabled == enable)
                return;
 
-       if (pcr->aspm_en & 0x02)
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
-                       FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
-       else
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
-                       FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+       if (pcr->aspm_mode == ASPM_MODE_CFG) {
+               pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+                                               PCI_EXP_LNKCTL_ASPMC,
+                                               enable ? pcr->aspm_en : 0);
+       } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+               if (pcr->aspm_en & 0x02)
+                       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+                               FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+               else
+                       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+                               FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+       }
 
        if (!enable && (pcr->aspm_en & 0x02))
                mdelay(10);
@@ -1394,7 +1400,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
                        return err;
        }
 
-       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+       if (pcr->aspm_mode == ASPM_MODE_REG)
+               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
 
        /* No CD interrupt if probing driver with card inserted.
         * So we need to initialize pcr->card_exist here.
@@ -1410,6 +1417,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
 {
        int err;
+       u16 cfg_val;
+       u8 val;
 
        spin_lock_init(&pcr->lock);
        mutex_init(&pcr->pcr_mutex);
@@ -1477,6 +1486,21 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
        if (!pcr->slots)
                return -ENOMEM;
 
+       if (pcr->aspm_mode == ASPM_MODE_CFG) {
+               pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
+               if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
+                       pcr->aspm_enabled = true;
+               else
+                       pcr->aspm_enabled = false;
+
+       } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+               rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+               if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+                       pcr->aspm_enabled = false;
+               else
+                       pcr->aspm_enabled = true;
+       }
+
        if (pcr->ops->fetch_vendor_settings)
                pcr->ops->fetch_vendor_settings(pcr);
 
@@ -1506,7 +1530,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        struct pcr_handle *handle;
        u32 base, len;
        int ret, i, bar = 0;
-       u8 val;
 
        dev_dbg(&(pcidev->dev),
                ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1572,11 +1595,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
        pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
        pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-       rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
-       if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
-               pcr->aspm_enabled = false;
-       else
-               pcr->aspm_enabled = true;
        pcr->card_inserted = 0;
        pcr->card_removed = 0;
        INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
index 926408b41270c2a62e6261e455510e5fdc76d219..7a6f01ace78ace991362a99abc45a4b4b741709f 100644 (file)
@@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
        at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
        if (IS_ERR(at24->nvmem)) {
                pm_runtime_disable(dev);
-               regulator_disable(at24->vcc_reg);
+               if (!pm_runtime_status_suspended(dev))
+                       regulator_disable(at24->vcc_reg);
                return PTR_ERR(at24->nvmem);
        }
 
@@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
        err = at24_read(at24, 0, &test_byte, 1);
        if (err) {
                pm_runtime_disable(dev);
-               regulator_disable(at24->vcc_reg);
+               if (!pm_runtime_status_suspended(dev))
+                       regulator_disable(at24->vcc_reg);
                return -ENODEV;
        }
 
index ff8791a651fd1030b7091b709141f367771c2bd3..af3c497defb1053b334535dd0f958e3f2b71df92 100644 (file)
@@ -2017,7 +2017,7 @@ wait_again:
                if (completion_value >= target_value) {
                        *status = CS_WAIT_STATUS_COMPLETED;
                } else {
-                       timeout -= jiffies_to_usecs(completion_rc);
+                       timeout = completion_rc;
                        goto wait_again;
                }
        } else {
index 832dd5c5bb0653952e682f4cf5385c2abd4aa3c0..0713b2c12d54f4fded7b38bf79538c02e1f0fe1e 100644 (file)
@@ -362,12 +362,9 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
        }
 
        if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
-               dev_warn(hdev->dev,
+               dev_err(hdev->dev,
                        "Device boot warning - security not ready\n");
-               /* This is a warning so we don't want it to disable the
-                * device
-                */
-               err_val &= ~CPU_BOOT_ERR0_SECURITY_NOT_RDY;
+               err_exists = true;
        }
 
        if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
@@ -403,7 +400,8 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
                err_exists = true;
        }
 
-       if (err_exists)
+       if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
+                               lower_32_bits(hdev->boot_error_status_mask)))
                return -EIO;
 
        return 0;
@@ -661,18 +659,13 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
        return rc;
 }
 
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
                                                enum pll_index *pll_index)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        u8 pll_byte, pll_bit_off;
        bool dynamic_pll;
-
-       if (input_pll_index >= PLL_MAX) {
-               dev_err(hdev->dev, "PLL index %d is out of range\n",
-                                                       input_pll_index);
-               return -EINVAL;
-       }
+       int fw_pll_idx;
 
        dynamic_pll = prop->fw_security_status_valid &&
                (prop->fw_app_security_map & CPU_BOOT_DEV_STS0_DYN_PLL_EN);
@@ -680,28 +673,39 @@ int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
        if (!dynamic_pll) {
                /*
                 * in case we are working with legacy FW (each asic has unique
-                * PLL numbering) extract the legacy numbering
+                * PLL numbering) use the driver based index as they are
+                * aligned with fw legacy numbering
                 */
-               *pll_index = hdev->legacy_pll_map[input_pll_index];
+               *pll_index = input_pll_index;
                return 0;
        }
 
+       /* retrieve a FW compatible PLL index based on
+        * ASIC specific user request
+        */
+       fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
+       if (fw_pll_idx < 0) {
+               dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
+                       input_pll_index, fw_pll_idx);
+               return -EINVAL;
+       }
+
        /* PLL map is a u8 array */
-       pll_byte = prop->cpucp_info.pll_map[input_pll_index >> 3];
-       pll_bit_off = input_pll_index & 0x7;
+       pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
+       pll_bit_off = fw_pll_idx & 0x7;
 
        if (!(pll_byte & BIT(pll_bit_off))) {
                dev_err(hdev->dev, "PLL index %d is not supported\n",
-                                                       input_pll_index);
+                       fw_pll_idx);
                return -EINVAL;
        }
 
-       *pll_index = input_pll_index;
+       *pll_index = fw_pll_idx;
 
        return 0;
 }
 
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
                u16 *pll_freq_arr)
 {
        struct cpucp_packet pkt;
@@ -844,8 +848,13 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
        if (rc) {
                dev_err(hdev->dev, "Failed to read preboot version\n");
                detect_cpu_boot_status(hdev, status);
-               fw_read_errors(hdev, boot_err0_reg,
-                               cpu_security_boot_status_reg);
+
+               /* If we read all FF, then something is totally wrong, no point
+                * of reading specific errors
+                */
+               if (status != -1)
+                       fw_read_errors(hdev, boot_err0_reg,
+                                       cpu_security_boot_status_reg);
                return -EIO;
        }
 
index 44e89da30b4a70040488c2d838755b62275aa44f..6579f8767abdaccc6edee903449a44541b789815 100644 (file)
@@ -930,6 +930,9 @@ enum div_select_defs {
  *                         driver is ready to receive asynchronous events. This
  *                         function should be called during the first init and
  *                         after every hard-reset of the device
+ * @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
+ * @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
+ *                         generic f/w compatible PLL Indexes
  */
 struct hl_asic_funcs {
        int (*early_init)(struct hl_device *hdev);
@@ -1054,6 +1057,7 @@ struct hl_asic_funcs {
                        u32 block_id, u32 block_size);
        void (*enable_events_from_fw)(struct hl_device *hdev);
        void (*get_msi_info)(u32 *table);
+       int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
 };
 
 
@@ -1950,8 +1954,6 @@ struct hl_mmu_funcs {
  * @aggregated_cs_counters: aggregated cs counters among all contexts
  * @mmu_priv: device-specific MMU data.
  * @mmu_func: device-related MMU functions.
- * @legacy_pll_map: map holding map between dynamic (common) PLL indexes and
- *                  static (asic specific) PLL indexes.
  * @dram_used_mem: current DRAM memory consumption.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1960,6 +1962,12 @@ struct hl_mmu_funcs {
  * @clock_gating_mask: is clock gating enabled. bitmask that represents the
  *                     different engines. See debugfs-driver-habanalabs for
  *                     details.
+ * @boot_error_status_mask: contains a mask of the device boot error status.
+ *                          Each bit represents a different error, according to
+ *                          the defines in hl_boot_if.h. If the bit is cleared,
+ *                          the error will be ignored by the driver during
+ *                          device initialization. Mainly used to debug and
+ *                          workaround firmware bugs
  * @in_reset: is device in reset flow.
  * @curr_pll_profile: current PLL profile.
  * @card_type: Various ASICs have several card types. This indicates the card
@@ -2071,12 +2079,11 @@ struct hl_device {
        struct hl_mmu_priv              mmu_priv;
        struct hl_mmu_funcs             mmu_func[MMU_NUM_PGT_LOCATIONS];
 
-       enum pll_index                  *legacy_pll_map;
-
        atomic64_t                      dram_used_mem;
        u64                             timeout_jiffies;
        u64                             max_power;
        u64                             clock_gating_mask;
+       u64                             boot_error_status_mask;
        atomic_t                        in_reset;
        enum hl_pll_frequency           curr_pll_profile;
        enum cpucp_card_types           card_type;
@@ -2387,9 +2394,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
                struct hl_info_pci_counters *counters);
 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
                        u64 *total_energy);
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
                                                enum pll_index *pll_index);
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
                u16 *pll_freq_arr);
 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
 int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
@@ -2411,9 +2418,9 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
 int hl_pci_init(struct hl_device *hdev);
 void hl_pci_fini(struct hl_device *hdev);
 
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
                                                                bool curr);
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
                                                                u64 freq);
 int hl_get_temperature(struct hl_device *hdev,
                       int sensor_index, u32 attr, long *value);
index 7135f1e03864107b6bcdf8b6040f1f8ff15dfe3e..64d1530db9854501047da270e80f07f5bbb39f08 100644 (file)
@@ -30,6 +30,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock);
 static int timeout_locked = 30;
 static int reset_on_lockup = 1;
 static int memory_scrub = 1;
+static ulong boot_error_status_mask = ULONG_MAX;
 
 module_param(timeout_locked, int, 0444);
 MODULE_PARM_DESC(timeout_locked,
@@ -43,6 +44,10 @@ module_param(memory_scrub, int, 0444);
 MODULE_PARM_DESC(memory_scrub,
        "Scrub device memory in various states (0 = no, 1 = yes, default yes)");
 
+module_param(boot_error_status_mask, ulong, 0444);
+MODULE_PARM_DESC(boot_error_status_mask,
+       "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
+
 #define PCI_VENDOR_ID_HABANALABS       0x1da3
 
 #define PCI_IDS_GOYA                   0x0001
@@ -319,6 +324,8 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
        hdev->major = hl_major;
        hdev->reset_on_lockup = reset_on_lockup;
        hdev->memory_scrub = memory_scrub;
+       hdev->boot_error_status_mask = boot_error_status_mask;
+
        hdev->pldm = 0;
 
        set_driver_behavior_per_device(hdev);
index 9fa61573a89de89e15d4f80e5a228bb66a890536..c9f649b31e3a96302d938d53fa02a89c76dd4439 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <linux/pci.h>
 
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
                                                                bool curr)
 {
        struct cpucp_packet pkt;
@@ -44,7 +44,7 @@ long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
        return (long) result;
 }
 
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
                                                                u64 freq)
 {
        struct cpucp_packet pkt;
index b751652f80a8ce043359bae5be7b18c98878ecda..9e4a6bb3acd11c0e5a3d11bed9a245ec11e12df0 100644 (file)
 
 #define GAUDI_PLL_MAX 10
 
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum gaudi_pll_index {
-       GAUDI_CPU_PLL = 0,
-       GAUDI_PCI_PLL,
-       GAUDI_SRAM_PLL,
-       GAUDI_HBM_PLL,
-       GAUDI_NIC_PLL,
-       GAUDI_DMA_PLL,
-       GAUDI_MESH_PLL,
-       GAUDI_MME_PLL,
-       GAUDI_TPC_PLL,
-       GAUDI_IF_PLL,
-};
-
-static enum pll_index gaudi_pll_map[PLL_MAX] = {
-       [CPU_PLL] = GAUDI_CPU_PLL,
-       [PCI_PLL] = GAUDI_PCI_PLL,
-       [SRAM_PLL] = GAUDI_SRAM_PLL,
-       [HBM_PLL] = GAUDI_HBM_PLL,
-       [NIC_PLL] = GAUDI_NIC_PLL,
-       [DMA_PLL] = GAUDI_DMA_PLL,
-       [MESH_PLL] = GAUDI_MESH_PLL,
-       [MME_PLL] = GAUDI_MME_PLL,
-       [TPC_PLL] = GAUDI_TPC_PLL,
-       [IF_PLL] = GAUDI_IF_PLL,
-};
-
 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
                "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
                "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -810,7 +780,7 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
                        freq = 0;
                }
        } else {
-               rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
+               rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
 
                if (rc)
                        return rc;
@@ -1652,9 +1622,6 @@ static int gaudi_sw_init(struct hl_device *hdev)
 
        hdev->asic_specific = gaudi;
 
-       /* store legacy PLL map */
-       hdev->legacy_pll_map = gaudi_pll_map;
-
        /* Create DMA pool for small allocations */
        hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
                        &hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5612,6 +5579,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
        struct hl_cs_job *job;
        u32 cb_size, ctl, err_cause;
        struct hl_cb *cb;
+       u64 id;
        int rc;
 
        cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
@@ -5678,8 +5646,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
        }
 
 release_cb:
+       id = cb->id;
        hl_cb_put(cb);
-       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
 
        return rc;
 }
@@ -8783,6 +8752,23 @@ static void gaudi_enable_events_from_fw(struct hl_device *hdev)
        WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
 }
 
+static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+       switch (pll_idx) {
+       case HL_GAUDI_CPU_PLL: return CPU_PLL;
+       case HL_GAUDI_PCI_PLL: return PCI_PLL;
+       case HL_GAUDI_NIC_PLL: return NIC_PLL;
+       case HL_GAUDI_DMA_PLL: return DMA_PLL;
+       case HL_GAUDI_MESH_PLL: return MESH_PLL;
+       case HL_GAUDI_MME_PLL: return MME_PLL;
+       case HL_GAUDI_TPC_PLL: return TPC_PLL;
+       case HL_GAUDI_IF_PLL: return IF_PLL;
+       case HL_GAUDI_SRAM_PLL: return SRAM_PLL;
+       case HL_GAUDI_HBM_PLL: return HBM_PLL;
+       default: return -EINVAL;
+       }
+}
+
 static const struct hl_asic_funcs gaudi_funcs = {
        .early_init = gaudi_early_init,
        .early_fini = gaudi_early_fini,
@@ -8866,7 +8852,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
        .ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
        .get_hw_block_id = gaudi_get_hw_block_id,
        .hw_block_mmap = gaudi_block_mmap,
-       .enable_events_from_fw = gaudi_enable_events_from_fw
+       .enable_events_from_fw = gaudi_enable_events_from_fw,
+       .map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx
 };
 
 /**
index 8c49da4bcbd58a1ad07f024e5430dee72b5cde7c..9b60eadd4c355ee8ccd55ce1a37169dada71e915 100644 (file)
@@ -13,7 +13,7 @@ void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
        struct gaudi_device *gaudi = hdev->asic_specific;
 
        if (freq == PLL_LAST)
-               hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+               hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
 }
 
 int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
@@ -23,7 +23,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, MME_PLL, false);
+       value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
 
        if (value < 0) {
                dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -33,7 +33,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
 
        *max_clk = (value / 1000 / 1000);
 
-       value = hl_get_frequency(hdev, MME_PLL, true);
+       value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
 
        if (value < 0) {
                dev_err(hdev->dev,
@@ -57,7 +57,7 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, MME_PLL, false);
+       value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
 
        gaudi->max_freq_value = value;
 
@@ -85,7 +85,7 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
 
        gaudi->max_freq_value = value * 1000 * 1000;
 
-       hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+       hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
 
 fail:
        return count;
@@ -100,7 +100,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, MME_PLL, true);
+       value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
 
        return sprintf(buf, "%lu\n", (value / 1000 / 1000));
 }
index e27338f4aad2f68afebc1d736ea2014d7e5231e7..e0ad2a269779b1f3b103f97340d2e7bec6a4ef68 100644 (file)
 #define IS_MME_IDLE(mme_arch_sts) \
        (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
 
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum goya_pll_index {
-       GOYA_CPU_PLL = 0,
-       GOYA_IC_PLL,
-       GOYA_MC_PLL,
-       GOYA_MME_PLL,
-       GOYA_PCI_PLL,
-       GOYA_EMMC_PLL,
-       GOYA_TPC_PLL,
-};
-
-static enum pll_index goya_pll_map[PLL_MAX] = {
-       [CPU_PLL] = GOYA_CPU_PLL,
-       [IC_PLL] = GOYA_IC_PLL,
-       [MC_PLL] = GOYA_MC_PLL,
-       [MME_PLL] = GOYA_MME_PLL,
-       [PCI_PLL] = GOYA_PCI_PLL,
-       [EMMC_PLL] = GOYA_EMMC_PLL,
-       [TPC_PLL] = GOYA_TPC_PLL,
-};
-
 static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
                "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
                "goya cq 4", "goya cpu eq"
@@ -775,7 +751,8 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
                        freq = 0;
                }
        } else {
-               rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+               rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
+                               pll_freq_arr);
 
                if (rc)
                        return;
@@ -897,9 +874,6 @@ static int goya_sw_init(struct hl_device *hdev)
 
        hdev->asic_specific = goya;
 
-       /* store legacy PLL map */
-       hdev->legacy_pll_map = goya_pll_map;
-
        /* Create DMA pool for small allocations */
        hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
                        &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5512,6 +5486,20 @@ static void goya_enable_events_from_fw(struct hl_device *hdev)
                        GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
 }
 
+static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+       switch (pll_idx) {
+       case HL_GOYA_CPU_PLL: return CPU_PLL;
+       case HL_GOYA_PCI_PLL: return PCI_PLL;
+       case HL_GOYA_MME_PLL: return MME_PLL;
+       case HL_GOYA_TPC_PLL: return TPC_PLL;
+       case HL_GOYA_IC_PLL: return IC_PLL;
+       case HL_GOYA_MC_PLL: return MC_PLL;
+       case HL_GOYA_EMMC_PLL: return EMMC_PLL;
+       default: return -EINVAL;
+       }
+}
+
 static const struct hl_asic_funcs goya_funcs = {
        .early_init = goya_early_init,
        .early_fini = goya_early_fini,
@@ -5595,7 +5583,8 @@ static const struct hl_asic_funcs goya_funcs = {
        .ack_protection_bits_errors = goya_ack_protection_bits_errors,
        .get_hw_block_id = goya_get_hw_block_id,
        .hw_block_mmap = goya_block_mmap,
-       .enable_events_from_fw = goya_enable_events_from_fw
+       .enable_events_from_fw = goya_enable_events_from_fw,
+       .map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx
 };
 
 /*
index 3acb36a1a902ea7129b23b5de379de88430c554b..7d007125727ffef8f1b153c77c1f1778247322ac 100644 (file)
@@ -13,19 +13,19 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
 
        switch (freq) {
        case PLL_HIGH:
-               hl_set_frequency(hdev, MME_PLL, hdev->high_pll);
-               hl_set_frequency(hdev, TPC_PLL, hdev->high_pll);
-               hl_set_frequency(hdev, IC_PLL, hdev->high_pll);
+               hl_set_frequency(hdev, HL_GOYA_MME_PLL, hdev->high_pll);
+               hl_set_frequency(hdev, HL_GOYA_TPC_PLL, hdev->high_pll);
+               hl_set_frequency(hdev, HL_GOYA_IC_PLL, hdev->high_pll);
                break;
        case PLL_LOW:
-               hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW);
-               hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW);
-               hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW);
+               hl_set_frequency(hdev, HL_GOYA_MME_PLL, GOYA_PLL_FREQ_LOW);
+               hl_set_frequency(hdev, HL_GOYA_TPC_PLL, GOYA_PLL_FREQ_LOW);
+               hl_set_frequency(hdev, HL_GOYA_IC_PLL, GOYA_PLL_FREQ_LOW);
                break;
        case PLL_LAST:
-               hl_set_frequency(hdev, MME_PLL, goya->mme_clk);
-               hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk);
-               hl_set_frequency(hdev, IC_PLL, goya->ic_clk);
+               hl_set_frequency(hdev, HL_GOYA_MME_PLL, goya->mme_clk);
+               hl_set_frequency(hdev, HL_GOYA_TPC_PLL, goya->tpc_clk);
+               hl_set_frequency(hdev, HL_GOYA_IC_PLL, goya->ic_clk);
                break;
        default:
                dev_err(hdev->dev, "unknown frequency setting\n");
@@ -39,7 +39,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, MME_PLL, false);
+       value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
 
        if (value < 0) {
                dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -49,7 +49,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
 
        *max_clk = (value / 1000 / 1000);
 
-       value = hl_get_frequency(hdev, MME_PLL, true);
+       value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
 
        if (value < 0) {
                dev_err(hdev->dev,
@@ -72,7 +72,7 @@ static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, MME_PLL, false);
+       value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
 
        if (value < 0)
                return value;
@@ -105,7 +105,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
                goto fail;
        }
 
-       hl_set_frequency(hdev, MME_PLL, value);
+       hl_set_frequency(hdev, HL_GOYA_MME_PLL, value);
        goya->mme_clk = value;
 
 fail:
@@ -121,7 +121,7 @@ static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, TPC_PLL, false);
+       value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, false);
 
        if (value < 0)
                return value;
@@ -154,7 +154,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
                goto fail;
        }
 
-       hl_set_frequency(hdev, TPC_PLL, value);
+       hl_set_frequency(hdev, HL_GOYA_TPC_PLL, value);
        goya->tpc_clk = value;
 
 fail:
@@ -170,7 +170,7 @@ static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, IC_PLL, false);
+       value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, false);
 
        if (value < 0)
                return value;
@@ -203,7 +203,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
                goto fail;
        }
 
-       hl_set_frequency(hdev, IC_PLL, value);
+       hl_set_frequency(hdev, HL_GOYA_IC_PLL, value);
        goya->ic_clk = value;
 
 fail:
@@ -219,7 +219,7 @@ static ssize_t mme_clk_curr_show(struct device *dev,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, MME_PLL, true);
+       value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
 
        if (value < 0)
                return value;
@@ -236,7 +236,7 @@ static ssize_t tpc_clk_curr_show(struct device *dev,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, TPC_PLL, true);
+       value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, true);
 
        if (value < 0)
                return value;
@@ -253,7 +253,7 @@ static ssize_t ic_clk_curr_show(struct device *dev,
        if (!hl_device_operational(hdev, NULL))
                return -ENODEV;
 
-       value = hl_get_frequency(hdev, IC_PLL, true);
+       value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, true);
 
        if (value < 0)
                return value;
index 2bdf560ee681ba7cbe3535379f7ffe75e5e90627..0f9ea75b0b18976d0807e05c95c342639e2bf55d 100644 (file)
@@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
        for (i = 0; i < NUM_MIRRORED_REGS; i++) {
                temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
                if (temp < 0)
-                       data->regs[regs_to_copy[i]] = 0;
+                       temp = 0;
                data->regs[regs_to_copy[i]] = temp >> 8;
        }
 
index 64d33e3685091352aa6cab2b8ffd4a70c109bb5c..67c5b452dd35632b011028a078401d94ae112c2d 100644 (file)
                printk(KERN_INFO a);    \
 } while (0)
 #define v2printk(a...) do {            \
-       if (verbose > 1)                \
+       if (verbose > 1) {              \
                printk(KERN_INFO a);    \
+       }                               \
        touch_nmi_watchdog();           \
 } while (0)
 #define eprintk(a...) do {             \
index c394c0b08519aa4a44290ebaf717a52b36051667..7ac788fae1b8647b0641c1a50c2a41c39a9c39e3 100644 (file)
@@ -271,6 +271,7 @@ struct lis3lv02d {
        int                     regs_size;
        u8                      *reg_cache;
        bool                    regs_stored;
+       bool                    init_required;
        u8                      odr_mask;  /* ODR bit mask */
        u8                      whoami;    /* indicates measurement precision */
        s16 (*read_data) (struct lis3lv02d *lis3, int reg);
index a98f6b895af71dfa7f2657f9b072abb4232b0c08..aab3ebfa9fc4ddaabea8cb301c9aab5998a847db 100644 (file)
@@ -277,6 +277,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
                return ret;
        }
 
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_request_autosuspend(dev->dev);
+
        list_move_tail(&cb->list, &cl->rd_pending);
 
        return 0;
index b8b771b643cc8bfc9fb685d53936cd614104846c..016a6106151a5bcdd75a66483ed2d57842c69124 100644 (file)
@@ -236,7 +236,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
        if (host->dram_access_quirk)
                return;
 
-       if (data->blocks > 1) {
+       /* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
+       if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
                /*
                 * In block mode DMA descriptor format, "length" field indicates
                 * number of blocks and there is no way to pass DMA size that
@@ -258,7 +259,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
        for_each_sg(data->sg, sg, data->sg_len, i) {
                /* check for 8 byte alignment */
                if (sg->offset % 8) {
-                       WARN_ONCE(1, "unaligned scatterlist buffer\n");
+                       dev_warn_once(mmc_dev(mmc),
+                                     "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
+                                     sg->offset);
                        return;
                }
        }
index 635bf31a67359f102194fc83b08badc645dafc39..baab4c2e1b5337afef2ed967abec0666c41148b2 100644 (file)
@@ -692,14 +692,19 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        /* Issue CMD19 twice for each tap */
        for (i = 0; i < 2 * priv->tap_num; i++) {
+               int cmd_error;
+
                /* Set sampling clock position */
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
 
-               if (mmc_send_tuning(mmc, opcode, NULL) == 0)
+               if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
                        set_bit(i, priv->taps);
 
                if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
                        set_bit(i, priv->smpcmp);
+
+               if (cmd_error)
+                       mmc_abort_tuning(mmc, opcode);
        }
 
        ret = renesas_sdhi_select_tuning(host);
@@ -939,7 +944,7 @@ static const struct soc_device_attribute sdhi_quirks_match[]  = {
        { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
        { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
-       { .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
+       { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
        { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
        { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
        { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
index 592d79082f58cd71767095b96fe774ebf60ad1a6..061618aa247f5d1865b22959d558aab77218c91d 100644 (file)
@@ -627,8 +627,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
         *
         * Wait 5ms after set 1.8V signal enable in Host Control 2 register
         * to ensure 1.8V signal enable bit is set by GL9750/GL9755.
+        *
+        * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
+        * slightly longer than 5ms before the control register reports that
+        * 1.8V is ready, and far longer still before the card will actually
+        * work reliably.
         */
-       usleep_range(5000, 5500);
+       usleep_range(100000, 110000);
 }
 
 static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
index 6edf78c16fc8b0a5a1abceb4281e8764886892c2..df40927e56788fa30ce5a42bffdd3c53673a3f13 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/iopoll.h>
@@ -240,6 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
        return 0;
 }
 
+static int cs553x_ecc_correct(struct nand_chip *chip,
+                             unsigned char *buf,
+                             unsigned char *read_ecc,
+                             unsigned char *calc_ecc)
+{
+       return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+                                     chip->ecc.size, false);
+}
+
 static struct cs553x_nand_controller *controllers[4];
 
 static int cs553x_attach_chip(struct nand_chip *chip)
@@ -251,7 +261,7 @@ static int cs553x_attach_chip(struct nand_chip *chip)
        chip->ecc.bytes = 3;
        chip->ecc.hwctl  = cs_enable_hwecc;
        chip->ecc.calculate = cs_calculate_ecc;
-       chip->ecc.correct  = rawnand_sw_hamming_correct;
+       chip->ecc.correct  = cs553x_ecc_correct;
        chip->ecc.strength = 1;
 
        return 0;
index bf695255b43a25ae213db314ee836cb783127e96..a3e66155ae4055262656ab1ca64eb8c3691c54df 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
@@ -432,6 +433,15 @@ static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
        return 0;
 }
 
+static int fsmc_correct_ecc1(struct nand_chip *chip,
+                            unsigned char *buf,
+                            unsigned char *read_ecc,
+                            unsigned char *calc_ecc)
+{
+       return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+                                     chip->ecc.size, false);
+}
+
 /* Count the number of 0's in buff upto a max of max_bits */
 static int count_written_bits(u8 *buff, int size, int max_bits)
 {
@@ -917,7 +927,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
        case NAND_ECC_ENGINE_TYPE_ON_HOST:
                dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
                nand->ecc.calculate = fsmc_read_hwecc_ecc1;
-               nand->ecc.correct = rawnand_sw_hamming_correct;
+               nand->ecc.correct = fsmc_correct_ecc1;
                nand->ecc.hwctl = fsmc_enable_hwecc;
                nand->ecc.bytes = 3;
                nand->ecc.strength = 1;
index 6b7269cfb7d8390ef3372ef755102454fe76624c..d7dfc6fd85ca73eb90fd5c2add1e2bb20c5993e7 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/mtd/lpc32xx_slc.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
 
 #define LPC32XX_MODNAME                "lpc32xx-nand"
 
@@ -344,6 +345,18 @@ static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
        return 0;
 }
 
+/*
+ * Corrects the data
+ */
+static int lpc32xx_nand_ecc_correct(struct nand_chip *chip,
+                                   unsigned char *buf,
+                                   unsigned char *read_ecc,
+                                   unsigned char *calc_ecc)
+{
+       return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+                                     chip->ecc.size, false);
+}
+
 /*
  * Read a single byte from NAND device
  */
@@ -802,7 +815,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
        chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
        chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
        chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
-       chip->ecc.correct = rawnand_sw_hamming_correct;
+       chip->ecc.correct = lpc32xx_nand_ecc_correct;
        chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
 
        /*
index 338d6b1a189eb69d112f069f1763554d97fbf239..98d5a94c3a24238b43cd0c60b73a6c59f2084503 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/mtd/ndfc.h>
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
@@ -100,6 +101,15 @@ static int ndfc_calculate_ecc(struct nand_chip *chip,
        return 0;
 }
 
+static int ndfc_correct_ecc(struct nand_chip *chip,
+                           unsigned char *buf,
+                           unsigned char *read_ecc,
+                           unsigned char *calc_ecc)
+{
+       return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+                                     chip->ecc.size, false);
+}
+
 /*
  * Speedups for buffer read/write/verify
  *
@@ -145,7 +155,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
        chip->controller = &ndfc->ndfc_control;
        chip->legacy.read_buf = ndfc_read_buf;
        chip->legacy.write_buf = ndfc_write_buf;
-       chip->ecc.correct = rawnand_sw_hamming_correct;
+       chip->ecc.correct = ndfc_correct_ecc;
        chip->ecc.hwctl = ndfc_enable_hwecc;
        chip->ecc.calculate = ndfc_calculate_ecc;
        chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
index 5612ee628425b733bc4bdadfde9aa27c2848f108..2f1fe464e66377f6a0ee6a69bbc66e8421654741 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/sharpsl.h>
@@ -96,6 +97,15 @@ static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
        return readb(sharpsl->io + ECCCNTR) != 0;
 }
 
+static int sharpsl_nand_correct_ecc(struct nand_chip *chip,
+                                   unsigned char *buf,
+                                   unsigned char *read_ecc,
+                                   unsigned char *calc_ecc)
+{
+       return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+                                     chip->ecc.size, false);
+}
+
 static int sharpsl_attach_chip(struct nand_chip *chip)
 {
        if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
@@ -106,7 +116,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip)
        chip->ecc.strength = 1;
        chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
        chip->ecc.calculate = sharpsl_nand_calculate_ecc;
-       chip->ecc.correct = rawnand_sw_hamming_correct;
+       chip->ecc.correct = sharpsl_nand_correct_ecc;
 
        return 0;
 }
index de8e919d0ebe647ab39f474f01e445c303b0c051..6d93dd31969b22bf829291f745be7ce7f14f62ea 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/slab.h>
@@ -292,11 +293,12 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
        int r0, r1;
 
        /* assume ecc.size = 512 and ecc.bytes = 6 */
-       r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc);
+       r0 = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+                                   chip->ecc.size, false);
        if (r0 < 0)
                return r0;
-       r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3,
-                                       calc_ecc + 3);
+       r1 = ecc_sw_hamming_correct(buf + 256, read_ecc + 3, calc_ecc + 3,
+                                   chip->ecc.size, false);
        if (r1 < 0)
                return r1;
        return r0 + r1;
index 1a9449e53bf9dea75d9782e14a3f238c4fc0ef97..b8894ac27073c29b5f432aae385f15c63fe30b9e 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
@@ -193,8 +194,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
        int stat;
 
        for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
-               stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
-                                                 calc_ecc);
+               stat = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+                                             chip->ecc.size, false);
                if (stat < 0)
                        return stat;
                corrected += stat;
index 0fd8d2a0db973c5233ce978835360d777d683adb..192190c42fc84d3f8c8ba72465fa3c44bf5243e7 100644 (file)
@@ -57,20 +57,22 @@ static int parse_fixed_partitions(struct mtd_info *master,
        if (!mtd_node)
                return 0;
 
-       ofpart_node = of_get_child_by_name(mtd_node, "partitions");
-       if (!ofpart_node && !master->parent) {
-               /*
-                * We might get here even when ofpart isn't used at all (e.g.,
-                * when using another parser), so don't be louder than
-                * KERN_DEBUG
-                */
-               pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
-                        master->name, mtd_node);
+       if (!master->parent) { /* Master */
+               ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+               if (!ofpart_node) {
+                       /*
+                        * We might get here even when ofpart isn't used at all (e.g.,
+                        * when using another parser), so don't be louder than
+                        * KERN_DEBUG
+                        */
+                       pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
+                               master->name, mtd_node);
+                       ofpart_node = mtd_node;
+                       dedicated = false;
+               }
+       } else { /* Partition */
                ofpart_node = mtd_node;
-               dedicated = false;
        }
-       if (!ofpart_node)
-               return 0;
 
        of_id = of_match_node(parse_ofpart_match_table, ofpart_node);
        if (dedicated && !of_id) {
index ba8e70a8e3125d45b14d1aad19f87ee2d188ee45..6b12ce822e51a8da1d18a3c4ca55fb1a72ed7542 100644 (file)
@@ -327,6 +327,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
                        break;
        }
 
+       dev->base_addr = ioaddr;
+
        /* Reserve any actual interrupt. */
        if (dev->irq) {
                retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
@@ -334,8 +336,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
                        goto err_out;
        }
 
-       dev->base_addr = ioaddr;
-
         lp = netdev_priv(dev);
         spin_lock_init(&lp->lock);
 
index 20bbda1b36e1e71968ce2498bce098529b7e00cb..c5a646d06102a7f73b820978cc294e0c97218267 100644 (file)
@@ -1526,6 +1526,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
 
        slave->bond = bond;
        slave->dev = slave_dev;
+       INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
 
        if (bond_kobj_init(slave))
                return NULL;
@@ -1538,7 +1539,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
                        return NULL;
                }
        }
-       INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
 
        return slave;
 }
index da6fffb4d5a8eb689bd1a74e81d3659e883135bc..d17482395a4dace2cfa319a675be463849be3366 100644 (file)
@@ -269,9 +269,6 @@ static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ser_device *ser;
 
-       if (WARN_ON(!dev))
-               return -EINVAL;
-
        ser = netdev_priv(dev);
 
        /* Send flow off once, on high water mark */
index 9150038b60cb4ed8d84c211682e7a02ac2d1f9ba..3b018fcf44124ac8ba04ed5d979206a8fe2304f3 100644 (file)
@@ -821,11 +821,9 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
        bcm_sf2_sw_mac_link_set(ds, port, interface, true);
 
        if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
-               u32 reg_rgmii_ctrl;
+               u32 reg_rgmii_ctrl = 0;
                u32 reg, offset;
 
-               reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
-
                if (priv->type == BCM4908_DEVICE_ID ||
                    priv->type == BCM7445_DEVICE_ID)
                        offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
@@ -836,6 +834,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
                    interface == PHY_INTERFACE_MODE_RGMII_TXID ||
                    interface == PHY_INTERFACE_MODE_MII ||
                    interface == PHY_INTERFACE_MODE_REVMII) {
+                       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
                        reg = reg_readl(priv, reg_rgmii_ctrl);
                        reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
 
index 55e5d479acce332046aa177ddbcc72f6e5920c09..854e25f43fa708157cf4785ca1c170763b96c0ee 100644 (file)
@@ -1530,6 +1530,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
                .num_statics = 16,
                .cpu_ports = 0x7F,      /* can be configured as cpu port */
                .port_cnt = 7,          /* total physical port count */
+               .phy_errata_9477 = true,
        },
 };
 
index 96f7c9eede358ef781a4d72ca19759a44fe1cd7b..9b90f3d3a8f505f360bbc88366ff5b442a5275a5 100644 (file)
@@ -1262,14 +1262,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
 {
        struct mt7530_priv *priv = ds->priv;
 
-       /* The real fabric path would be decided on the membership in the
-        * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
-        * means potential VLAN can be consisting of certain subset of all
-        * ports.
-        */
-       mt7530_rmw(priv, MT7530_PCR_P(port),
-                  PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
-
        /* Trapped into security mode allows packet forwarding through VLAN
         * table lookup. CPU port is set to fallback mode to let untagged
         * frames pass through.
index 2473bebe48e6ee19af24890f9b5d4f57f96516ac..f966a253d1c77a9aa6af3b6f27327e61c9951d56 100644 (file)
@@ -1227,12 +1227,17 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
        if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
                return -ERANGE;
 
-       /* Set port num and disable ALWAYS_GUARD_BAND_SCH_Q, which means set
-        * guard band to be implemented for nonschedule queues to schedule
-        * queues transition.
+       /* Enable guard band. The switch will schedule frames without taking
+        * their length into account. Thus we'll always need to enable the
+        * guard band which reserves the time of a maximum sized frame at the
+        * end of the time window.
+        *
+        * Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
+        * need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
+        * operate on the port number.
         */
-       ocelot_rmw(ocelot,
-                  QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+       ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+                  QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
                   QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
                   QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
                   QSYS_TAS_PARAM_CFG_CTRL);
index b777d3f375736e891f91fb6b54b838973e4aa4b6..12cd04b56803056e6c4e687d543f9ff872676664 100644 (file)
@@ -167,9 +167,10 @@ enum sja1105_hostcmd {
        SJA1105_HOSTCMD_INVALIDATE = 4,
 };
 
+/* Command and entry overlap */
 static void
-sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
-                             enum packing_op op)
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+                               enum packing_op op)
 {
        const int size = SJA1105_SIZE_DYN_CMD;
 
@@ -179,6 +180,20 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
        sja1105_packing(buf, &cmd->index,    9,  0, size, op);
 }
 
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+                                 enum packing_op op)
+{
+       u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+       const int size = SJA1105_SIZE_DYN_CMD;
+
+       sja1105_packing(p, &cmd->valid,   31, 31, size, op);
+       sja1105_packing(p, &cmd->errors,  30, 30, size, op);
+       sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+       sja1105_packing(p, &cmd->index,    9,  0, size, op);
+}
+
 static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
                                                enum packing_op op)
 {
@@ -641,7 +656,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
 const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
        [BLK_IDX_VL_LOOKUP] = {
                .entry_packing = sja1105et_vl_lookup_entry_packing,
-               .cmd_packing = sja1105_vl_lookup_cmd_packing,
+               .cmd_packing = sja1105et_vl_lookup_cmd_packing,
                .access = OP_WRITE,
                .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
                .packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
@@ -725,7 +740,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
 const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
        [BLK_IDX_VL_LOOKUP] = {
                .entry_packing = sja1105_vl_lookup_entry_packing,
-               .cmd_packing = sja1105_vl_lookup_cmd_packing,
+               .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
                .access = (OP_READ | OP_WRITE),
                .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
                .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
index 405024b637d6c0ee43034653f2368fdf2f239753..b88d9ef45a1f1d138861bc454dcf197171dd0e6b 100644 (file)
@@ -26,6 +26,7 @@
 #include "sja1105_tas.h"
 
 #define SJA1105_UNKNOWN_MULTICAST      0x010000000000ull
+#define SJA1105_DEFAULT_VLAN           (VLAN_N_VID - 1)
 
 static const struct dsa_switch_ops sja1105_switch_ops;
 
@@ -207,6 +208,7 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
                default:
                        dev_err(dev, "Unsupported PHY mode %s!\n",
                                phy_modes(ports[i].phy_mode));
+                       return -EINVAL;
                }
 
                /* Even though the SerDes port is able to drive SGMII autoneg
@@ -321,6 +323,13 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
        return 0;
 }
 
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
 static int sja1105_init_static_vlan(struct sja1105_private *priv)
 {
        struct sja1105_table *table;
@@ -330,17 +339,13 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
                .vmemb_port = 0,
                .vlan_bc = 0,
                .tag_port = 0,
-               .vlanid = 1,
+               .vlanid = SJA1105_DEFAULT_VLAN,
        };
        struct dsa_switch *ds = priv->ds;
        int port;
 
        table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
 
-       /* The static VLAN table will only contain the initial pvid of 1.
-        * All other VLANs are to be configured through dynamic entries,
-        * and kept in the static configuration table as backing memory.
-        */
        if (table->entry_count) {
                kfree(table->entries);
                table->entry_count = 0;
@@ -353,9 +358,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
 
        table->entry_count = 1;
 
-       /* VLAN 1: all DT-defined ports are members; no restrictions on
-        * forwarding; always transmit as untagged.
-        */
        for (port = 0; port < ds->num_ports; port++) {
                struct sja1105_bridge_vlan *v;
 
@@ -366,15 +368,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
                pvid.vlan_bc |= BIT(port);
                pvid.tag_port &= ~BIT(port);
 
-               /* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
-                * transmitted as untagged.
-                */
                v = kzalloc(sizeof(*v), GFP_KERNEL);
                if (!v)
                        return -ENOMEM;
 
                v->port = port;
-               v->vid = 1;
+               v->vid = SJA1105_DEFAULT_VLAN;
                v->untagged = true;
                if (dsa_is_cpu_port(ds, port))
                        v->pvid = true;
@@ -2817,11 +2816,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
        bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
        struct sja1105_bridge_vlan *v;
 
-       list_for_each_entry(v, vlan_list, list)
-               if (v->port == port && v->vid == vid &&
-                   v->untagged == untagged && v->pvid == pvid)
+       list_for_each_entry(v, vlan_list, list) {
+               if (v->port == port && v->vid == vid) {
                        /* Already added */
-                       return 0;
+                       if (v->untagged == untagged && v->pvid == pvid)
+                               /* Nothing changed */
+                               return 0;
+
+                       /* It's the same VLAN, but some of the flags changed
+                        * and the user did not bother to delete it first.
+                        * Update it and trigger sja1105_build_vlan_table.
+                        */
+                       v->untagged = untagged;
+                       v->pvid = pvid;
+                       return 1;
+               }
+       }
 
        v = kzalloc(sizeof(*v), GFP_KERNEL);
        if (!v) {
@@ -2976,13 +2986,13 @@ static int sja1105_setup(struct dsa_switch *ds)
        rc = sja1105_static_config_load(priv, ports);
        if (rc < 0) {
                dev_err(ds->dev, "Failed to load static config: %d\n", rc);
-               return rc;
+               goto out_ptp_clock_unregister;
        }
        /* Configure the CGU (PHY link modes and speeds) */
        rc = sja1105_clocking_setup(priv);
        if (rc < 0) {
                dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
-               return rc;
+               goto out_static_config_free;
        }
        /* On SJA1105, VLAN filtering per se is always enabled in hardware.
         * The only thing we can do to disable it is lie about what the 802.1Q
@@ -3003,7 +3013,7 @@ static int sja1105_setup(struct dsa_switch *ds)
 
        rc = sja1105_devlink_setup(ds);
        if (rc < 0)
-               return rc;
+               goto out_static_config_free;
 
        /* The DSA/switchdev model brings up switch ports in standalone mode by
         * default, and that means vlan_filtering is 0 since they're not under
@@ -3012,6 +3022,17 @@ static int sja1105_setup(struct dsa_switch *ds)
        rtnl_lock();
        rc = sja1105_setup_8021q_tagging(ds, true);
        rtnl_unlock();
+       if (rc)
+               goto out_devlink_teardown;
+
+       return 0;
+
+out_devlink_teardown:
+       sja1105_devlink_teardown(ds);
+out_ptp_clock_unregister:
+       sja1105_ptp_clock_unregister(ds);
+out_static_config_free:
+       sja1105_static_config_free(&priv->static_config);
 
        return rc;
 }
@@ -3646,8 +3667,10 @@ static int sja1105_probe(struct spi_device *spi)
                priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
                                         sizeof(struct sja1105_cbs_entry),
                                         GFP_KERNEL);
-               if (!priv->cbs)
-                       return -ENOMEM;
+               if (!priv->cbs) {
+                       rc = -ENOMEM;
+                       goto out_unregister_switch;
+               }
        }
 
        /* Connections between dsa_port and sja1105_port */
@@ -3672,7 +3695,7 @@ static int sja1105_probe(struct spi_device *spi)
                        dev_err(ds->dev,
                                "failed to create deferred xmit thread: %d\n",
                                rc);
-                       goto out;
+                       goto out_destroy_workers;
                }
                skb_queue_head_init(&sp->xmit_queue);
                sp->xmit_tpid = ETH_P_SJA1105;
@@ -3682,7 +3705,8 @@ static int sja1105_probe(struct spi_device *spi)
        }
 
        return 0;
-out:
+
+out_destroy_workers:
        while (port-- > 0) {
                struct sja1105_port *sp = &priv->ports[port];
 
@@ -3691,6 +3715,10 @@ out:
 
                kthread_destroy_worker(sp->xmit_worker);
        }
+
+out_unregister_switch:
+       dsa_unregister_switch(ds);
+
        return rc;
 }
 
index c0986096c701d368952ae3a8148e0a045ed89be1..5bace8a93d73be50d79f5da89abb2946998b9540 100644 (file)
@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                BNX2_WR(bp, PCI_COMMAND, reg);
        } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
                !(bp->flags & BNX2_FLAG_PCIX)) {
-
                dev_err(&pdev->dev,
                        "5706 A1 can only be used in a PCIX bus, aborting\n");
+               rc = -EPERM;
                goto err_out_unmap;
        }
 
index d21f085044cddd81ec46fbac2e3ad1ca6587f407..27943b0446c28465abd249aa65c875eb2bb900b1 100644 (file)
@@ -1223,8 +1223,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
                goto failed;
 
        /* SR-IOV capability was enabled but there are no VFs*/
-       if (iov->total == 0)
+       if (iov->total == 0) {
+               err = -EINVAL;
                goto failed;
+       }
 
        iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
 
index 2985844634c8b1a59ab27d4ef680c164eef50ea4..fcc729d52b17450f1678f4517b67b89d9a7a5433 100644 (file)
@@ -282,7 +282,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
 {
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
-               idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+               idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+               idx == NETXTREME_E_P5_VF_HV);
 }
 
 #define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
@@ -6932,17 +6933,10 @@ ctx_err:
 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
                                  __le64 *pg_dir)
 {
-       u8 pg_size = 0;
-
        if (!rmem->nr_pages)
                return;
 
-       if (BNXT_PAGE_SHIFT == 13)
-               pg_size = 1 << 4;
-       else if (BNXT_PAGE_SIZE == 16)
-               pg_size = 2 << 4;
-
-       *pg_attr = pg_size;
+       BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
        if (rmem->depth >= 1) {
                if (rmem->depth == 2)
                        *pg_attr |= 2;
@@ -10785,37 +10779,125 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
        return rc;
 }
 
+static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
+                             u8 **nextp)
+{
+       struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+       int hdr_count = 0;
+       u8 *nexthdr;
+       int start;
+
+       /* Check that there are at most 2 IPv6 extension headers, no
+        * fragment header, and each is <= 64 bytes.
+        */
+       start = nw_off + sizeof(*ip6h);
+       nexthdr = &ip6h->nexthdr;
+       while (ipv6_ext_hdr(*nexthdr)) {
+               struct ipv6_opt_hdr *hp;
+               int hdrlen;
+
+               if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
+                   *nexthdr == NEXTHDR_FRAGMENT)
+                       return false;
+               hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
+                                         skb_headlen(skb), NULL);
+               if (!hp)
+                       return false;
+               if (*nexthdr == NEXTHDR_AUTH)
+                       hdrlen = ipv6_authlen(hp);
+               else
+                       hdrlen = ipv6_optlen(hp);
+
+               if (hdrlen > 64)
+                       return false;
+               nexthdr = &hp->nexthdr;
+               start += hdrlen;
+               hdr_count++;
+       }
+       if (nextp) {
+               /* Caller will check inner protocol */
+               if (skb->encapsulation) {
+                       *nextp = nexthdr;
+                       return true;
+               }
+               *nextp = NULL;
+       }
+       /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
+       return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
+}
+
+/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_hdr(skb);
+       __be16 udp_port = uh->dest;
+
+       if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+               return false;
+       if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+               struct ethhdr *eh = inner_eth_hdr(skb);
+
+               switch (eh->h_proto) {
+               case htons(ETH_P_IP):
+                       return true;
+               case htons(ETH_P_IPV6):
+                       return bnxt_exthdr_check(bp, skb,
+                                                skb_inner_network_offset(skb),
+                                                NULL);
+               }
+       }
+       return false;
+}
+
+static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
+{
+       switch (l4_proto) {
+       case IPPROTO_UDP:
+               return bnxt_udp_tunl_check(bp, skb);
+       case IPPROTO_IPIP:
+               return true;
+       case IPPROTO_GRE: {
+               switch (skb->inner_protocol) {
+               default:
+                       return false;
+               case htons(ETH_P_IP):
+                       return true;
+               case htons(ETH_P_IPV6):
+                       fallthrough;
+               }
+       }
+       case IPPROTO_IPV6:
+               /* Check ext headers of inner ipv6 */
+               return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+                                        NULL);
+       }
+       return false;
+}
+
 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
                                             struct net_device *dev,
                                             netdev_features_t features)
 {
-       struct bnxt *bp;
-       __be16 udp_port;
-       u8 l4_proto = 0;
+       struct bnxt *bp = netdev_priv(dev);
+       u8 *l4_proto;
 
        features = vlan_features_check(skb, features);
-       if (!skb->encapsulation)
-               return features;
-
        switch (vlan_get_protocol(skb)) {
        case htons(ETH_P_IP):
-               l4_proto = ip_hdr(skb)->protocol;
+               if (!skb->encapsulation)
+                       return features;
+               l4_proto = &ip_hdr(skb)->protocol;
+               if (bnxt_tunl_check(bp, skb, *l4_proto))
+                       return features;
                break;
        case htons(ETH_P_IPV6):
-               l4_proto = ipv6_hdr(skb)->nexthdr;
+               if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
+                                      &l4_proto))
+                       break;
+               if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
+                       return features;
                break;
-       default:
-               return features;
        }
-
-       if (l4_proto != IPPROTO_UDP)
-               return features;
-
-       bp = netdev_priv(dev);
-       /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
-       udp_port = udp_hdr(skb)->dest;
-       if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
-               return features;
        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
index 98e0cef4532cbacb90ef2e464b5c552d373e5aef..30e47ea343f916759e877161639962cc40af6546 100644 (file)
@@ -1457,6 +1457,16 @@ struct bnxt_ctx_pg_info {
 
 #define BNXT_BACKING_STORE_CFG_LEGACY_LEN      256
 
+#define BNXT_SET_CTX_PAGE_ATTR(attr)                                   \
+do {                                                                   \
+       if (BNXT_PAGE_SIZE == 0x2000)                                   \
+               attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K;    \
+       else if (BNXT_PAGE_SIZE == 0x10000)                             \
+               attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K;   \
+       else                                                            \
+               attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K;    \
+} while (0)
+
 struct bnxt_ctx_mem_info {
        u32     qp_max_entries;
        u16     qp_min_qp1_entries;
index 6bc7d41d519b7065f809b18be4f5b450b871c2d3..a0c7b1167dbb243d436b58d5e948332b72a513f4 100644 (file)
@@ -2867,6 +2867,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
        struct gem_stats *hwstat = &bp->hw_stats.gem;
        struct net_device_stats *nstat = &bp->dev->stats;
 
+       if (!netif_running(bp->dev))
+               return nstat;
+
        gem_update_stats(bp);
 
        nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
index 7c5af4beedc6d31318fb6f46a6f5f8470e5b33fb..591229b96257e4e5cc379a7dd19670e280902d66 100644 (file)
@@ -1153,7 +1153,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
  * @lio: per-network private data
  * @start_stop: whether to start or stop
  */
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 {
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
@@ -1161,15 +1161,15 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
        int retval;
 
        if (oct->props[lio->ifidx].rx_on == start_stop)
-               return;
+               return 0;
 
        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, 0);
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
-                          "Failed to allocate octeon_soft_command\n");
-               return;
+                          "Failed to allocate octeon_soft_command struct\n");
+               return -ENOMEM;
        }
 
        ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -1192,18 +1192,19 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
                octeon_free_soft_command(oct, sc);
-               return;
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
-                       return;
+                       return retval;
 
                oct->props[lio->ifidx].rx_on = start_stop;
                WRITE_ONCE(sc->caller_is_done, true);
        }
+
+       return retval;
 }
 
 /**
@@ -1778,6 +1779,7 @@ static int liquidio_open(struct net_device *netdev)
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
+       int ret = 0;
 
        if (oct->props[lio->ifidx].napi_enabled == 0) {
                tasklet_disable(&oct_priv->droq_tasklet);
@@ -1813,7 +1815,9 @@ static int liquidio_open(struct net_device *netdev)
        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
 
        /* tell Octeon to start forwarding packets to host */
-       send_rx_ctrl_cmd(lio, 1);
+       ret = send_rx_ctrl_cmd(lio, 1);
+       if (ret)
+               return ret;
 
        /* start periodical statistics fetch */
        INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1824,7 +1828,7 @@ static int liquidio_open(struct net_device *netdev)
        dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
                 netdev->name);
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -1838,6 +1842,7 @@ static int liquidio_stop(struct net_device *netdev)
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
+       int ret = 0;
 
        ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 
@@ -1854,7 +1859,9 @@ static int liquidio_stop(struct net_device *netdev)
        lio->link_changes++;
 
        /* Tell Octeon that nic interface is down. */
-       send_rx_ctrl_cmd(lio, 0);
+       ret = send_rx_ctrl_cmd(lio, 0);
+       if (ret)
+               return ret;
 
        if (OCTEON_CN23XX_PF(oct)) {
                if (!oct->msix_on)
@@ -1889,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
 
        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
 
-       return 0;
+       return ret;
 }
 
 /**
index 516f166ceff8c3195864e6bfa4a5c7a0a3a8ea62..ffddb3126a3236d5bd99d6755400335adc167dbf 100644 (file)
@@ -595,7 +595,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
  * @lio: per-network private data
  * @start_stop: whether to start or stop
  */
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
 {
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        struct octeon_soft_command *sc;
@@ -603,11 +603,16 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
        int retval;
 
        if (oct->props[lio->ifidx].rx_on == start_stop)
-               return;
+               return 0;
 
        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, 0);
+       if (!sc) {
+               netif_info(lio, rx_err, lio->netdev,
+                          "Failed to allocate octeon_soft_command struct\n");
+               return -ENOMEM;
+       }
 
        ncmd = (union octnet_cmd *)sc->virtdptr;
 
@@ -635,11 +640,13 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
                 */
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
-                       return;
+                       return retval;
 
                oct->props[lio->ifidx].rx_on = start_stop;
                WRITE_ONCE(sc->caller_is_done, true);
        }
+
+       return retval;
 }
 
 /**
@@ -906,6 +913,7 @@ static int liquidio_open(struct net_device *netdev)
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
+       int ret = 0;
 
        if (!oct->props[lio->ifidx].napi_enabled) {
                tasklet_disable(&oct_priv->droq_tasklet);
@@ -932,11 +940,13 @@ static int liquidio_open(struct net_device *netdev)
                                        (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
 
        /* tell Octeon to start forwarding packets to host */
-       send_rx_ctrl_cmd(lio, 1);
+       ret = send_rx_ctrl_cmd(lio, 1);
+       if (ret)
+               return ret;
 
        dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -950,9 +960,12 @@ static int liquidio_stop(struct net_device *netdev)
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
+       int ret = 0;
 
        /* tell Octeon to stop forwarding packets to host */
-       send_rx_ctrl_cmd(lio, 0);
+       ret = send_rx_ctrl_cmd(lio, 0);
+       if (ret)
+               return ret;
 
        netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
        /* Inform that netif carrier is down */
@@ -986,7 +999,7 @@ static int liquidio_stop(struct net_device *netdev)
 
        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
 
-       return 0;
+       return ret;
 }
 
 /**
index 314f8d8067231708cbc802649797fa06feb57e5e..9058f09f921ee6faff2aa0041b8c3cc0513c502a 100644 (file)
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
                          bool persistent, u8 *smt_idx);
 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
 void cxgb4_quiesce_rx(struct sge_rspq *q);
 int cxgb4_port_mirror_alloc(struct net_device *dev);
index bc581b149b113608b0f2de68069121c1f26ad80d..22c9ac922ebae9bee87bb403e17ccfaed08557cc 100644 (file)
@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
                                cxgb4_del_filter(dev, f->tid, &f->fs);
                }
 
-               sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+               sb = adapter->tids.stid_base;
                for (i = 0; i < sb; i++) {
                        f = (struct filter_entry *)adapter->tids.tid_tab[i];
 
index 6264bc66a4fc9aa3472ab33f09f472cf6f4a78d2..1f601de02e7067397a6c0936be727e8e84508cce 100644 (file)
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
 /*
  * net_device operations
  */
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
        return err;
 }
 
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
@@ -6480,9 +6480,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
 
        adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
                                                          direction);
-       cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
 
 out_unlock:
+       cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
        mutex_unlock(&uld_mutex);
 }
 
index 1b88bd1c2dbe426cecba5f9de6557983eb62580d..dd9be229819a594701b52e039b6afd7ffb466b8a 100644 (file)
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
        if (!ch_flower)
                return -ENOENT;
 
+       rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+                              adap->flower_ht_params);
+
        ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
                                      &ch_flower->fs, ch_flower->filter_id);
        if (ret)
-               goto err;
+               netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
+                          ch_flower->filter_id, ret);
 
-       ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
-                                    adap->flower_ht_params);
-       if (ret) {
-               netdev_err(dev, "Flow remove from rhashtable failed");
-               goto err;
-       }
        kfree_rcu(ch_flower, rcu);
-
-err:
        return ret;
 }
 
index 6c259de96f969219e5ba08d7e6df1f43a0e9e605..338b04f339b3da0c39e851ef7ff577dbd9f4a65d 100644 (file)
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
         * down before configuring tc params.
         */
        if (netif_running(dev)) {
-               cxgb_close(dev);
+               netif_tx_stop_all_queues(dev);
+               netif_carrier_off(dev);
                needs_bring_up = true;
        }
 
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
        }
 
 out:
-       if (needs_bring_up)
-               cxgb_open(dev);
+       if (needs_bring_up) {
+               netif_tx_start_all_queues(dev);
+               netif_carrier_on(dev);
+       }
 
        mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
        return ret;
index 1e5f2edb70cf45e68bc2771da6ca897b319b5733..6a099cb34b1228ee764381ecd2ae1006a26d7862 100644 (file)
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
        if (!eosw_txq)
                return -ENOMEM;
 
+       if (!(adap->flags & CXGB4_FW_OK)) {
+               /* Don't stall caller when access to FW is lost */
+               complete(&eosw_txq->completion);
+               return -EIO;
+       }
+
        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
index ef3f1e92632f3512b4780fe53d4d530231590abf..59683f79959ced63f03b02d6a5ac8d00d489b162 100644 (file)
@@ -59,6 +59,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
 }
 
 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+static void clear_conn_resources(struct chcr_ktls_info *tx_info);
 /*
  * chcr_ktls_save_keys: calculate and save crypto keys.
  * @tx_info - driver specific tls info.
@@ -364,10 +365,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
                                chcr_get_ktls_tx_context(tls_ctx);
        struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
        struct ch_ktls_port_stats_debug *port_stats;
+       struct chcr_ktls_uld_ctx *u_ctx;
 
        if (!tx_info)
                return;
 
+       u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
+       if (u_ctx && u_ctx->detach)
+               return;
        /* clear l2t entry */
        if (tx_info->l2te)
                cxgb4_l2t_release(tx_info->l2te);
@@ -384,6 +389,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
        if (tx_info->tid != -1) {
                cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                 tx_info->tid, tx_info->ip_family);
+
+               xa_erase(&u_ctx->tid_list, tx_info->tid);
        }
 
        port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -411,6 +418,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct ch_ktls_port_stats_debug *port_stats;
        struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+       struct chcr_ktls_uld_ctx *u_ctx;
        struct chcr_ktls_info *tx_info;
        struct dst_entry *dst;
        struct adapter *adap;
@@ -425,6 +433,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
        adap = pi->adapter;
        port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
        atomic64_inc(&port_stats->ktls_tx_connection_open);
+       u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
 
        if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
                pr_err("not expecting for RX direction\n");
@@ -434,6 +443,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
        if (tx_ctx->chcr_info)
                goto out;
 
+       if (u_ctx && u_ctx->detach)
+               goto out;
+
        tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
        if (!tx_info)
                goto out;
@@ -569,6 +581,8 @@ free_tid:
        cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                         tx_info->tid, tx_info->ip_family);
 
+       xa_erase(&u_ctx->tid_list, tx_info->tid);
+
 put_module:
        /* release module refcount */
        module_put(THIS_MODULE);
@@ -633,8 +647,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 {
        const struct cpl_act_open_rpl *p = (void *)input;
        struct chcr_ktls_info *tx_info = NULL;
+       struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+       struct chcr_ktls_uld_ctx *u_ctx;
        unsigned int atid, tid, status;
+       struct tls_context *tls_ctx;
        struct tid_info *t;
+       int ret = 0;
 
        tid = GET_TID(p);
        status = AOPEN_STATUS_G(ntohl(p->atid_status));
@@ -666,14 +684,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
        if (!status) {
                tx_info->tid = tid;
                cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+               /* Adding tid */
+               tls_ctx = tls_get_ctx(tx_info->sk);
+               tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+               u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+               if (u_ctx) {
+                       ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
+                                          GFP_NOWAIT);
+                       if (ret < 0) {
+                               pr_err("%s: Failed to allocate tid XA entry = %d\n",
+                                      __func__, tx_info->tid);
+                               tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+                               goto out;
+                       }
+               }
                tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
        } else {
                tx_info->open_state = CH_KTLS_OPEN_FAILURE;
        }
+out:
        spin_unlock(&tx_info->lock);
 
        complete(&tx_info->completion);
-       return 0;
+       return ret;
 }
 
 /*
@@ -2090,6 +2123,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
                goto out;
        }
        u_ctx->lldi = *lldi;
+       u_ctx->detach = false;
+       xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
 out:
        return u_ctx;
 }
@@ -2123,6 +2158,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
        return 0;
 }
 
+static void clear_conn_resources(struct chcr_ktls_info *tx_info)
+{
+       /* clear l2t entry */
+       if (tx_info->l2te)
+               cxgb4_l2t_release(tx_info->l2te);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       /* clear clip entry */
+       if (tx_info->ip_family == AF_INET6)
+               cxgb4_clip_release(tx_info->netdev, (const u32 *)
+                                  &tx_info->sk->sk_v6_rcv_saddr,
+                                  1);
+#endif
+
+       /* clear tid */
+       if (tx_info->tid != -1)
+               cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+                                tx_info->tid, tx_info->ip_family);
+}
+
+static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
+{
+       struct ch_ktls_port_stats_debug *port_stats;
+       struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+       struct chcr_ktls_info *tx_info;
+       unsigned long index;
+
+       xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
+               tx_info = tx_ctx->chcr_info;
+               clear_conn_resources(tx_info);
+               port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+               atomic64_inc(&port_stats->ktls_tx_connection_close);
+               kvfree(tx_info);
+               tx_ctx->chcr_info = NULL;
+               /* release module refcount */
+               module_put(THIS_MODULE);
+       }
+}
+
 static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
        struct chcr_ktls_uld_ctx *u_ctx = handle;
@@ -2139,7 +2213,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
        case CXGB4_STATE_DETACH:
                pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
                mutex_lock(&dev_mutex);
+               u_ctx->detach = true;
                list_del(&u_ctx->entry);
+               ch_ktls_reset_all_conn(u_ctx);
+               xa_destroy(&u_ctx->tid_list);
                mutex_unlock(&dev_mutex);
                break;
        default:
@@ -2178,6 +2255,7 @@ static void __exit chcr_ktls_exit(void)
                adap = pci_get_drvdata(u_ctx->lldi.pdev);
                memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
                list_del(&u_ctx->entry);
+               xa_destroy(&u_ctx->tid_list);
                kfree(u_ctx);
        }
        mutex_unlock(&dev_mutex);
index 18b3b1f024156cdbd4a503b0f51ce26f56775e32..10572dc55365a9e418eb9f02587ea3841a5e78a3 100644 (file)
@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
 struct chcr_ktls_uld_ctx {
        struct list_head entry;
        struct cxgb4_lld_info lldi;
+       struct xarray tid_list;
+       bool detach;
 };
 
 static inline struct chcr_ktls_ofld_ctx_tx *
index 188d871f6b8cdbf613a50edd09b48aa1fde39b88..c320cc8ca68d61070fdb069316088aae67ba01af 100644 (file)
@@ -1564,8 +1564,10 @@ found_ok_skb:
                        cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
                                        sizeof(thdr->type), &thdr->type);
 
-                       if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
-                               return -EIO;
+                       if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
+                               copied = -EIO;
+                               break;
+                       }
                        /*  don't send tls header, skip copy */
                        goto skip_copy;
                }
index f2065f9d02e62b11944986b0207cf63e0ee15ece..ad82cffc6f3f59f591339a5426e9856a8de20374 100644 (file)
@@ -1662,7 +1662,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 }
 
 /* ------------------------------------------------------------------------- */
-static void fec_get_mac(struct net_device *ndev)
+static int fec_get_mac(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -1685,6 +1685,8 @@ static void fec_get_mac(struct net_device *ndev)
                        ret = of_get_mac_address(np, tmpaddr);
                        if (!ret)
                                iap = tmpaddr;
+                       else if (ret == -EPROBE_DEFER)
+                               return ret;
                }
        }
 
@@ -1723,7 +1725,7 @@ static void fec_get_mac(struct net_device *ndev)
                eth_hw_addr_random(ndev);
                dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
                         ndev->dev_addr);
-               return;
+               return 0;
        }
 
        memcpy(ndev->dev_addr, iap, ETH_ALEN);
@@ -1731,6 +1733,8 @@ static void fec_get_mac(struct net_device *ndev)
        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
                 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+
+       return 0;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -3290,7 +3294,9 @@ static int fec_enet_init(struct net_device *ndev)
                return ret;
        }
 
-       fec_enet_alloc_queue(ndev);
+       ret = fec_enet_alloc_queue(ndev);
+       if (ret)
+               return ret;
 
        bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
 
@@ -3298,11 +3304,15 @@ static int fec_enet_init(struct net_device *ndev)
        cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
                                       GFP_KERNEL);
        if (!cbd_base) {
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto free_queue_mem;
        }
 
        /* Get the Ethernet address */
-       fec_get_mac(ndev);
+       ret = fec_get_mac(ndev);
+       if (ret)
+               goto free_queue_mem;
+
        /* make sure MAC we just acquired is programmed into the hw */
        fec_set_mac_address(ndev, NULL);
 
@@ -3376,6 +3386,10 @@ static int fec_enet_init(struct net_device *ndev)
                fec_enet_update_ethtool_stats(ndev);
 
        return 0;
+
+free_queue_mem:
+       fec_enet_free_queue(ndev);
+       return ret;
 }
 
 #ifdef CONFIG_OF
index a7b7a4aace7913a9780d17ecdcaf43cab25da8d6..b0c0504950d8118c034b7fc73096423833d4eae0 100644 (file)
@@ -548,8 +548,8 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
 
     base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
     if (!base) {
-           pcmcia_release_window(link, link->resource[2]);
-           return -ENOMEM;
+       pcmcia_release_window(link, link->resource[2]);
+       return -1;
     }
 
     pcmcia_map_mem_page(link, link->resource[2], 0);
index 7302498c6df3608f3ffe6e0727c511c0797959c9..bbc423e931223c27f1516f470108c3ddf5c19ef0 100644 (file)
@@ -180,7 +180,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
        /* Double check we have no extra work.
         * Ensure unmask synchronizes with checking for work.
         */
-       dma_rmb();
+       mb();
        if (block->tx)
                reschedule |= gve_tx_poll(block, -1);
        if (block->rx)
@@ -220,6 +220,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
                int vecs_left = new_num_ntfy_blks % 2;
 
                priv->num_ntfy_blks = new_num_ntfy_blks;
+               priv->mgmt_msix_idx = priv->num_ntfy_blks;
                priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
                                                vecs_per_type);
                priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
@@ -300,20 +301,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
 {
        int i;
 
-       /* Free the irqs */
-       for (i = 0; i < priv->num_ntfy_blks; i++) {
-               struct gve_notify_block *block = &priv->ntfy_blocks[i];
-               int msix_idx = i;
+       if (priv->msix_vectors) {
+               /* Free the irqs */
+               for (i = 0; i < priv->num_ntfy_blks; i++) {
+                       struct gve_notify_block *block = &priv->ntfy_blocks[i];
+                       int msix_idx = i;
 
-               irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
-                                     NULL);
-               free_irq(priv->msix_vectors[msix_idx].vector, block);
+                       irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+                                             NULL);
+                       free_irq(priv->msix_vectors[msix_idx].vector, block);
+               }
+               free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
        }
        dma_free_coherent(&priv->pdev->dev,
                          priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
                          priv->ntfy_blocks, priv->ntfy_block_bus);
        priv->ntfy_blocks = NULL;
-       free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
        pci_disable_msix(priv->pdev);
        kvfree(priv->msix_vectors);
        priv->msix_vectors = NULL;
index 6938f3a939d64654ef9982865d34b18d4005e6fa..3e04a3973d6803bbe4ed6068bbcac76219d017f3 100644 (file)
@@ -212,10 +212,11 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
        tx->dev = &priv->pdev->dev;
        if (!tx->raw_addressing) {
                tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
-
+               if (!tx->tx_fifo.qpl)
+                       goto abort_with_desc;
                /* map Tx FIFO */
                if (gve_tx_fifo_init(priv, &tx->tx_fifo))
-                       goto abort_with_desc;
+                       goto abort_with_qpl;
        }
 
        tx->q_resources =
@@ -236,6 +237,9 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
 abort_with_fifo:
        if (!tx->raw_addressing)
                gve_tx_fifo_release(priv, &tx->tx_fifo);
+abort_with_qpl:
+       if (!tx->raw_addressing)
+               gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
 abort_with_desc:
        dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
        tx->desc = NULL;
@@ -589,7 +593,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
        struct gve_tx_ring *tx;
        int nsegs;
 
-       WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
+       WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
             "skb queue index out of range");
        tx = &priv->tx[skb_get_queue_mapping(skb)];
        if (unlikely(gve_maybe_stop_tx(tx, skb))) {
index da48c05435ea6cc07537293867a17764353c5bcd..7e62dcff242647cfb975d713d1decc219387f13e 100644 (file)
@@ -192,7 +192,7 @@ static int hns_nic_get_link_ksettings(struct net_device *net_dev,
 }
 
 /**
- *hns_nic_set_link_settings - implement ethtool set link ksettings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
  *@net_dev: net_device
  *@cmd: ethtool_link_ksettings
  *retuen 0 - success , negative --fail
@@ -827,7 +827,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
 }
 
 /**
- * get_ethtool_stats - get detail statistics.
+ * hns_get_ethtool_stats - get detail statistics.
  * @netdev: net device
  * @stats: statistics info.
  * @data: statistics data.
@@ -885,7 +885,7 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
 }
 
 /**
- * get_strings: Return a set of strings that describe the requested objects
+ * hns_get_strings: Return a set of strings that describe the requested objects
  * @netdev: net device
  * @stringset: string set ID.
  * @data: objects data.
index 783fdaf8f8d64ec1ab5c1f28e7f5fead31135986..026558f8e04b950da306c479e27d54b86a8c4317 100644 (file)
@@ -264,22 +264,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
        struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
        struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+       struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+       struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
 
-       /* initialize the configuration for interrupt coalescing.
-        * 1. GL (Interrupt Gap Limiter)
-        * 2. RL (Interrupt Rate Limiter)
-        * 3. QL (Interrupt Quantity Limiter)
-        *
-        * Default: enable interrupt coalescing self-adaptive and GL
-        */
-       tx_coal->adapt_enable = 1;
-       rx_coal->adapt_enable = 1;
+       tx_coal->adapt_enable = ptx_coal->adapt_enable;
+       rx_coal->adapt_enable = prx_coal->adapt_enable;
 
-       tx_coal->int_gl = HNS3_INT_GL_50K;
-       rx_coal->int_gl = HNS3_INT_GL_50K;
+       tx_coal->int_gl = ptx_coal->int_gl;
+       rx_coal->int_gl = prx_coal->int_gl;
 
-       rx_coal->flow_level = HNS3_FLOW_LOW;
-       tx_coal->flow_level = HNS3_FLOW_LOW;
+       rx_coal->flow_level = prx_coal->flow_level;
+       tx_coal->flow_level = ptx_coal->flow_level;
 
        /* device version above V3(include V3), GL can configure 1us
         * unit, so uses 1us unit.
@@ -294,8 +289,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
                rx_coal->ql_enable = 1;
                tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
                rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
-               tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
-               rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+               tx_coal->int_ql = ptx_coal->int_ql;
+               rx_coal->int_ql = prx_coal->int_ql;
        }
 }
 
@@ -846,8 +841,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
              l4.udp->dest == htons(4790))))
                return false;
 
-       skb_checksum_help(skb);
-
        return true;
 }
 
@@ -924,8 +917,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
                        /* the stack computes the IP header already,
                         * driver calculate l4 checksum when not TSO.
                         */
-                       skb_checksum_help(skb);
-                       return 0;
+                       return skb_checksum_help(skb);
                }
 
                hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
@@ -970,7 +962,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
                break;
        case IPPROTO_UDP:
                if (hns3_tunnel_csum_bug(skb))
-                       break;
+                       return skb_checksum_help(skb);
 
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -995,8 +987,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
                /* the stack computes the IP header already,
                 * driver calculate l4 checksum when not TSO.
                 */
-               skb_checksum_help(skb);
-               return 0;
+               return skb_checksum_help(skb);
        }
 
        return 0;
@@ -3844,6 +3835,34 @@ map_ring_fail:
        return ret;
 }
 
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+       struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+       struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+       struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+       /* initialize the configuration for interrupt coalescing.
+        * 1. GL (Interrupt Gap Limiter)
+        * 2. RL (Interrupt Rate Limiter)
+        * 3. QL (Interrupt Quantity Limiter)
+        *
+        * Default: enable interrupt coalescing self-adaptive and GL
+        */
+       tx_coal->adapt_enable = 1;
+       rx_coal->adapt_enable = 1;
+
+       tx_coal->int_gl = HNS3_INT_GL_50K;
+       rx_coal->int_gl = HNS3_INT_GL_50K;
+
+       rx_coal->flow_level = HNS3_FLOW_LOW;
+       tx_coal->flow_level = HNS3_FLOW_LOW;
+
+       if (ae_dev->dev_specs.int_ql_max) {
+               tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+               rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+       }
+}
+
 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
 {
        struct hnae3_handle *h = priv->ae_handle;
@@ -4295,6 +4314,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
                goto out_get_ring_cfg;
        }
 
+       hns3_nic_init_coal_cfg(priv);
+
        ret = hns3_nic_alloc_vector_data(priv);
        if (ret) {
                ret = -ENOMEM;
@@ -4317,12 +4338,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
        if (ret)
                goto out_init_phy;
 
-       ret = register_netdev(netdev);
-       if (ret) {
-               dev_err(priv->dev, "probe register netdev fail!\n");
-               goto out_reg_netdev_fail;
-       }
-
        /* the device can work without cpu rmap, only aRFS needs it */
        ret = hns3_set_rx_cpu_rmap(netdev);
        if (ret)
@@ -4355,17 +4370,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
        if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
                set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
 
+       ret = register_netdev(netdev);
+       if (ret) {
+               dev_err(priv->dev, "probe register netdev fail!\n");
+               goto out_reg_netdev_fail;
+       }
+
        if (netif_msg_drv(handle))
                hns3_info_show(priv);
 
        return ret;
 
+out_reg_netdev_fail:
+       hns3_dbg_uninit(handle);
 out_client_start:
        hns3_free_rx_cpu_rmap(netdev);
        hns3_nic_uninit_irq(priv);
 out_init_irq_fail:
-       unregister_netdev(netdev);
-out_reg_netdev_fail:
        hns3_uninit_phy(netdev);
 out_init_phy:
        hns3_uninit_all_ring(priv);
@@ -4571,31 +4592,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
        return 0;
 }
 
-static void hns3_store_coal(struct hns3_nic_priv *priv)
-{
-       /* ethtool only support setting and querying one coal
-        * configuration for now, so save the vector 0' coal
-        * configuration here in order to restore it.
-        */
-       memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
-              sizeof(struct hns3_enet_coalesce));
-       memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
-              sizeof(struct hns3_enet_coalesce));
-}
-
-static void hns3_restore_coal(struct hns3_nic_priv *priv)
-{
-       u16 vector_num = priv->vector_num;
-       int i;
-
-       for (i = 0; i < vector_num; i++) {
-               memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
-                      sizeof(struct hns3_enet_coalesce));
-               memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
-                      sizeof(struct hns3_enet_coalesce));
-       }
-}
-
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 {
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -4654,8 +4650,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
        if (ret)
                goto err_put_ring;
 
-       hns3_restore_coal(priv);
-
        ret = hns3_nic_init_vector_data(priv);
        if (ret)
                goto err_dealloc_vector;
@@ -4721,8 +4715,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 
        hns3_nic_uninit_vector_data(priv);
 
-       hns3_store_coal(priv);
-
        hns3_nic_dealloc_vector_data(priv);
 
        hns3_uninit_all_ring(priv);
index b48faf769b1c9b88e30a87b388f913e49af64077..c1ea403d2b56798acdbda19b4d386c328abb5dbc 100644 (file)
@@ -1134,50 +1134,32 @@ static void hns3_get_channels(struct net_device *netdev,
                h->ae_algo->ops->get_channels(h, ch);
 }
 
-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
-                                      struct ethtool_coalesce *cmd)
+static int hns3_get_coalesce(struct net_device *netdev,
+                            struct ethtool_coalesce *cmd)
 {
-       struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
        struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+       struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
        struct hnae3_handle *h = priv->ae_handle;
-       u16 queue_num = h->kinfo.num_tqps;
 
        if (hns3_nic_resetting(netdev))
                return -EBUSY;
 
-       if (queue >= queue_num) {
-               netdev_err(netdev,
-                          "Invalid queue value %u! Queue max id=%u\n",
-                          queue, queue_num - 1);
-               return -EINVAL;
-       }
-
-       tx_vector = priv->ring[queue].tqp_vector;
-       rx_vector = priv->ring[queue_num + queue].tqp_vector;
+       cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+       cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
 
-       cmd->use_adaptive_tx_coalesce =
-                       tx_vector->tx_group.coal.adapt_enable;
-       cmd->use_adaptive_rx_coalesce =
-                       rx_vector->rx_group.coal.adapt_enable;
-
-       cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
-       cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+       cmd->tx_coalesce_usecs = tx_coal->int_gl;
+       cmd->rx_coalesce_usecs = rx_coal->int_gl;
 
        cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
        cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
 
-       cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
-       cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+       cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+       cmd->rx_max_coalesced_frames = rx_coal->int_ql;
 
        return 0;
 }
 
-static int hns3_get_coalesce(struct net_device *netdev,
-                            struct ethtool_coalesce *cmd)
-{
-       return hns3_get_coalesce_per_queue(netdev, 0, cmd);
-}
-
 static int hns3_check_gl_coalesce_para(struct net_device *netdev,
                                       struct ethtool_coalesce *cmd)
 {
@@ -1292,19 +1274,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
                return ret;
        }
 
-       ret = hns3_check_ql_coalesce_param(netdev, cmd);
-       if (ret)
-               return ret;
-
-       if (cmd->use_adaptive_tx_coalesce == 1 ||
-           cmd->use_adaptive_rx_coalesce == 1) {
-               netdev_info(netdev,
-                           "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
-                           cmd->use_adaptive_tx_coalesce,
-                           cmd->use_adaptive_rx_coalesce);
-       }
-
-       return 0;
+       return hns3_check_ql_coalesce_param(netdev, cmd);
 }
 
 static void hns3_set_coalesce_per_queue(struct net_device *netdev,
@@ -1350,6 +1320,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *cmd)
 {
        struct hnae3_handle *h = hns3_get_handle(netdev);
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+       struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
        u16 queue_num = h->kinfo.num_tqps;
        int ret;
        int i;
@@ -1364,6 +1337,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
        h->kinfo.int_rl_setting =
                hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
 
+       tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+       rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+       tx_coal->int_gl = cmd->tx_coalesce_usecs;
+       rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+       tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+       rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
        for (i = 0; i < queue_num; i++)
                hns3_set_coalesce_per_queue(netdev, cmd, i);
 
index 8e5f9dc8791d289187430d251a8e253b85ad3f0d..f1c9f4ada348a77e8cda8fb974c9fd6795e73854 100644 (file)
@@ -710,7 +710,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
        unsigned int flag;
        int ret = 0;
 
-       memset(&resp_msg, 0, sizeof(resp_msg));
        /* handle all the mailbox requests in the queue */
        while (!hclge_cmd_crq_empty(&hdev->hw)) {
                if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -738,6 +737,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 
                trace_hclge_pf_mbx_get(hdev, req);
 
+               /* clear the resp_msg before processing every mailbox message */
+               memset(&resp_msg, 0, sizeof(resp_msg));
+
                switch (req->msg.code) {
                case HCLGE_MBX_MAP_RING_TO_VECTOR:
                        ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
index de70c16ef619aaafcd2fcbb30fc460ee8f5f71a1..b883ab809df30a419bba516df4b14aea6a591cec 100644 (file)
@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
                result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+               if (result == I40E_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = I40E_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 46d884417c6327fc1cdb00a81b083b32bfe85b87..68f177a86403f75c9a7845ce19bea3a99b4cd131 100644 (file)
@@ -162,9 +162,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return I40E_XDP_REDIR;
        }
 
        switch (act) {
@@ -173,11 +174,14 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
                result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+               if (result == I40E_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index e35db3ff583bcc4a7c34ae0f00e52c19ef93989c..2924c67567b8ac0e32af31caf7c6d55eda12952a 100644 (file)
@@ -335,6 +335,7 @@ struct ice_vsi {
        struct ice_tc_cfg tc_cfg;
        struct bpf_prog *xdp_prog;
        struct ice_ring **xdp_rings;     /* XDP ring array */
+       unsigned long *af_xdp_zc_qps;    /* tracks AF_XDP ZC enabled qps */
        u16 num_xdp_txq;                 /* Used XDP queues */
        u8 xdp_mapping_mode;             /* ICE_MAP_MODE_[CONTIG|SCATTER] */
 
@@ -547,15 +548,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
 {
+       struct ice_vsi *vsi = ring->vsi;
        u16 qid = ring->q_index;
 
        if (ice_ring_is_xdp(ring))
-               qid -= ring->vsi->num_xdp_txq;
+               qid -= vsi->num_xdp_txq;
 
-       if (!ice_is_xdp_ena_vsi(ring->vsi))
+       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
                return NULL;
 
-       return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+       return xsk_get_pool_from_qid(vsi->netdev, qid);
 }
 
 /**
index d9ddd0bcf65f80c38e47056457e4d87fdb1320d8..99301ad95290d38886a5d212b49d41858067c31a 100644 (file)
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
                ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
                                                100000baseKR4_Full);
        }
-
-       /* Autoneg PHY types */
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
 }
 
 #define TEST_SET_BITS_TIMEOUT  50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
                ks->base.port = PORT_TP;
                break;
        case ICE_MEDIA_BACKPLANE:
-               ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
-               ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     Backplane);
                ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
        if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
                ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
 
+       /* Set supported and advertised autoneg */
+       if (ice_is_phy_caps_an_enabled(caps)) {
+               ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+               ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+       }
+
 done:
        kfree(caps);
        return err;
index de38a0fc9665d538b5cdcbeb2ee24a4b9f63c6db..9b8300d4a2674183ba257357ee238b32d1273895 100644 (file)
@@ -31,6 +31,7 @@
 #define PF_FW_ATQLEN_ATQOVFL_M                 BIT(29)
 #define PF_FW_ATQLEN_ATQCRIT_M                 BIT(30)
 #define VF_MBX_ARQLEN(_VF)                     (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF)                     (0x0022A800 + ((_VF) * 4))
 #define PF_FW_ATQLEN_ATQENABLE_M               BIT(31)
 #define PF_FW_ATQT                             0x00080400
 #define PF_MBX_ARQBAH                          0x0022E400
index 82e2ce23df3dc51e5f93d4aff4c7039bbcab8498..d70ee573fde5bc4c2d6b8992b4168b3cb693a918 100644 (file)
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
        if (!vsi->q_vectors)
                goto err_vectors;
 
+       vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+       if (!vsi->af_xdp_zc_qps)
+               goto err_zc_qps;
+
        return 0;
 
+err_zc_qps:
+       devm_kfree(dev, vsi->q_vectors);
 err_vectors:
        devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -194,6 +200,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
                break;
        case ICE_VSI_VF:
                vf = &pf->vf[vsi->vf_id];
+               if (vf->num_req_qs)
+                       vf->num_vf_qs = vf->num_req_qs;
                vsi->alloc_txq = vf->num_vf_qs;
                vsi->alloc_rxq = vf->num_vf_qs;
                /* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +296,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
        dev = ice_pf_to_dev(pf);
 
+       if (vsi->af_xdp_zc_qps) {
+               bitmap_free(vsi->af_xdp_zc_qps);
+               vsi->af_xdp_zc_qps = NULL;
+       }
        /* free the ring and vector containers */
        if (vsi->q_vectors) {
                devm_kfree(dev, vsi->q_vectors);
index e2b4b29ea20754e9a98ca1d608812ae528dc0e9d..04748aa4c7c8cb1456c69e3114aba1d0107bb264 100644 (file)
@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
            struct bpf_prog *xdp_prog)
 {
        struct ice_ring *xdp_ring;
-       int err;
+       int err, result;
        u32 act;
 
        act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
                return ICE_XDP_PASS;
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
-               return ice_xmit_xdp_buff(xdp, xdp_ring);
+               result = ice_xmit_xdp_buff(xdp, xdp_ring);
+               if (result == ICE_XDP_CONSUMED)
+                       goto out_failure;
+               return result;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               return ICE_XDP_REDIR;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
@@ -2143,6 +2149,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
        struct ice_tx_offload_params offload = { 0 };
        struct ice_vsi *vsi = tx_ring->vsi;
        struct ice_tx_buf *first;
+       struct ethhdr *eth;
        unsigned int count;
        int tso, csum;
 
@@ -2189,7 +2196,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
                goto out_drop;
 
        /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
-       if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+       eth = (struct ethhdr *)skb_mac_header(skb);
+       if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+                     eth->h_proto == htons(ETH_P_LLDP)) &&
                     vsi->type == ICE_VSI_PF &&
                     vsi->port_info->qos_cfg.is_sw_lldp))
                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
index a1d22d2aa0bdd5101480c415889c3e91fb40eb3b..97a46c616aca7c629118abb3d98430b35bc83c82 100644 (file)
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
         */
        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 
-       /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
-        * in the case of VFR. If this is done for PFR, it can mess up VF
-        * resets because the VF driver may already have started cleanup
-        * by the time we get here.
+       /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+        * needs to clear them in the case of VFR/VFLR. If this is done for
+        * PFR, it can mess up VF resets because the VF driver may already
+        * have started cleanup by the time we get here.
         */
-       if (!is_pfr)
+       if (!is_pfr) {
                wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+               wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+       }
 
        /* In the case of a VFLR, the HW has already reset the VF and we
         * just need to clean up, so don't hit the VFRTRIG register.
@@ -1698,7 +1700,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
                ice_vf_ctrl_vsi_release(vf);
 
        ice_vf_pre_vsi_rebuild(vf);
-       ice_vf_rebuild_vsi_with_release(vf);
+
+       if (ice_vf_rebuild_vsi_with_release(vf)) {
+               dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+               return false;
+       }
+
        ice_vf_post_vsi_rebuild(vf);
 
        /* if the VF has been reset allow it to come up again */
index faa7b8d96adb509691b1484a994483a90d1594ce..a1f89ea3c2bdb69838eefa7cf647f3292d880fdd 100644 (file)
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
        if (!pool)
                return -EINVAL;
 
+       clear_bit(qid, vsi->af_xdp_zc_qps);
        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
        return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
        if (err)
                return err;
 
+       set_bit(qid, vsi->af_xdp_zc_qps);
+
        return 0;
 }
 
@@ -473,9 +476,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return ICE_XDP_REDIR;
        }
 
        switch (act) {
@@ -484,11 +488,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
                result = ice_xmit_xdp_buff(xdp, xdp_ring);
+               if (result == ICE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
index 7bda8c5edea5dcee1bb0016a118c52c92c1428e7..2d3daf022651ce839b4029d805b821051b16342f 100644 (file)
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                       struct sk_buff *skb);
+                       ktime_t *timestamp);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
index 038a9fd1af44de7dde4e0b0527a25a44588baaa9..b2a042f825ff51870cd9ec08a21c79e800785014 100644 (file)
@@ -8280,7 +8280,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
                                         struct igb_rx_buffer *rx_buffer,
                                         struct xdp_buff *xdp,
-                                        union e1000_adv_rx_desc *rx_desc)
+                                        ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8300,12 +8300,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
        if (unlikely(!skb))
                return NULL;
 
-       if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
-                       xdp->data += IGB_TS_HDR_LEN;
-                       size -= IGB_TS_HDR_LEN;
-               }
-       }
+       if (timestamp)
+               skb_hwtstamps(skb)->hwtstamp = timestamp;
 
        /* Determine available headroom for copy */
        headlen = size;
@@ -8336,7 +8332,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
                                     struct igb_rx_buffer *rx_buffer,
                                     struct xdp_buff *xdp,
-                                    union e1000_adv_rx_desc *rx_desc)
+                                    ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8363,11 +8359,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
        if (metasize)
                skb_metadata_set(skb, metasize);
 
-       /* pull timestamp out of packet data */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
-                       __skb_pull(skb, IGB_TS_HDR_LEN);
-       }
+       if (timestamp)
+               skb_hwtstamps(skb)->hwtstamp = timestamp;
 
        /* update buffer offset */
 #if (PAGE_SIZE < 8192)
@@ -8401,18 +8394,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
                break;
        case XDP_TX:
                result = igb_xdp_xmit_back(adapter, xdp);
+               if (result == IGB_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-               if (!err)
-                       result = IGB_XDP_REDIR;
-               else
-                       result = IGB_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = IGB_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
@@ -8682,7 +8677,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        while (likely(total_packets < budget)) {
                union e1000_adv_rx_desc *rx_desc;
                struct igb_rx_buffer *rx_buffer;
+               ktime_t timestamp = 0;
+               int pkt_offset = 0;
                unsigned int size;
+               void *pktbuf;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8702,14 +8700,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                dma_rmb();
 
                rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+               pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+               /* pull rx packet timestamp if available and valid */
+               if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+                       int ts_hdr_len;
+
+                       ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+                                                        pktbuf, &timestamp);
+
+                       pkt_offset += ts_hdr_len;
+                       size -= ts_hdr_len;
+               }
 
                /* retrieve a buffer from the ring */
                if (!skb) {
-                       unsigned int offset = igb_rx_offset(rx_ring);
-                       unsigned char *hard_start;
+                       unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+                       unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
 
-                       hard_start = page_address(rx_buffer->page) +
-                                    rx_buffer->page_offset - offset;
                        xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8732,10 +8740,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                } else if (skb)
                        igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
                else if (ring_uses_build_skb(rx_ring))
-                       skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+                       skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+                                           timestamp);
                else
                        skb = igb_construct_skb(rx_ring, rx_buffer,
-                                               &xdp, rx_desc);
+                                               &xdp, timestamp);
 
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
index ba61fe9bfaf4e36ad657e4fc18f1f825f24e8f2d..d68cd4466a546dab1f851fcc9faefd5aa73d9f31 100644 (file)
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        dev_kfree_skb_any(skb);
 }
 
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
 /**
  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
  * @q_vector: Pointer to interrupt specific structure
  * @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
  *
  * This function is meant to retrieve a timestamp from the first buffer of an
  * incoming frame.  The value is stored in little endian format starting on
  * byte 8
  *
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
  **/
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                       struct sk_buff *skb)
+                       ktime_t *timestamp)
 {
        struct igb_adapter *adapter = q_vector->adapter;
+       struct skb_shared_hwtstamps ts;
        __le64 *regval = (__le64 *)va;
        int adjust = 0;
 
        if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
-               return IGB_RET_PTP_DISABLED;
+               return 0;
 
        /* The timestamp is recorded in little endian format.
         * DWORD: 0        1        2        3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 
        /* check reserved dwords are zero, be/le doesn't matter for zero */
        if (regval[0])
-               return IGB_RET_PTP_INVALID;
+               return 0;
 
-       igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
-                                  le64_to_cpu(regval[1]));
+       igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
 
        /* adjust timestamp for the RX latency based on link speed */
        if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
                        break;
                }
        }
-       skb_hwtstamps(skb)->hwtstamp =
-               ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
-       return 0;
+       *timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+       return IGB_TS_HDR_LEN;
 }
 
 /**
index 069471b7ffb0ab5914c9014f42aa1d61a91f4fa8..f1adf154ec4ae1bd15b6ae436591864e22f1202b 100644 (file)
@@ -2047,20 +2047,19 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
                break;
        case XDP_TX:
                if (igc_xdp_xmit_back(adapter, xdp) < 0)
-                       res = IGC_XDP_CONSUMED;
-               else
-                       res = IGC_XDP_TX;
+                       goto out_failure;
+               res = IGC_XDP_TX;
                break;
        case XDP_REDIRECT:
                if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
-                       res = IGC_XDP_CONSUMED;
-               else
-                       res = IGC_XDP_REDIRECT;
+                       goto out_failure;
+               res = IGC_XDP_REDIRECT;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(adapter->netdev, prog, act);
                fallthrough;
        case XDP_DROP:
index c5ec17d19c59d039265eb7c80c2f254b8b6a5d91..2ac5b82676f3b8e5db203839237dfe4e305fbaab 100644 (file)
@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
                break;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
-               if (unlikely(!xdpf)) {
-                       result = IXGBE_XDP_CONSUMED;
-                       break;
-               }
+               if (unlikely(!xdpf))
+                       goto out_failure;
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+               if (result == IXGBE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-               if (!err)
-                       result = IXGBE_XDP_REDIR;
-               else
-                       result = IXGBE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = IXGBE_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 988db46bff0eec77c37ef58112b4d8950f7a67dd..214a38de3f415012600bccb0af01a20e6f30c9f1 100644 (file)
@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
        return err;
 }
 
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int max_frame = msgbuf[1];
        u32 max_frs;
 
+       if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+               e_err(drv, "VF max_frame %d out of range\n", max_frame);
+               return -EINVAL;
+       }
+
        /*
         * For 82599EB we have to keep all PFs and VFs operating with
         * the same max_frame value in order to avoid sending an oversize
@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
                }
        }
 
-       /* MTU < 68 is an error and causes problems on some kernels */
-       if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
-               e_err(drv, "VF max_frame %d out of range\n", max_frame);
-               return -EINVAL;
-       }
-
        /* pull current max frame size from hardware */
        max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
        max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
                retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
                break;
        case IXGBE_VF_SET_LPE:
-               retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+               retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
                break;
        case IXGBE_VF_SET_MACVLAN:
                retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
index 91ad5b902673cfa1df55d202f22625988bd8bc69..f72d2978263b97212eb0e056339b4ceefd6c750c 100644 (file)
@@ -106,9 +106,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return IXGBE_XDP_REDIR;
        }
 
        switch (act) {
@@ -116,16 +117,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
                break;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
-               if (unlikely(!xdpf)) {
-                       result = IXGBE_XDP_CONSUMED;
-                       break;
-               }
+               if (unlikely(!xdpf))
+                       goto out_failure;
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+               if (result == IXGBE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index ba2ed8a43d2de0a52b1fc5493ed43fb2cd9fb5d0..0e733cc15c5886ff4a69ee9451cf107b36fa6534 100644 (file)
@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
        case XDP_TX:
                xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
                result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+               if (result == IXGBEVF_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 6f987a7ffcb36e51755972b87f1606d813863af9..b30a45725374b48174fba0b0001644a22e963b54 100644 (file)
@@ -1315,23 +1315,23 @@ static int korina_probe(struct platform_device *pdev)
        lp->tx_irq = platform_get_irq_byname(pdev, "tx");
 
        p = devm_platform_ioremap_resource_byname(pdev, "emac");
-       if (!p) {
+       if (IS_ERR(p)) {
                printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
-               return -ENOMEM;
+               return PTR_ERR(p);
        }
        lp->eth_regs = p;
 
        p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
-       if (!p) {
+       if (IS_ERR(p)) {
                printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
-               return -ENOMEM;
+               return PTR_ERR(p);
        }
        lp->rx_dma_regs = p;
 
        p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
-       if (!p) {
+       if (IS_ERR(p)) {
                printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
-               return -ENOMEM;
+               return PTR_ERR(p);
        }
        lp->tx_dma_regs = p;
 
index 41c2ad210bc99c65175dd79f551a77df0c098caf..36dc3e5f621897c50ca477f5311e38f64733a0bb 100644 (file)
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
 
 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
+       dma_addr_t mapping;
        int ret = 0;
 
        ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
@@ -163,16 +164,17 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
                goto skip;
        }
 
-       ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
-                       ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
-                       DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(ch->priv->dev,
-                                      ch->dma.desc_base[ch->dma.desc].addr))) {
+       mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+                                XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
                ret = -ENOMEM;
                goto skip;
        }
 
+       ch->dma.desc_base[ch->dma.desc].addr = mapping;
+       /* Make sure the address is written before we give it to HW */
+       wmb();
 skip:
        ch->dma.desc_base[ch->dma.desc].ctl =
                LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
@@ -196,6 +198,8 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
        ch->dma.desc %= LTQ_DESC_NUM;
 
        if (ret) {
+               ch->skb[ch->dma.desc] = skb;
+               net_dev->stats.rx_dropped++;
                netdev_err(net_dev, "failed to allocate new rx buffer\n");
                return ret;
        }
index 8edba5ea90f03bac6004a3cc43e119161ce0d1ad..4a61c90003b5ead489bc5147fc068c3d12976b3c 100644 (file)
@@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format {
 
 #define MVPP2_DESC_DMA_MASK    DMA_BIT_MASK(40)
 
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK    0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info)   ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS     12
+#define MVPP2_B_HDR_INFO_LAST_MASK     BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+          (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
 struct mvpp2_tai;
 
 /* Definitions */
@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table {
        u32 indir[MVPP22_RSS_TABLE_ENTRIES];
 };
 
+struct mvpp2_buff_hdr {
+       __le32 next_phys_addr;
+       __le32 next_dma_addr;
+       __le16 byte_count;
+       __le16 info;
+       __le16 reserved1;       /* bm_qset (for future use, BM) */
+       u8 next_phys_addr_high;
+       u8 next_dma_addr_high;
+       __le16 reserved2;
+       __le16 reserved3;
+       __le16 reserved4;
+       __le16 reserved5;
+};
+
 /* Shared Packet Processor resources */
 struct mvpp2 {
        /* Shared registers' base addresses */
index ec706d614cacc146221bcaf037f7a46c9162add7..d39c7639cdbab16b5688d294d7e55ac71421853f 100644 (file)
@@ -3839,6 +3839,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
        return ret;
 }
 
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+                                   int pool, u32 rx_status)
+{
+       phys_addr_t phys_addr, phys_addr_next;
+       dma_addr_t dma_addr, dma_addr_next;
+       struct mvpp2_buff_hdr *buff_hdr;
+
+       phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+       dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+
+       do {
+               buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+               phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
+               dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+               if (port->priv->hw_version >= MVPP22) {
+                       phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
+                       dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+               }
+
+               mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+               phys_addr = phys_addr_next;
+               dma_addr = dma_addr_next;
+
+       } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
 /* Main rx processing */
 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                    int rx_todo, struct mvpp2_rx_queue *rxq)
@@ -3885,14 +3914,6 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                        MVPP2_RXD_BM_POOL_ID_OFFS;
                bm_pool = &port->priv->bm_pools[pool];
 
-               /* In case of an error, release the requested buffer pointer
-                * to the Buffer Manager. This request process is controlled
-                * by the hardware, and the information about the buffer is
-                * comprised by the RX descriptor.
-                */
-               if (rx_status & MVPP2_RXD_ERR_SUMMARY)
-                       goto err_drop_frame;
-
                if (port->priv->percpu_pools) {
                        pp = port->priv->page_pool[pool];
                        dma_dir = page_pool_get_dma_dir(pp);
@@ -3904,6 +3925,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                                        rx_bytes + MVPP2_MH_SIZE,
                                        dma_dir);
 
+               /* Buffer header not supported */
+               if (rx_status & MVPP2_RXD_BUF_HDR)
+                       goto err_drop_frame;
+
+               /* In case of an error, release the requested buffer pointer
+                * to the Buffer Manager. This request process is controlled
+                * by the hardware, and the information about the buffer is
+                * comprised by the RX descriptor.
+                */
+               if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+                       goto err_drop_frame;
+
                /* Prefetch header */
                prefetch(data);
 
@@ -3985,7 +4018,10 @@ err_drop_frame:
                dev->stats.rx_errors++;
                mvpp2_rx_error(port, rx_desc);
                /* Return the buffer to the pool */
-               mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+               if (rx_status & MVPP2_RXD_BUF_HDR)
+                       mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
+               else
+                       mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
        }
 
        rcu_read_unlock();
index f4962a97a0757ca5cf6b95f396a8698bfa5f4e3f..9d9a2e438acfcdbb4365918b54eaae3bd757cc6f 100644 (file)
@@ -786,6 +786,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;
 
+       if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+           *rss_context >= MAX_RSS_GROUPS)
+               return -EINVAL;
+
        rss = &pfvf->hw.rss_info;
 
        if (!rss->enable) {
index ed4eacef17cea42a5ab32dbaadab6995face80a5..64adfd24e134d80ec341cf2ed5055e054ecc9b57 100644 (file)
@@ -681,32 +681,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
 void mtk_stats_update_mac(struct mtk_mac *mac)
 {
        struct mtk_hw_stats *hw_stats = mac->hw_stats;
-       unsigned int base = MTK_GDM1_TX_GBCNT;
-       u64 stats;
-
-       base += hw_stats->reg_offset;
+       struct mtk_eth *eth = mac->hw;
 
        u64_stats_update_begin(&hw_stats->syncp);
 
-       hw_stats->rx_bytes += mtk_r32(mac->hw, base);
-       stats =  mtk_r32(mac->hw, base + 0x04);
-       if (stats)
-               hw_stats->rx_bytes += (stats << 32);
-       hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
-       hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
-       hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
-       hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
-       hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
-       hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
-       hw_stats->rx_flow_control_packets +=
-                                       mtk_r32(mac->hw, base + 0x24);
-       hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
-       hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
-       hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
-       stats =  mtk_r32(mac->hw, base + 0x34);
-       if (stats)
-               hw_stats->tx_bytes += (stats << 32);
-       hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+               hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+               hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+               hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+               hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+               hw_stats->rx_checksum_errors +=
+                       mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+       } else {
+               unsigned int offs = hw_stats->reg_offset;
+               u64 stats;
+
+               hw_stats->rx_bytes += mtk_r32(mac->hw,
+                                             MTK_GDM1_RX_GBCNT_L + offs);
+               stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+               if (stats)
+                       hw_stats->rx_bytes += (stats << 32);
+               hw_stats->rx_packets +=
+                       mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+               hw_stats->rx_overflow +=
+                       mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+               hw_stats->rx_fcs_errors +=
+                       mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+               hw_stats->rx_short_errors +=
+                       mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+               hw_stats->rx_long_errors +=
+                       mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+               hw_stats->rx_checksum_errors +=
+                       mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+               hw_stats->rx_flow_control_packets +=
+                       mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+               hw_stats->tx_skip +=
+                       mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+               hw_stats->tx_collisions +=
+                       mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+               hw_stats->tx_bytes +=
+                       mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
+               stats =  mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+               if (stats)
+                       hw_stats->tx_bytes += (stats << 32);
+               hw_stats->tx_packets +=
+                       mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+       }
+
        u64_stats_update_end(&hw_stats->syncp);
 }
 
@@ -2423,7 +2444,8 @@ static void mtk_dim_rx(struct work_struct *work)
        val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
 
        mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
-       mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+               mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
 
        spin_unlock_bh(&eth->dim_lock);
 
@@ -2452,7 +2474,8 @@ static void mtk_dim_tx(struct work_struct *work)
        val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
 
        mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
-       mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+       if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+               mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
 
        spin_unlock_bh(&eth->dim_lock);
 
@@ -2480,6 +2503,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
                        goto err_disable_pm;
                }
 
+               /* set interrupt delays based on current Net DIM sample */
+               mtk_dim_rx(&eth->rx_dim.work);
+               mtk_dim_tx(&eth->tx_dim.work);
+
                /* disable delay and normal interrupt */
                mtk_tx_irq_disable(eth, ~0);
                mtk_rx_irq_disable(eth, ~0);
index 11331b44ba07966ee3baef60da719c1343dfc4df..5ef70dd8b49c655c8d954f96b0fb9c0228d94489 100644 (file)
 /* QDMA FQ Free Page Buffer Length Register */
 #define MTK_QDMA_FQ_BLEN       0x1B2C
 
-/* GMA1 Received Good Byte Count Register */
-#define MTK_GDM1_TX_GBCNT      0x2400
+/* GMA1 counter / statics register */
+#define MTK_GDM1_RX_GBCNT_L    0x2400
+#define MTK_GDM1_RX_GBCNT_H    0x2404
+#define MTK_GDM1_RX_GPCNT      0x2408
+#define MTK_GDM1_RX_OERCNT     0x2410
+#define MTK_GDM1_RX_FERCNT     0x2414
+#define MTK_GDM1_RX_SERCNT     0x2418
+#define MTK_GDM1_RX_LENCNT     0x241c
+#define MTK_GDM1_RX_CERCNT     0x2420
+#define MTK_GDM1_RX_FCCNT      0x2424
+#define MTK_GDM1_TX_SKIPCNT    0x2428
+#define MTK_GDM1_TX_COLCNT     0x242c
+#define MTK_GDM1_TX_GBCNT_L    0x2430
+#define MTK_GDM1_TX_GBCNT_H    0x2434
+#define MTK_GDM1_TX_GPCNT      0x2438
 #define MTK_STAT_OFFSET                0x40
 
 /* QDMA descriptor txd4 */
 #define MT7628_SDM_MAC_ADRL    (MT7628_SDM_OFFSET + 0x0c)
 #define MT7628_SDM_MAC_ADRH    (MT7628_SDM_OFFSET + 0x10)
 
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT       (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT       (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT       (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT       (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR      (MT7628_SDM_OFFSET + 0x110)
+
 struct mtk_rx_dma {
        unsigned int rxd1;
        unsigned int rxd2;
index 1434df66fcf2ee0910d086e44694dfd35d7f1039..3616b77caa0adb407288c51297e580a7f415bb13 100644 (file)
@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
        return ret;
 }
 
-#define MLX4_EEPROM_PAGE_LEN 256
-
 static int mlx4_en_get_module_info(struct net_device *dev,
                                   struct ethtool_modinfo *modinfo)
 {
@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
                break;
        case MLX4_MODULE_ID_SFP:
                modinfo->type = ETH_MODULE_SFF_8472;
-               modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
                break;
        default:
                return -EINVAL;
index f6cfec81ccc3bcb7b3833cca4e871e36e9283b5c..dc4ac1a2b6b67625edf665363f6bf60337a81146 100644 (file)
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET         0xb0
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET   0xa8
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET  0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        if (mlx4_is_mfunc(dev))
                disable_unsupported_roce_caps(outbox);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+       dev_cap->map_clock_to_user = field & 0x80;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
        dev_cap->reserved_qps = 1 << (field & 0xf);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
index 8f020f26ebf5fc28cbff902ff790f96658b25c17..cf64e54eecb050bcc0be547b2d2693c75612371d 100644 (file)
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
        u32 health_buffer_addrs;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
        bool wol_port[MLX4_MAX_PORTS + 1];
+       bool map_clock_to_user;
 };
 
 struct mlx4_func_cap {
index c326b434734e1c601fb64814930351f6e4d0c9bd..00c84656b2e7e3265037f86868f0f7a5928bd3d1 100644 (file)
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
+       dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
        dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
        if (mlx4_is_slave(dev))
                return -EOPNOTSUPP;
 
+       if (!dev->caps.map_clock_to_user) {
+               mlx4_dbg(dev, "Map clock to user is not supported.\n");
+               return -EOPNOTSUPP;
+       }
+
        if (!params)
                return -EINVAL;
 
index ba6ac31a339dc3c6c84ff30eea9d8126ab2279b9..256a06b3c096b302a0196d71fe02bca3182db580 100644 (file)
@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
 #define I2C_ADDR_LOW  0x50
 #define I2C_ADDR_HIGH 0x51
 #define I2C_PAGE_SIZE 256
+#define I2C_HIGH_PAGE_SIZE 128
 
 /* Module Info Data */
 struct mlx4_cable_info {
@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
        return "Unknown Error";
 }
 
+static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
+{
+       struct mlx4_cmd_mailbox *inbox, *outbox;
+       struct mlx4_mad_ifc *inmad, *outmad;
+       struct mlx4_cable_info *cable_info;
+       int ret;
+
+       inbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(inbox))
+               return PTR_ERR(inbox);
+
+       outbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(outbox)) {
+               mlx4_free_cmd_mailbox(dev, inbox);
+               return PTR_ERR(outbox);
+       }
+
+       inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+       outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+       inmad->method = 0x1; /* Get */
+       inmad->class_version = 0x1;
+       inmad->mgmt_class = 0x1;
+       inmad->base_version = 0x1;
+       inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+       cable_info = (struct mlx4_cable_info *)inmad->data;
+       cable_info->dev_mem_address = 0;
+       cable_info->page_num = 0;
+       cable_info->i2c_addr = I2C_ADDR_LOW;
+       cable_info->size = cpu_to_be16(1);
+
+       ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
+       if (ret)
+               goto out;
+
+       if (be16_to_cpu(outmad->status)) {
+               /* Mad returned with bad status */
+               ret = be16_to_cpu(outmad->status);
+               mlx4_warn(dev,
+                         "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+                         0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
+                         cable_info_mad_err_str(ret));
+               ret = -ret;
+               goto out;
+       }
+       cable_info = (struct mlx4_cable_info *)outmad->data;
+       *module_id = cable_info->data[0];
+out:
+       mlx4_free_cmd_mailbox(dev, inbox);
+       mlx4_free_cmd_mailbox(dev, outbox);
+       return ret;
+}
+
+static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+       *i2c_addr = I2C_ADDR_LOW;
+       *page_num = 0;
+
+       if (*offset < I2C_PAGE_SIZE)
+               return;
+
+       *i2c_addr = I2C_ADDR_HIGH;
+       *offset -= I2C_PAGE_SIZE;
+}
+
+static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+       /* Offsets 0-255 belong to page 0.
+        * Offsets 256-639 belong to pages 01, 02, 03.
+        * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
+        */
+       if (*offset < I2C_PAGE_SIZE)
+               *page_num = 0;
+       else
+               *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
+       *i2c_addr = I2C_ADDR_LOW;
+       *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
+}
+
 /**
  * mlx4_get_module_info - Read cable module eeprom data
  * @dev: mlx4_dev.
@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
        struct mlx4_cmd_mailbox *inbox, *outbox;
        struct mlx4_mad_ifc *inmad, *outmad;
        struct mlx4_cable_info *cable_info;
-       u16 i2c_addr;
+       u8 module_id, i2c_addr, page_num;
        int ret;
 
        if (size > MODULE_INFO_MAX_READ)
                size = MODULE_INFO_MAX_READ;
 
+       ret = mlx4_get_module_id(dev, port, &module_id);
+       if (ret)
+               return ret;
+
+       switch (module_id) {
+       case MLX4_MODULE_ID_SFP:
+               mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       case MLX4_MODULE_ID_QSFP:
+       case MLX4_MODULE_ID_QSFP_PLUS:
+       case MLX4_MODULE_ID_QSFP28:
+               mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       default:
+               mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
+               return -EINVAL;
+       }
+
        inbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(inbox))
                return PTR_ERR(inbox);
@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
                 */
                size -= offset + size - I2C_PAGE_SIZE;
 
-       i2c_addr = I2C_ADDR_LOW;
-
        cable_info = (struct mlx4_cable_info *)inmad->data;
        cable_info->dev_mem_address = cpu_to_be16(offset);
-       cable_info->page_num = 0;
+       cable_info->page_num = page_num;
        cable_info->i2c_addr = i2c_addr;
        cable_info->size = cpu_to_be16(size);
 
index 95f2b26a3ee316c5f73b42a24ebad573f3b0345d..9c076aa20306a7d2bd116b85dfac000bcafaa3f0 100644 (file)
@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
        rpriv = priv->ppriv;
        fwd_vport_num = rpriv->rep->vport;
        lag_dev = netdev_master_upper_dev_get(netdev);
+       if (!lag_dev)
+               return;
 
        netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
                   lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
index 6cdc52d50a488a9a372719f7fb5252fa76f85f97..3113822618402f9854e3c83d0aa6af26ffc9dd93 100644 (file)
@@ -626,7 +626,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
                struct mlx5_eswitch *esw;
                u32 zone_restore_id;
 
-               tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+               tc_skb_ext = tc_skb_ext_alloc(skb);
                if (!tc_skb_ext) {
                        WARN_ON(1);
                        return false;
index 593503bc4d0785a8699edb1b624577ffc2e8403a..f1fb11680d2026b5a7d07add4959c409971994e8 100644 (file)
@@ -1505,7 +1505,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
 
        fen_info = container_of(info, struct fib_entry_notifier_info, info);
        fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
-       if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+       if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
            fen_info->dst_len != 32)
                return NULL;
 
index 8360289813f0dba27f9697536984fc508f3eb7fb..d6513aef5cd457dc4101d275d80b0f26137ef975 100644 (file)
@@ -1624,12 +1624,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       unsigned long fec_bitmap;
        u16 fec_policy = 0;
        int mode;
        int err;
 
-       if (bitmap_weight((unsigned long *)&fecparam->fec,
-                         ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+       bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
+       if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
                return -EOPNOTSUPP;
 
        for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
@@ -1893,6 +1894,13 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        if (curr_val == new_val)
                return 0;
 
+       if (new_val && !priv->profile->rx_ptp_support &&
+           priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+               netdev_err(priv->netdev,
+                          "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+               return -EINVAL;
+       }
+
        new_params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
        if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
index 0d571a0c76d90aec5c70e5497292f3de33776d3c..0b75fab41ae8f819737cd31d5178bec847621536 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
 #include "en.h"
 #include "en_rep.h"
 #include "lib/mpfs.h"
index bca832cdc4cbe22bd3a26d9ac847311f32bb9738..ec6bafe7a2e596c5b1d78be4c00aa8adc1efae18 100644 (file)
@@ -889,10 +889,13 @@ err_free_rq:
 void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-       if (rq->icosq)
+       if (rq->icosq) {
                mlx5e_trigger_irq(rq->icosq);
-       else
+       } else {
+               local_bh_disable();
                napi_schedule(rq->cq.napi);
+               local_bh_enable();
+       }
 }
 
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2697,7 +2700,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
        int err;
 
        old_num_txqs = netdev->real_num_tx_queues;
-       old_ntc = netdev->num_tc;
+       old_ntc = netdev->num_tc ? : 1;
 
        nch = priv->channels.params.num_channels;
        ntc = priv->channels.params.num_tc;
@@ -3855,6 +3858,16 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                        netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
        }
 
+       if (mlx5e_is_uplink_rep(priv)) {
+               features &= ~NETIF_F_HW_TLS_RX;
+               if (netdev->features & NETIF_F_HW_TLS_RX)
+                       netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+               features &= ~NETIF_F_HW_TLS_TX;
+               if (netdev->features & NETIF_F_HW_TLS_TX)
+                       netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+       }
+
        mutex_unlock(&priv->state_lock);
 
        return features;
@@ -3971,11 +3984,45 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
        return mlx5e_ptp_rx_manage_fs(priv, set);
 }
 
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
+{
+       bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+       int err;
+
+       if (!rx_filter)
+               /* Reset CQE compression to Admin default */
+               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+
+       if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+               return 0;
+
+       /* Disable CQE compression */
+       netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+       if (err)
+               netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+
+       return err;
+}
+
+static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
 {
        struct mlx5e_params new_params;
+
+       if (ptp_rx == priv->channels.params.ptp_rx)
+               return 0;
+
+       new_params = priv->channels.params;
+       new_params.ptp_rx = ptp_rx;
+       return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+                                       &new_params.ptp_rx, true);
+}
+
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
        struct hwtstamp_config config;
        bool rx_cqe_compress_def;
+       bool ptp_rx;
        int err;
 
        if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -3995,13 +4042,12 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
        }
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->channels.params;
        rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
 
        /* RX HW timestamp */
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
-               new_params.ptp_rx = false;
+               ptp_rx = false;
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
@@ -4018,24 +4064,25 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_NTP_ALL:
-               new_params.ptp_rx = rx_cqe_compress_def;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
+               /* ptp_rx is set if both HW TS is set and CQE
+                * compression is set
+                */
+               ptp_rx = rx_cqe_compress_def;
                break;
        default:
-               mutex_unlock(&priv->state_lock);
-               return -ERANGE;
+               err = -ERANGE;
+               goto err_unlock;
        }
 
-       if (new_params.ptp_rx == priv->channels.params.ptp_rx)
-               goto out;
+       if (!priv->profile->rx_ptp_support)
+               err = mlx5e_hwstamp_config_no_ptp_rx(priv,
+                                                    config.rx_filter != HWTSTAMP_FILTER_NONE);
+       else
+               err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
+       if (err)
+               goto err_unlock;
 
-       err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
-                                      &new_params.ptp_rx, true);
-       if (err) {
-               mutex_unlock(&priv->state_lock);
-               return err;
-       }
-out:
        memcpy(&priv->tstamp, &config, sizeof(config));
        mutex_unlock(&priv->state_lock);
 
@@ -4044,6 +4091,9 @@ out:
 
        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
+err_unlock:
+       mutex_unlock(&priv->state_lock);
+       return err;
 }
 
 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -5229,6 +5279,11 @@ static void mlx5e_update_features(struct net_device *netdev)
        rtnl_unlock();
 }
 
+static void mlx5e_reset_channels(struct net_device *netdev)
+{
+       netdev_reset_tc(netdev);
+}
+
 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 {
        const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
@@ -5283,6 +5338,7 @@ err_cleanup_tx:
        profile->cleanup_tx(priv);
 
 out:
+       mlx5e_reset_channels(priv->netdev);
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
        cancel_work_sync(&priv->update_stats_work);
        return err;
@@ -5300,6 +5356,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 
        profile->cleanup_rx(priv);
        profile->cleanup_tx(priv);
+       mlx5e_reset_channels(priv->netdev);
        cancel_work_sync(&priv->update_stats_work);
 }
 
index 47a9c49b25fd1bc39137e3e803094aa09fd2ee7d..dd64878e5b38183ac19fd28c277b095eb500f087 100644 (file)
@@ -1322,10 +1322,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct net_device *out_dev, *encap_dev = NULL;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5_flow_attr *attr = flow->attr;
        bool vf_tun = false, encap_valid = true;
+       struct net_device *encap_dev = NULL;
        struct mlx5_esw_flow_attr *esw_attr;
        struct mlx5_fc *counter = NULL;
        struct mlx5e_rep_priv *rpriv;
@@ -1371,16 +1371,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
        esw_attr = attr->esw_attr;
 
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+               struct net_device *out_dev;
                int mirred_ifindex;
 
                if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
                        continue;
 
                mirred_ifindex = parse_attr->mirred_ifindex[out_index];
-               out_dev = __dev_get_by_index(dev_net(priv->netdev),
-                                            mirred_ifindex);
+               out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+               if (!out_dev) {
+                       NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+                       err = -ENODEV;
+                       goto err_out;
+               }
                err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
                                         extack, &encap_dev, &encap_valid);
+               dev_put(out_dev);
                if (err)
                        goto err_out;
 
@@ -1393,6 +1399,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                esw_attr->dests[out_index].mdev = out_priv->mdev;
        }
 
+       if (vf_tun && esw_attr->out_count > 1) {
+               NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+               err = -EOPNOTSUPP;
+               goto err_out;
+       }
+
        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                goto err_out;
@@ -2003,11 +2015,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                    misc_parameters_3);
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector *dissector = rule->match.dissector;
+       enum fs_flow_table_type fs_type;
        u16 addr_type = 0;
        u8 ip_proto = 0;
        u8 *match_level;
        int err;
 
+       fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
        match_level = outer_match_level;
 
        if (dissector->used_keys &
@@ -2133,6 +2147,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                if (match.mask->vlan_id ||
                    match.mask->vlan_priority ||
                    match.mask->vlan_tpid) {
+                       if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
+                                                    fs_type)) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Matching on CVLAN is not supported");
+                               return -EOPNOTSUPP;
+                       }
+
                        if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
                                MLX5_SET(fte_match_set_misc, misc_c,
                                         outer_second_svlan_tag, 1);
@@ -3526,8 +3547,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
        if (err)
                return err;
 
-       *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
-                                       dev_get_iflink(vlan_dev));
+       rcu_read_lock();
+       *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+       rcu_read_unlock();
+       if (!*out_dev)
+               return -ENODEV;
+
        if (is_vlan_dev(*out_dev))
                err = add_vlan_push_action(priv, attr, out_dev, action);
 
@@ -5074,7 +5099,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
 
        if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
                chain = mapped_obj.chain;
-               tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+               tc_skb_ext = tc_skb_ext_alloc(skb);
                if (WARN_ON(!tc_skb_ext))
                        return false;
 
index 570f2280823c0671ced01947b67150c41f9db3bc..b88705a3a1a8e515251ca5b3961a2406917d1fd3 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/mlx5/mlx5_ifc.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
 #include "esw/acl/lgcy.h"
 #include "esw/legacy.h"
 #include "mlx5_core.h"
index db1e74280e5704747a4a0bac8bd278b8650f8bd4..d18a28a6e9a63a7b5f9c3cd15e6da7fda3db7dbb 100644 (file)
@@ -219,7 +219,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
                         struct mlx5_fs_chains *chains,
                         int i)
 {
-       flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+       if (mlx5_chains_ignore_flow_level_supported(chains))
+               flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
 }
index a81ece94f59910c00cc1123d16f89540ed64d6cf..b459549058450ab890f6093050d484926a5e232a 100644 (file)
@@ -65,7 +65,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
 {
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_namespace *root_ns;
-       int err;
+       int err, err2;
 
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
@@ -76,33 +76,34 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
        /* As this is the terminating action then the termination table is the
         * same prio as the slow path
         */
-       ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+       ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
                        MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
-       ft_attr.prio = FDB_SLOW_PATH;
+       ft_attr.prio = FDB_TC_OFFLOAD;
        ft_attr.max_fte = 1;
+       ft_attr.level = 1;
        ft_attr.autogroup.max_num_groups = 1;
        tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
        if (IS_ERR(tt->termtbl)) {
-               esw_warn(dev, "Failed to create termination table (error %d)\n",
-                        IS_ERR(tt->termtbl));
-               return -EOPNOTSUPP;
+               err = PTR_ERR(tt->termtbl);
+               esw_warn(dev, "Failed to create termination table, err %pe\n", tt->termtbl);
+               return err;
        }
 
        tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
                                       &tt->dest, 1);
        if (IS_ERR(tt->rule)) {
-               esw_warn(dev, "Failed to create termination table rule (error %d)\n",
-                        IS_ERR(tt->rule));
+               err = PTR_ERR(tt->rule);
+               esw_warn(dev, "Failed to create termination table rule, err %pe\n", tt->rule);
                goto add_flow_err;
        }
        return 0;
 
 add_flow_err:
-       err = mlx5_destroy_flow_table(tt->termtbl);
-       if (err)
-               esw_warn(dev, "Failed to destroy termination table\n");
+       err2 = mlx5_destroy_flow_table(tt->termtbl);
+       if (err2)
+               esw_warn(dev, "Failed to destroy termination table, err %d\n", err2);
 
-       return -EOPNOTSUPP;
+       return err;
 }
 
 static struct mlx5_termtbl_handle *
@@ -172,19 +173,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
        }
 }
 
-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
-{
-       switch (rt->reformat_type) {
-       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
-       case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
-       case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
-       case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
-               return true;
-       default:
-               return false;
-       }
-}
-
 static void
 mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
                                  struct mlx5_flow_act *dst)
@@ -202,14 +190,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
                        memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
                }
        }
-
-       if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
-           mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
-               src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
-               dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
-               dst->pkt_reformat = src->pkt_reformat;
-               src->pkt_reformat = NULL;
-       }
 }
 
 static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
@@ -238,6 +218,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
        int i;
 
        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+           !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
            attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
            !mlx5_eswitch_offload_is_uplink_port(esw, spec))
                return false;
@@ -279,12 +260,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
                if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
                        continue;
 
+               if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+                       term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+                       term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+               } else {
+                       term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+                       term_tbl_act.pkt_reformat = NULL;
+               }
+
                /* get the terminating table for the action list */
                tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
                                                     &dest[i], attr);
                if (IS_ERR(tt)) {
-                       esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
-                                IS_ERR(tt));
+                       esw_warn(esw->dev, "Failed to get termination table, err %pe\n", tt);
                        goto revert_changes;
                }
                attr->dests[num_vport_dests].termtbl = tt;
@@ -301,6 +289,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
                goto revert_changes;
 
        /* create the FTE */
+       flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+       flow_act->pkt_reformat = NULL;
+       flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
        if (IS_ERR(rule))
                goto revert_changes;
index d5d57630015ffb4c94fba3c74c9ec6a095b84c43..106b50e42b464d57d39e751595152bc2e2bf4ed1 100644 (file)
@@ -349,6 +349,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
                                                      reset_abort_work);
        struct mlx5_core_dev *dev = fw_reset->dev;
 
+       if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+               return;
+
        mlx5_sync_reset_clear_reset_requested(dev, true);
        mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
 }
index 2c41a69202642ad62756e06e1a3f3b47203e8c2b..fd6196b5e1630e0fbdb7713ea1a5e686644bfa80 100644 (file)
@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
        struct lag_mp *mp = &ldev->lag_mp;
        int err;
 
+       /* always clear mfi, as it might become stale when a route delete event
+        * has been missed
+        */
+       mp->mfi = NULL;
+
        if (mp->fib_nb.notifier_call)
                return 0;
 
@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
        unregister_fib_notifier(&init_net, &mp->fib_nb);
        destroy_workqueue(mp->wq);
        mp->fib_nb.notifier_call = NULL;
+       mp->mfi = NULL;
 }
index 00ef10a1a9f86dabe1078d4d2f0a6ef5124e6322..20a4047f2737d998fe43cc235738a5ea43526d4e 100644 (file)
@@ -107,7 +107,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
        return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
 }
 
-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
 {
        return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
 }
index e96f345e7dae7b7f2d4460094dfb8ef593ebcf89..d50bdb226cef8ff16aa380c359acaf27e593c4f1 100644 (file)
@@ -28,6 +28,7 @@ struct mlx5_chains_attr {
 
 bool
 mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
 bool
 mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
 u32
@@ -70,6 +71,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
 
 #else /* CONFIG_MLX5_CLS_ACT */
 
+static inline bool
+mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{ return false; }
+
 static inline struct mlx5_flow_table *
 mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
                      u32 level) { return ERR_PTR(-EOPNOTSUPP); }
index fd8449ff9e176594dbfcc11b06d52b93851ccb1a..839a01da110f34390a00f3e542d26ae8fd0a50c7 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/mpfs.h>
 #include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
@@ -175,6 +176,7 @@ out:
        mutex_unlock(&mpfs->lock);
        return err;
 }
+EXPORT_SYMBOL(mlx5_mpfs_add_mac);
 
 int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
 {
@@ -206,3 +208,4 @@ unlock:
        mutex_unlock(&mpfs->lock);
        return err;
 }
+EXPORT_SYMBOL(mlx5_mpfs_del_mac);
index 4a7b2c3203a7e94cbdc328283beefe586d490caa..4a293542a7aa1a0b6f9fa20bc8c2d50e15e7c2c0 100644 (file)
@@ -84,12 +84,9 @@ struct l2addr_node {
 #ifdef CONFIG_MLX5_MPFS
 int  mlx5_mpfs_init(struct mlx5_core_dev *dev);
 void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
-int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
-int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
 #else /* #ifndef CONFIG_MLX5_MPFS */
 static inline int  mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
-static inline int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
-static inline int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
 #endif
+
 #endif
index c114365eb126fd9ef458e6875b2696b5612e1092..a1d67bd7fb43b54ddce183eacd0a879fe1107598 100644 (file)
@@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
 
 static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
 {
-       struct mlx5_profile *prof = dev->profile;
+       struct mlx5_profile *prof = &dev->profile;
        void *set_hca_cap;
        int err;
 
@@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
                 to_fw_pkey_sz(dev, 128));
 
        /* Check log_max_qp from HCA caps to set in current profile */
-       if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+       if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
                mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
-                              profile[prof_sel].log_max_qp,
+                              prof->log_max_qp,
                               MLX5_CAP_GEN_MAX(dev, log_max_qp));
-               profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+               prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
        }
        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -1381,8 +1381,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
        struct mlx5_priv *priv = &dev->priv;
        int err;
 
-       dev->profile = &profile[profile_idx];
-
+       memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
        mutex_init(&dev->intf_state_mutex);
index 1f907df5b3a2b0b3bceceae5c0c7bec9c635c35c..c3373fb1cd7fcd73b131d8ec7eab74828eeb6b8a 100644 (file)
@@ -95,9 +95,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
 int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
                            int msix_vec_count)
 {
-       int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+       int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+       int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+       void *hca_cap = NULL, *query_cap = NULL, *cap;
        int num_vf_msix, min_msix, max_msix;
-       void *hca_cap, *cap;
        int ret;
 
        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
@@ -116,11 +117,20 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
        if (msix_vec_count > max_msix)
                return -EOVERFLOW;
 
-       hca_cap = kzalloc(sz, GFP_KERNEL);
-       if (!hca_cap)
-               return -ENOMEM;
+       query_cap = kzalloc(query_sz, GFP_KERNEL);
+       hca_cap = kzalloc(set_sz, GFP_KERNEL);
+       if (!hca_cap || !query_cap) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+       if (ret)
+               goto out;
 
        cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+       memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+              MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
 
        MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
@@ -130,7 +140,9 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
        MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
        ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+out:
        kfree(hca_cap);
+       kfree(query_cap);
        return ret;
 }
 
index a8e73c9ed1eae11a2e797d117415c2e9edf87bbd..1be0487693094a4ab613005383ba18118e7672bd 100644 (file)
@@ -136,10 +136,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
        switch (hw_state) {
        case MLX5_VHCA_STATE_ACTIVE:
        case MLX5_VHCA_STATE_IN_USE:
-       case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
                return DEVLINK_PORT_FN_STATE_ACTIVE;
        case MLX5_VHCA_STATE_INVALID:
        case MLX5_VHCA_STATE_ALLOCATED:
+       case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
        default:
                return DEVLINK_PORT_FN_STATE_INACTIVE;
        }
@@ -192,14 +192,17 @@ sf_err:
        return err;
 }
 
-static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
+                           struct netlink_ext_ack *extack)
 {
        int err;
 
        if (mlx5_sf_is_active(sf))
                return 0;
-       if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
-               return -EINVAL;
+       if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
+               NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
+               return -EBUSY;
+       }
 
        err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
        if (err)
@@ -226,7 +229,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
 
 static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
                             struct mlx5_sf *sf,
-                            enum devlink_port_fn_state state)
+                            enum devlink_port_fn_state state,
+                            struct netlink_ext_ack *extack)
 {
        int err = 0;
 
@@ -234,7 +238,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
        if (state == mlx5_sf_to_devlink_state(sf->hw_state))
                goto out;
        if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
-               err = mlx5_sf_activate(dev, sf);
+               err = mlx5_sf_activate(dev, sf, extack);
        else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
                err = mlx5_sf_deactivate(dev, sf);
        else
@@ -265,7 +269,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
                goto out;
        }
 
-       err = mlx5_sf_state_set(dev, table, sf, state);
+       err = mlx5_sf_state_set(dev, table, sf, state, extack);
 out:
        mlx5_sf_table_put(table);
        return err;
index 1fbcd012bb855eb0aca04de14a25a24e633f441b..7ccfd40586cee5e727b55ebaba63d86536cde8d6 100644 (file)
@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
        int ret;
 
        ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
-       ft_attr.level = dmn->info.caps.max_ft_level - 2;
+       ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+                             MLX5_FT_MAX_MULTIPATH_LEVEL);
        ft_attr.reformat_en = reformat_req;
        ft_attr.decap_en = reformat_req;
 
index 3658c4ae3c37e82c9b89b506439eaca83f77fe2e..ee921a99e439a23f42b27f34cca6a01a64b99957 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * Microchip ENCX24J600 ethernet driver
  *
  * Copyright (C) 2015 Gridpoint
index f604a260ede798b93d9235087cb850250f40de2e..fac61a8fbd0205feef4e1ed3608b733fcad12722 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
  * encx24j600_hw.h: Register definitions
  *
  */
index c84c8bf2bc20eaefb6711b5836652502f9196da8..fc99ad8e4a388c02578f338d9006b0e69737c79d 100644 (file)
@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_err(&pdev->dev,
                        "invalid sram_size %dB or board span %ldB\n",
                        mgp->sram_size, mgp->board_span);
+               status = -EINVAL;
                goto abort_with_ioremap;
        }
        memcpy_fromio(mgp->eeprom_strings,
index 5f8b0bb3af6e342ee08077279e29764058df20ea..202973a82712ea5a1ed45f9b90d914d1513e0e4b 100644 (file)
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
 config IONIC
        tristate "Pensando Ethernet IONIC Support"
        depends on 64BIT && PCI
+       depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
        select NET_DEVLINK
        select DIMLIB
        help
index 214e347097a7a952913b9cc0e7752df70257da0b..2376b2729633f9e98c3c2a6238c871339260b655 100644 (file)
@@ -114,7 +114,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
                value = readl(&port_regs->CommonRegs.semaphoreReg);
                if ((value & (sem_mask >> 16)) == sem_bits)
                        return 0;
-               ssleep(1);
+               mdelay(1000);
        } while (--seconds);
        return -1;
 }
index d8a3ecaed3fc65a458d12c044006fe593a79e154..d8f0863b393425b0dd2c49526e10b679f6062eec 100644 (file)
@@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
        for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
                skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
                if (!skb)
-                       break;
+                       goto error;
                qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
                skb_put(skb, QLCNIC_ILB_PKT_SIZE);
                adapter->ahw->diag_cnt = 0;
@@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
                        cnt++;
        }
        if (cnt != i) {
+error:
                dev_err(&adapter->pdev->dev,
                        "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
                if (mode != QLCNIC_ILB_MODE)
index d1e908846f5dd9347adae576456dee70caa6820a..22fbb0ae77fba7681bee74ed0726ab6dbb8d7a2c 100644 (file)
@@ -90,6 +90,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
                                  efx->pci_dev->irq);
                        goto fail1;
                }
+               efx->irqs_hooked = true;
                return 0;
        }
 
index 527077c98ebce1a49d87c5c3458c2a947143855a..fc3b0acc8f99f0ee2cae3db690eb8fac78436f81 100644 (file)
@@ -30,7 +30,7 @@ struct sunxi_priv_data {
 static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
 {
        struct sunxi_priv_data *gmac = priv;
-       int ret;
+       int ret = 0;
 
        if (gmac->regulator) {
                ret = regulator_enable(gmac->regulator);
@@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
        } else {
                clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
                ret = clk_prepare(gmac->tx_clk);
-               if (ret)
-                       return ret;
+               if (ret && gmac->regulator)
+                       regulator_disable(gmac->regulator);
        }
 
-       return 0;
+       return ret;
 }
 
 static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
index 345b4c6d1fd40ad402d9380a121a0dfb3c728941..c87202cbd3d6d38defabae6c52214a839678c1b0 100644 (file)
@@ -1196,7 +1196,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
  */
 static int stmmac_init_phy(struct net_device *dev)
 {
-       struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        struct stmmac_priv *priv = netdev_priv(dev);
        struct device_node *node;
        int ret;
@@ -1222,8 +1221,12 @@ static int stmmac_init_phy(struct net_device *dev)
                ret = phylink_connect_phy(priv->phylink, phydev);
        }
 
-       phylink_ethtool_get_wol(priv->phylink, &wol);
-       device_set_wakeup_capable(priv->device, !!wol.supported);
+       if (!priv->plat->pmt) {
+               struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+               phylink_ethtool_get_wol(priv->phylink, &wol);
+               device_set_wakeup_capable(priv->device, !!wol.supported);
+       }
 
        return ret;
 }
@@ -1237,8 +1240,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
        priv->phylink_config.dev = &priv->dev->dev;
        priv->phylink_config.type = PHYLINK_NETDEV;
        priv->phylink_config.pcs_poll = true;
-       priv->phylink_config.ovr_an_inband =
-               priv->plat->mdio_bus_data->xpcs_an_inband;
+       if (priv->plat->mdio_bus_data)
+               priv->phylink_config.ovr_an_inband =
+                       priv->plat->mdio_bus_data->xpcs_an_inband;
 
        if (!fwnode)
                fwnode = dev_fwnode(priv->device);
@@ -5888,12 +5892,21 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
        struct stmmac_priv *priv = netdev_priv(ndev);
        int ret = 0;
 
+       ret = pm_runtime_get_sync(priv->device);
+       if (ret < 0) {
+               pm_runtime_put_noidle(priv->device);
+               return ret;
+       }
+
        ret = eth_mac_addr(ndev, addr);
        if (ret)
-               return ret;
+               goto set_mac_error;
 
        stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
 
+set_mac_error:
+       pm_runtime_put(priv->device);
+
        return ret;
 }
 
@@ -6188,12 +6201,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
        bool is_double = false;
        int ret;
 
-       ret = pm_runtime_get_sync(priv->device);
-       if (ret < 0) {
-               pm_runtime_put_noidle(priv->device);
-               return ret;
-       }
-
        if (be16_to_cpu(proto) == ETH_P_8021AD)
                is_double = true;
 
@@ -6219,6 +6226,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
        bool is_double = false;
        int ret;
 
+       ret = pm_runtime_get_sync(priv->device);
+       if (ret < 0) {
+               pm_runtime_put_noidle(priv->device);
+               return ret;
+       }
+
        if (be16_to_cpu(proto) == ETH_P_8021AD)
                is_double = true;
 
@@ -7036,7 +7049,6 @@ error_mdio_register:
        stmmac_napi_del(ndev);
 error_hw_init:
        destroy_workqueue(priv->wq);
-       stmmac_bus_clks_config(priv, false);
        bitmap_free(priv->af_xdp_zc_qps);
 
        return ret;
index 707ccdd03b19eea1de7c818bd6ead715a2da7393..74e748662ec017027b1eac053fa7618948c90a36 100644 (file)
@@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
                                     "VPD_SCAN: Reading in property [%s] len[%d]\n",
                                     namebuf, prop_len);
                        for (i = 0; i < prop_len; i++) {
-                               err = niu_pci_eeprom_read(np, off + i);
-                               if (err >= 0)
-                                       *prop_buf = err;
-                               ++prop_buf;
+                               err =  niu_pci_eeprom_read(np, off + i);
+                               if (err < 0)
+                                       return err;
+                               *prop_buf++ = err;
                        }
                }
 
@@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
 }
 
 /* ESPC_PIO_EN_ENABLE must be set */
-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+static int niu_pci_vpd_fetch(struct niu *np, u32 start)
 {
        u32 offset;
        int err;
 
        err = niu_pci_eeprom_read16_swp(np, start + 1);
        if (err < 0)
-               return;
+               return err;
 
        offset = err + 3;
 
@@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
                u32 end;
 
                err = niu_pci_eeprom_read(np, here);
+               if (err < 0)
+                       return err;
                if (err != 0x90)
-                       return;
+                       return -EINVAL;
 
                err = niu_pci_eeprom_read16_swp(np, here + 1);
                if (err < 0)
-                       return;
+                       return err;
 
                here = start + offset + 3;
                end = start + offset + err;
@@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
                offset += err;
 
                err = niu_pci_vpd_scan_props(np, here, end);
-               if (err < 0 || err == 1)
-                       return;
+               if (err < 0)
+                       return err;
+               if (err == 1)
+                       return -EINVAL;
        }
+       return 0;
 }
 
 /* ESPC_PIO_EN_ENABLE must be set */
@@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
                offset = niu_pci_vpd_offset(np);
                netif_printk(np, probe, KERN_DEBUG, np->dev,
                             "%s() VPD offset [%08x]\n", __func__, offset);
-               if (offset)
-                       niu_pci_vpd_fetch(np, offset);
+               if (offset) {
+                       err = niu_pci_vpd_fetch(np, offset);
+                       if (err < 0)
+                               return err;
+               }
                nw64(ESPC_PIO_EN, 0);
 
                if (np->flags & NIU_FLAGS_VPD_VALID) {
index 9030e619e54360fa4f17eb29e8966cdec8306b19..97942b0e3897539b0a63eb491719398cc9ac33a8 100644 (file)
@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
        tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
                                             KNAV_QUEUE_SHARED);
        if (IS_ERR(tx_pipe->dma_queue)) {
-               dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
-                       name, ret);
+               dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
+                       name, tx_pipe->dma_queue);
                ret = PTR_ERR(tx_pipe->dma_queue);
                goto err;
        }
index b9be530b285f9d967b510919d6083ccf27a56bec..ff83e00b77af7e7c3b091ed52f15b838bbcbf745 100644 (file)
@@ -8,8 +8,8 @@
 
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/ieee802154.h>
 #include <linux/irq.h>
@@ -1388,7 +1388,7 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
 
 static struct spi_driver mrf24j40_driver = {
        .driver = {
-               .of_match_table = of_match_ptr(mrf24j40_of_match),
+               .of_match_table = mrf24j40_of_match,
                .name = "mrf24j40",
        },
        .id_table = mrf24j40_ids,
index e7ff376cb5b7d358661e648e369120dfc06611c0..744406832a774a0cefa2fa1cea844cb4fa396135 100644 (file)
@@ -58,6 +58,7 @@ enum ipa_flag {
  * @mem_virt:          Virtual address of IPA-local memory space
  * @mem_offset:                Offset from @mem_virt used for access to IPA memory
  * @mem_size:          Total size (bytes) of memory at @mem_virt
+ * @mem_count:         Number of entries in the mem array
  * @mem:               Array of IPA-local memory region descriptors
  * @imem_iova:         I/O virtual address of IPA region in IMEM
  * @imem_size:         Size of IMEM region
@@ -103,6 +104,7 @@ struct ipa {
        void *mem_virt;
        u32 mem_offset;
        u32 mem_size;
+       u32 mem_count;
        const struct ipa_mem *mem;
 
        unsigned long imem_iova;
index c5c3b1b7e67d5a13c6122217cedc244aa040bbb4..1624125e7459f145275dc1c14e826f46f0154349 100644 (file)
@@ -180,7 +180,7 @@ int ipa_mem_config(struct ipa *ipa)
         * for the region, write "canary" values in the space prior to
         * the region's base address.
         */
-       for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
+       for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
                const struct ipa_mem *mem = &ipa->mem[mem_id];
                u16 canary_count;
                __le32 *canary;
@@ -487,6 +487,7 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
        ipa->mem_size = resource_size(res);
 
        /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
+       ipa->mem_count = mem_data->local_count;
        ipa->mem = mem_data->local;
 
        ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
index 8ce99c4888e106d2e7ef6001b5528f6905085bab..e096e68ac667b5a01e418913a9d09c4fb4e9f608 100644 (file)
@@ -71,7 +71,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
 
        return 0;
 fail_register:
-       mdiobus_free(bus->mii_bus);
        smi_en.u64 = 0;
        oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
        return err;
@@ -85,7 +84,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
        bus = platform_get_drvdata(pdev);
 
        mdiobus_unregister(bus->mii_bus);
-       mdiobus_free(bus->mii_bus);
        smi_en.u64 = 0;
        oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
        return 0;
index cb1761693b690983a7c177f25669148912838eed..822d2cdd2f3599025f3e79d4243337c18114c951 100644 (file)
@@ -126,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
                        continue;
 
                mdiobus_unregister(bus->mii_bus);
-               mdiobus_free(bus->mii_bus);
                oct_mdio_writeq(0, bus->register_base + SMI_EN);
        }
        pci_release_regions(pdev);
index dadf75ff3ab938f9dd0ae7a70ec65f62c9ebef85..6045ad3def1235fe60a550967d39a25aa8446cad 100644 (file)
@@ -607,7 +607,8 @@ void mdiobus_unregister(struct mii_bus *bus)
        struct mdio_device *mdiodev;
        int i;
 
-       BUG_ON(bus->state != MDIOBUS_REGISTERED);
+       if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+               return;
        bus->state = MDIOBUS_UNREGISTERED;
 
        for (i = 0; i < PHY_MAX_ADDR; i++) {
index 0eeec80bec311ca6fd31f1d62c2847e2c7299a6f..2e60bc1b9a6b0a82b504f3b7f00efc6bd75daaae 100644 (file)
@@ -26,7 +26,7 @@
  * for transport over USB using a simpler USB device model than the
  * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
  *
- * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf
+ * For details, see https://usb.org/sites/default/files/CDC_EEM10.pdf
  *
  * This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
  * 2.6.27 and 2.6.30rc2 kernel.
index 3ef4b2841402c28c80605a19ce7ff8c973d46f5d..5c779cc0ea1124463808db677337b9d8ea792803 100644 (file)
@@ -1689,7 +1689,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
        spin_unlock_irqrestore(&serial->serial_lock, flags);
 
        return usb_control_msg(serial->parent->usb,
-                              usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
+                              usb_sndctrlpipe(serial->parent->usb, 0), 0x22,
                               0x21, val, if_num, NULL, 0,
                               USB_CTRL_SET_TIMEOUT);
 }
@@ -2436,7 +2436,7 @@ static int hso_rfkill_set_block(void *data, bool blocked)
        if (hso_dev->usb_gone)
                rv = 0;
        else
-               rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
+               rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0),
                                       enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
                                       USB_CTRL_SET_TIMEOUT);
        mutex_unlock(&hso_dev->mutex);
@@ -2618,32 +2618,31 @@ static struct hso_device *hso_create_bulk_serial_device(
                num_urbs = 2;
                serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
                                           GFP_KERNEL);
+               if (!serial->tiocmget)
+                       goto exit;
                serial->tiocmget->serial_state_notification
                        = kzalloc(sizeof(struct hso_serial_state_notification),
                                           GFP_KERNEL);
-               /* it isn't going to break our heart if serial->tiocmget
-                *  allocation fails don't bother checking this.
-                */
-               if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
-                       tiocmget = serial->tiocmget;
-                       tiocmget->endp = hso_get_ep(interface,
-                                                   USB_ENDPOINT_XFER_INT,
-                                                   USB_DIR_IN);
-                       if (!tiocmget->endp) {
-                               dev_err(&interface->dev, "Failed to find INT IN ep\n");
-                               goto exit;
-                       }
-
-                       tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
-                       if (tiocmget->urb) {
-                               mutex_init(&tiocmget->mutex);
-                               init_waitqueue_head(&tiocmget->waitq);
-                       } else
-                               hso_free_tiomget(serial);
+               if (!serial->tiocmget->serial_state_notification)
+                       goto exit;
+               tiocmget = serial->tiocmget;
+               tiocmget->endp = hso_get_ep(interface,
+                                           USB_ENDPOINT_XFER_INT,
+                                           USB_DIR_IN);
+               if (!tiocmget->endp) {
+                       dev_err(&interface->dev, "Failed to find INT IN ep\n");
+                       goto exit;
                }
-       }
-       else
+
+               tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!tiocmget->urb)
+                       goto exit;
+
+               mutex_init(&tiocmget->mutex);
+               init_waitqueue_head(&tiocmget->waitq);
+       } else {
                num_urbs = 1;
+       }
 
        if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE,
                                     BULK_URB_TX_SIZE))
index 6acc5e9045181c6cacfd03ff35f94b9e9433b16c..02bce40a67e5ba36018634f9070b890f56ea4630 100644 (file)
@@ -1645,6 +1645,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
        .get_strings    = lan78xx_get_strings,
        .get_wol        = lan78xx_get_wol,
        .set_wol        = lan78xx_set_wol,
+       .get_ts_info    = ethtool_op_get_ts_info,
        .get_eee        = lan78xx_get_eee,
        .set_eee        = lan78xx_set_eee,
        .get_pauseparam = lan78xx_get_pause,
index 136ea06540ff7923578c05152c552a9abb2e7c59..f6abb2fbf9728e5f8e378ef31cba21361a2e14e4 100644 (file)
@@ -8107,6 +8107,37 @@ static void r8156b_init(struct r8152 *tp)
        tp->coalesce = 15000;   /* 15 us */
 }
 
+static bool rtl_check_vendor_ok(struct usb_interface *intf)
+{
+       struct usb_host_interface *alt = intf->cur_altsetting;
+       struct usb_endpoint_descriptor *in, *out, *intr;
+
+       if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) {
+               dev_err(&intf->dev, "Expected endpoints are not found\n");
+               return false;
+       }
+
+       /* Check Rx endpoint address */
+       if (usb_endpoint_num(in) != 1) {
+               dev_err(&intf->dev, "Invalid Rx endpoint address\n");
+               return false;
+       }
+
+       /* Check Tx endpoint address */
+       if (usb_endpoint_num(out) != 2) {
+               dev_err(&intf->dev, "Invalid Tx endpoint address\n");
+               return false;
+       }
+
+       /* Check interrupt endpoint address */
+       if (usb_endpoint_num(intr) != 3) {
+               dev_err(&intf->dev, "Invalid interrupt endpoint address\n");
+               return false;
+       }
+
+       return true;
+}
+
 static bool rtl_vendor_mode(struct usb_interface *intf)
 {
        struct usb_host_interface *alt = intf->cur_altsetting;
@@ -8115,12 +8146,15 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
        int i, num_configs;
 
        if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
-               return true;
+               return rtl_check_vendor_ok(intf);
 
        /* The vendor mode is not always config #1, so to find it out. */
        udev = interface_to_usbdev(intf);
        c = udev->config;
        num_configs = udev->descriptor.bNumConfigurations;
+       if (num_configs < 2)
+               return false;
+
        for (i = 0; i < num_configs; (i++, c++)) {
                struct usb_interface_descriptor *desc = NULL;
 
@@ -8135,7 +8169,8 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
                }
        }
 
-       WARN_ON_ONCE(i == num_configs);
+       if (i == num_configs)
+               dev_err(&intf->dev, "Unexpected Device\n");
 
        return false;
 }
@@ -9381,9 +9416,6 @@ static int rtl8152_probe(struct usb_interface *intf,
        if (!rtl_vendor_mode(intf))
                return -ENODEV;
 
-       if (intf->cur_altsetting->desc.bNumEndpoints < 3)
-               return -ENODEV;
-
        usb_reset_device(udev);
        netdev = alloc_etherdev(sizeof(struct r8152));
        if (!netdev) {
index f8cdabb9ef5a4dd181605dc01f47e10b991ee9aa..b286993da67c96c4df24c12aff0f126100422be2 100644 (file)
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        ret = smsc75xx_wait_ready(dev, 0);
        if (ret < 0) {
                netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
-               return ret;
+               goto err;
        }
 
        smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        ret = smsc75xx_reset(dev);
        if (ret < 0) {
                netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
-               return ret;
+               goto err;
        }
 
        dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1502,6 +1502,10 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
        dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
        return 0;
+
+err:
+       kfree(pdata);
+       return ret;
 }
 
 static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
index 9b6a4a875c5531133fad2080ed84af9148d27972..78a01c71a17cfb3d833491cfda6e59e8b2e01eb3 100644 (file)
@@ -401,18 +401,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        /* If headroom is not 0, there is an offset between the beginning of the
         * data and the allocated space, otherwise the data and the allocated
         * space are aligned.
+        *
+        * Buffers with headroom use PAGE_SIZE as alloc size, see
+        * add_recvbuf_mergeable() + get_mergeable_buf_len()
         */
-       if (headroom) {
-               /* Buffers with headroom use PAGE_SIZE as alloc size,
-                * see add_recvbuf_mergeable() + get_mergeable_buf_len()
-                */
-               truesize = PAGE_SIZE;
-               tailroom = truesize - len - offset;
-               buf = page_address(page);
-       } else {
-               tailroom = truesize - len;
-               buf = p;
-       }
+       truesize = headroom ? PAGE_SIZE : truesize;
+       tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
+       buf = p - headroom;
 
        len -= hdr_len;
        offset += hdr_padded_len;
@@ -958,7 +953,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page, offset,
                                                       len, PAGE_SIZE, false,
-                                                      metasize, headroom);
+                                                      metasize,
+                                                      VIRTIO_XDP_HEADROOM);
                                return head_skb;
                        }
                        break;
index fc52b2cb500b3d9d235ad8aa459aab7a881ebc5b..dbe1f8514efc3df1175762c62d71f88fe6327835 100644 (file)
@@ -1,5 +1,4 @@
-ccflags-y := -O3
-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
 ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
 wireguard-y := main.o
 wireguard-y += noise.o
index 3725e9cd85f4f2797afd59f42af454acc107aa9a..b7197e80f2264053d4e4e28bdf69a33038335294 100644 (file)
@@ -6,6 +6,8 @@
 #include "allowedips.h"
 #include "peer.h"
 
+static struct kmem_cache *node_cache;
+
 static void swap_endian(u8 *dst, const u8 *src, u8 bits)
 {
        if (bits == 32) {
@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
        node->bitlen = bits;
        memcpy(node->bits, src, bits / 8U);
 }
-#define CHOOSE_NODE(parent, key) \
-       parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static inline u8 choose(struct allowedips_node *node, const u8 *key)
+{
+       return (key[node->bit_at_a] >> node->bit_at_b) & 1;
+}
 
 static void push_rcu(struct allowedips_node **stack,
                     struct allowedips_node __rcu *p, unsigned int *len)
@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
        }
 }
 
+static void node_free_rcu(struct rcu_head *rcu)
+{
+       kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
+}
+
 static void root_free_rcu(struct rcu_head *rcu)
 {
        struct allowedips_node *node, *stack[128] = {
@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
        while (len > 0 && (node = stack[--len])) {
                push_rcu(stack, node->bit[0], &len);
                push_rcu(stack, node->bit[1], &len);
-               kfree(node);
+               kmem_cache_free(node_cache, node);
        }
 }
 
@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
        }
 }
 
-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
-                               struct wg_peer *peer, struct mutex *lock)
-{
-#define REF(p) rcu_access_pointer(p)
-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
-#define PUSH(p) ({                                                             \
-               WARN_ON(IS_ENABLED(DEBUG) && len >= 128);                      \
-               stack[len++] = p;                                              \
-       })
-
-       struct allowedips_node __rcu **stack[128], **nptr;
-       struct allowedips_node *node, *prev;
-       unsigned int len;
-
-       if (unlikely(!peer || !REF(*top)))
-               return;
-
-       for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
-               nptr = stack[len - 1];
-               node = DEREF(nptr);
-               if (!node) {
-                       --len;
-                       continue;
-               }
-               if (!prev || REF(prev->bit[0]) == node ||
-                   REF(prev->bit[1]) == node) {
-                       if (REF(node->bit[0]))
-                               PUSH(&node->bit[0]);
-                       else if (REF(node->bit[1]))
-                               PUSH(&node->bit[1]);
-               } else if (REF(node->bit[0]) == prev) {
-                       if (REF(node->bit[1]))
-                               PUSH(&node->bit[1]);
-               } else {
-                       if (rcu_dereference_protected(node->peer,
-                               lockdep_is_held(lock)) == peer) {
-                               RCU_INIT_POINTER(node->peer, NULL);
-                               list_del_init(&node->peer_list);
-                               if (!node->bit[0] || !node->bit[1]) {
-                                       rcu_assign_pointer(*nptr, DEREF(
-                                              &node->bit[!REF(node->bit[0])]));
-                                       kfree_rcu(node, rcu);
-                                       node = DEREF(nptr);
-                               }
-                       }
-                       --len;
-               }
-       }
-
-#undef REF
-#undef DEREF
-#undef PUSH
-}
-
 static unsigned int fls128(u64 a, u64 b)
 {
        return a ? fls64(a) + 64U : fls64(b);
@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
                        found = node;
                if (node->cidr == bits)
                        break;
-               node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+               node = rcu_dereference_bh(node->bit[choose(node, key)]);
        }
        return found;
 }
@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
                           u8 cidr, u8 bits, struct allowedips_node **rnode,
                           struct mutex *lock)
 {
-       struct allowedips_node *node = rcu_dereference_protected(trie,
-                                               lockdep_is_held(lock));
+       struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
        struct allowedips_node *parent = NULL;
        bool exact = false;
 
@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
                        exact = true;
                        break;
                }
-               node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
-                                                lockdep_is_held(lock));
+               node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
        }
        *rnode = parent;
        return exact;
 }
 
+static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+{
+       node->parent_bit_packed = (unsigned long)parent | bit;
+       rcu_assign_pointer(*parent, node);
+}
+
+static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
+{
+       u8 bit = choose(parent, node->bits);
+       connect_node(&parent->bit[bit], bit, node);
+}
+
 static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
               u8 cidr, struct wg_peer *peer, struct mutex *lock)
 {
@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
                return -EINVAL;
 
        if (!rcu_access_pointer(*trie)) {
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
+               node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
                if (unlikely(!node))
                        return -ENOMEM;
                RCU_INIT_POINTER(node->peer, peer);
                list_add_tail(&node->peer_list, &peer->allowedips_list);
                copy_and_assign_cidr(node, key, cidr, bits);
-               rcu_assign_pointer(*trie, node);
+               connect_node(trie, 2, node);
                return 0;
        }
        if (node_placement(*trie, key, cidr, bits, &node, lock)) {
@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
                return 0;
        }
 
-       newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+       newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
        if (unlikely(!newnode))
                return -ENOMEM;
        RCU_INIT_POINTER(newnode->peer, peer);
@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
        if (!node) {
                down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
        } else {
-               down = rcu_dereference_protected(CHOOSE_NODE(node, key),
-                                                lockdep_is_held(lock));
+               const u8 bit = choose(node, key);
+               down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
                if (!down) {
-                       rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+                       connect_node(&node->bit[bit], bit, newnode);
                        return 0;
                }
        }
@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
        parent = node;
 
        if (newnode->cidr == cidr) {
-               rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+               choose_and_connect_node(newnode, down);
                if (!parent)
-                       rcu_assign_pointer(*trie, newnode);
+                       connect_node(trie, 2, newnode);
                else
-                       rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
-                                          newnode);
-       } else {
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
-               if (unlikely(!node)) {
-                       list_del(&newnode->peer_list);
-                       kfree(newnode);
-                       return -ENOMEM;
-               }
-               INIT_LIST_HEAD(&node->peer_list);
-               copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+                       choose_and_connect_node(parent, newnode);
+               return 0;
+       }
 
-               rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
-               rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
-               if (!parent)
-                       rcu_assign_pointer(*trie, node);
-               else
-                       rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
-                                          node);
+       node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+       if (unlikely(!node)) {
+               list_del(&newnode->peer_list);
+               kmem_cache_free(node_cache, newnode);
+               return -ENOMEM;
        }
+       INIT_LIST_HEAD(&node->peer_list);
+       copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+       choose_and_connect_node(node, down);
+       choose_and_connect_node(node, newnode);
+       if (!parent)
+               connect_node(trie, 2, node);
+       else
+               choose_and_connect_node(parent, node);
        return 0;
 }
 
@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
 void wg_allowedips_remove_by_peer(struct allowedips *table,
                                  struct wg_peer *peer, struct mutex *lock)
 {
+       struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
+       bool free_parent;
+
+       if (list_empty(&peer->allowedips_list))
+               return;
        ++table->seq;
-       walk_remove_by_peer(&table->root4, peer, lock);
-       walk_remove_by_peer(&table->root6, peer, lock);
+       list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
+               list_del_init(&node->peer_list);
+               RCU_INIT_POINTER(node->peer, NULL);
+               if (node->bit[0] && node->bit[1])
+                       continue;
+               child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+                                                 lockdep_is_held(lock));
+               if (child)
+                       child->parent_bit_packed = node->parent_bit_packed;
+               parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+               *parent_bit = child;
+               parent = (void *)parent_bit -
+                        offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+               free_parent = !rcu_access_pointer(node->bit[0]) &&
+                             !rcu_access_pointer(node->bit[1]) &&
+                             (node->parent_bit_packed & 3) <= 1 &&
+                             !rcu_access_pointer(parent->peer);
+               if (free_parent)
+                       child = rcu_dereference_protected(
+                                       parent->bit[!(node->parent_bit_packed & 1)],
+                                       lockdep_is_held(lock));
+               call_rcu(&node->rcu, node_free_rcu);
+               if (!free_parent)
+                       continue;
+               if (child)
+                       child->parent_bit_packed = parent->parent_bit_packed;
+               *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+               call_rcu(&parent->rcu, node_free_rcu);
+       }
 }
 
 int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
        return NULL;
 }
 
+int __init wg_allowedips_slab_init(void)
+{
+       node_cache = KMEM_CACHE(allowedips_node, 0);
+       return node_cache ? 0 : -ENOMEM;
+}
+
+void wg_allowedips_slab_uninit(void)
+{
+       rcu_barrier();
+       kmem_cache_destroy(node_cache);
+}
+
 #include "selftest/allowedips.c"
index e5c83cafcef4c92447cc4358d0e9f9d76f7832e0..2346c797eb4d877d76504e3e23d4d2b5dc831f1a 100644 (file)
@@ -15,14 +15,11 @@ struct wg_peer;
 struct allowedips_node {
        struct wg_peer __rcu *peer;
        struct allowedips_node __rcu *bit[2];
-       /* While it may seem scandalous that we waste space for v4,
-        * we're alloc'ing to the nearest power of 2 anyway, so this
-        * doesn't actually make a difference.
-        */
-       u8 bits[16] __aligned(__alignof(u64));
        u8 cidr, bit_at_a, bit_at_b, bitlen;
+       u8 bits[16] __aligned(__alignof(u64));
 
-       /* Keep rarely used list at bottom to be beyond cache line. */
+       /* Keep rarely used members at bottom to be beyond cache line. */
+       unsigned long parent_bit_packed;
        union {
                struct list_head peer_list;
                struct rcu_head rcu;
@@ -33,7 +30,7 @@ struct allowedips {
        struct allowedips_node __rcu *root4;
        struct allowedips_node __rcu *root6;
        u64 seq;
-};
+} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
 
 void wg_allowedips_init(struct allowedips *table);
 void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
 bool wg_allowedips_selftest(void);
 #endif
 
+int wg_allowedips_slab_init(void);
+void wg_allowedips_slab_uninit(void);
+
 #endif /* _WG_ALLOWEDIPS_H */
index 7a7d5f1a80fc7a29e2f889cef2fc53e101087041..75dbe77b0b4b4aeacbc75d77524108248b37a2fa 100644 (file)
@@ -21,13 +21,22 @@ static int __init mod_init(void)
 {
        int ret;
 
+       ret = wg_allowedips_slab_init();
+       if (ret < 0)
+               goto err_allowedips;
+
 #ifdef DEBUG
+       ret = -ENOTRECOVERABLE;
        if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
            !wg_ratelimiter_selftest())
-               return -ENOTRECOVERABLE;
+               goto err_peer;
 #endif
        wg_noise_init();
 
+       ret = wg_peer_init();
+       if (ret < 0)
+               goto err_peer;
+
        ret = wg_device_init();
        if (ret < 0)
                goto err_device;
@@ -44,6 +53,10 @@ static int __init mod_init(void)
 err_netlink:
        wg_device_uninit();
 err_device:
+       wg_peer_uninit();
+err_peer:
+       wg_allowedips_slab_uninit();
+err_allowedips:
        return ret;
 }
 
@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
 {
        wg_genetlink_uninit();
        wg_device_uninit();
+       wg_peer_uninit();
+       wg_allowedips_slab_uninit();
 }
 
 module_init(mod_init);
index cd5cb0292cb6752bd5eac2f0a111e1eec9fd393a..1acd00ab2fbcbf0ac5faabae474bf6ec16dcf30c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 
+static struct kmem_cache *peer_cache;
 static atomic64_t peer_counter = ATOMIC64_INIT(0);
 
 struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
        if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
                return ERR_PTR(ret);
 
-       peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+       peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
        if (unlikely(!peer))
                return ERR_PTR(ret);
-       if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+       if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
                goto err;
 
        peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
        return peer;
 
 err:
-       kfree(peer);
+       kmem_cache_free(peer_cache, peer);
        return ERR_PTR(ret);
 }
 
@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
        /* Mark as dead, so that we don't allow jumping contexts after. */
        WRITE_ONCE(peer->is_dead, true);
 
-       /* The caller must now synchronize_rcu() for this to take effect. */
+       /* The caller must now synchronize_net() for this to take effect. */
 }
 
 static void peer_remove_after_dead(struct wg_peer *peer)
@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
        lockdep_assert_held(&peer->device->device_update_lock);
 
        peer_make_dead(peer);
-       synchronize_rcu();
+       synchronize_net();
        peer_remove_after_dead(peer);
 }
 
@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
                peer_make_dead(peer);
                list_add_tail(&peer->peer_list, &dead_peers);
        }
-       synchronize_rcu();
+       synchronize_net();
        list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
                peer_remove_after_dead(peer);
 }
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
        /* The final zeroing takes care of clearing any remaining handshake key
         * material and other potentially sensitive information.
         */
-       kfree_sensitive(peer);
+       memzero_explicit(peer, sizeof(*peer));
+       kmem_cache_free(peer_cache, peer);
 }
 
 static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
                return;
        kref_put(&peer->refcount, kref_release);
 }
+
+int __init wg_peer_init(void)
+{
+       peer_cache = KMEM_CACHE(wg_peer, 0);
+       return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+       kmem_cache_destroy(peer_cache);
+}
index 8d53b687a1d163e33c79d2bd1495b976a70eebe1..76e4d3128ad4ea3f0601cb5055280a4dc097adc2 100644 (file)
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
 void wg_peer_remove(struct wg_peer *peer);
 void wg_peer_remove_all(struct wg_device *wg);
 
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
 #endif /* _WG_PEER_H */
index 846db14cb046b9250a4722f43d60a9d69c6da9e8..e173204ae7d78d48d730ea34b36f751added0abe 100644 (file)
 
 #include <linux/siphash.h>
 
-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
-                                             u8 cidr)
-{
-       swap_endian(dst, src, bits);
-       memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
-       if (cidr)
-               dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
-}
-
 static __init void print_node(struct allowedips_node *node, u8 bits)
 {
        char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
-       char *fmt_declaration = KERN_DEBUG
-               "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+       char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+       u8 ip1[16], ip2[16], cidr1, cidr2;
        char *style = "dotted";
-       u8 ip1[16], ip2[16];
        u32 color = 0;
 
+       if (node == NULL)
+               return;
        if (bits == 32) {
                fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
-               fmt_declaration = KERN_DEBUG
-                       "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+               fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
        } else if (bits == 128) {
                fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
-               fmt_declaration = KERN_DEBUG
-                       "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+               fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
        }
        if (node->peer) {
                hsiphash_key_t key = { { 0 } };
@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
                        hsiphash_1u32(0xabad1dea, &key) % 200;
                style = "bold";
        }
-       swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
-       printk(fmt_declaration, ip1, node->cidr, style, color);
+       wg_allowedips_read_node(node, ip1, &cidr1);
+       printk(fmt_declaration, ip1, cidr1, style, color);
        if (node->bit[0]) {
-               swap_endian_and_apply_cidr(ip2,
-                               rcu_dereference_raw(node->bit[0])->bits, bits,
-                               node->cidr);
-               printk(fmt_connection, ip1, node->cidr, ip2,
-                      rcu_dereference_raw(node->bit[0])->cidr);
-               print_node(rcu_dereference_raw(node->bit[0]), bits);
+               wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
+               printk(fmt_connection, ip1, cidr1, ip2, cidr2);
        }
        if (node->bit[1]) {
-               swap_endian_and_apply_cidr(ip2,
-                               rcu_dereference_raw(node->bit[1])->bits,
-                               bits, node->cidr);
-               printk(fmt_connection, ip1, node->cidr, ip2,
-                      rcu_dereference_raw(node->bit[1])->cidr);
-               print_node(rcu_dereference_raw(node->bit[1]), bits);
+               wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
+               printk(fmt_connection, ip1, cidr1, ip2, cidr2);
        }
+       if (node->bit[0])
+               print_node(rcu_dereference_raw(node->bit[0]), bits);
+       if (node->bit[1])
+               print_node(rcu_dereference_raw(node->bit[1]), bits);
 }
 
 static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
 {
        union nf_inet_addr mask;
 
-       memset(&mask, 0x00, 128 / 8);
-       memset(&mask, 0xff, cidr / 8);
+       memset(&mask, 0, sizeof(mask));
+       memset(&mask.all, 0xff, cidr / 8);
        if (cidr % 32)
                mask.all[cidr / 32] = (__force u32)htonl(
                        (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
 }
 
 static __init inline bool
-horrible_match_v4(const struct horrible_allowedips_node *node,
-                 struct in_addr *ip)
+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
 {
        return (ip->s_addr & node->mask.ip) == node->ip.ip;
 }
 
 static __init inline bool
-horrible_match_v6(const struct horrible_allowedips_node *node,
-                 struct in6_addr *ip)
+horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
 {
-       return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
-                      node->ip.ip6[0] &&
-              (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
-                      node->ip.ip6[1] &&
-              (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
-                      node->ip.ip6[2] &&
+       return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
+              (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
+              (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
               (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
 }
 
 static __init void
-horrible_insert_ordered(struct horrible_allowedips *table,
-                       struct horrible_allowedips_node *node)
+horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
 {
        struct horrible_allowedips_node *other = NULL, *where = NULL;
        u8 my_cidr = horrible_mask_to_cidr(node->mask);
 
        hlist_for_each_entry(other, &table->head, table) {
-               if (!memcmp(&other->mask, &node->mask,
-                           sizeof(union nf_inet_addr)) &&
-                   !memcmp(&other->ip, &node->ip,
-                           sizeof(union nf_inet_addr)) &&
-                   other->ip_version == node->ip_version) {
+               if (other->ip_version == node->ip_version &&
+                   !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
+                   !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
                        other->value = node->value;
                        kfree(node);
                        return;
                }
+       }
+       hlist_for_each_entry(other, &table->head, table) {
                where = other;
                if (horrible_mask_to_cidr(other->mask) <= my_cidr)
                        break;
@@ -201,8 +181,7 @@ static __init int
 horrible_allowedips_insert_v4(struct horrible_allowedips *table,
                              struct in_addr *ip, u8 cidr, void *value)
 {
-       struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
-                                                       GFP_KERNEL);
+       struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 
        if (unlikely(!node))
                return -ENOMEM;
@@ -219,8 +198,7 @@ static __init int
 horrible_allowedips_insert_v6(struct horrible_allowedips *table,
                              struct in6_addr *ip, u8 cidr, void *value)
 {
-       struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
-                                                       GFP_KERNEL);
+       struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 
        if (unlikely(!node))
                return -ENOMEM;
@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
 }
 
 static __init void *
-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
-                             struct in_addr *ip)
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
 {
        struct horrible_allowedips_node *node;
-       void *ret = NULL;
 
        hlist_for_each_entry(node, &table->head, table) {
-               if (node->ip_version != 4)
-                       continue;
-               if (horrible_match_v4(node, ip)) {
-                       ret = node->value;
-                       break;
-               }
+               if (node->ip_version == 4 && horrible_match_v4(node, ip))
+                       return node->value;
        }
-       return ret;
+       return NULL;
 }
 
 static __init void *
-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
-                             struct in6_addr *ip)
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
 {
        struct horrible_allowedips_node *node;
-       void *ret = NULL;
 
        hlist_for_each_entry(node, &table->head, table) {
-               if (node->ip_version != 6)
+               if (node->ip_version == 6 && horrible_match_v6(node, ip))
+                       return node->value;
+       }
+       return NULL;
+}
+
+
+static __init void
+horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
+{
+       struct horrible_allowedips_node *node;
+       struct hlist_node *h;
+
+       hlist_for_each_entry_safe(node, h, &table->head, table) {
+               if (node->value != value)
                        continue;
-               if (horrible_match_v6(node, ip)) {
-                       ret = node->value;
-                       break;
-               }
+               hlist_del(&node->table);
+               kfree(node);
        }
-       return ret;
+
 }
 
 static __init bool randomized_test(void)
@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
                        goto free;
                }
                kref_init(&peers[i]->refcount);
+               INIT_LIST_HEAD(&peers[i]->allowedips_list);
        }
 
        mutex_lock(&mutex);
@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
                        if (wg_allowedips_insert_v4(&t,
                                                    (struct in_addr *)mutated,
                                                    cidr, peer, &mutex) < 0) {
-                               pr_err("allowedips random malloc: FAIL\n");
+                               pr_err("allowedips random self-test malloc: FAIL\n");
                                goto free_locked;
                        }
                        if (horrible_allowedips_insert_v4(&h,
@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
                print_tree(t.root6, 128);
        }
 
-       for (i = 0; i < NUM_QUERIES; ++i) {
-               prandom_bytes(ip, 4);
-               if (lookup(t.root4, 32, ip) !=
-                   horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
-                       pr_err("allowedips random self-test: FAIL\n");
-                       goto free;
+       for (j = 0;; ++j) {
+               for (i = 0; i < NUM_QUERIES; ++i) {
+                       prandom_bytes(ip, 4);
+                       if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+                               horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
+                               pr_err("allowedips random v4 self-test: FAIL\n");
+                               goto free;
+                       }
+                       prandom_bytes(ip, 16);
+                       if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+                               pr_err("allowedips random v6 self-test: FAIL\n");
+                               goto free;
+                       }
                }
+               if (j >= NUM_PEERS)
+                       break;
+               mutex_lock(&mutex);
+               wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
+               mutex_unlock(&mutex);
+               horrible_allowedips_remove_by_value(&h, peers[j]);
        }
 
-       for (i = 0; i < NUM_QUERIES; ++i) {
-               prandom_bytes(ip, 16);
-               if (lookup(t.root6, 128, ip) !=
-                   horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
-                       pr_err("allowedips random self-test: FAIL\n");
-                       goto free;
-               }
+       if (t.root4 || t.root6) {
+               pr_err("allowedips random self-test removal: FAIL\n");
+               goto free;
        }
+
        ret = true;
 
 free:
index d9ad850daa7932b93e6dc982eccc1d22a4cc7ca3..8c496b7471082eb6c093154d7a05662718f9999c 100644 (file)
@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
        if (new4)
                wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
        mutex_unlock(&wg->socket_update_lock);
-       synchronize_rcu();
+       synchronize_net();
        sock_free(old4);
        sock_free(old6);
 }
index 956157946106c3f333426af6e706a48af22fa12f..dbc8aef82a65f3466e57e8ee1ef0aa48284e3f4d 100644 (file)
@@ -845,6 +845,7 @@ enum htt_security_types {
 
 #define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
 #define ATH10K_TXRX_NUM_EXT_TIDS 19
+#define ATH10K_TXRX_NON_QOS_TID 16
 
 enum htt_security_flags {
 #define HTT_SECURITY_TYPE_MASK 0x7F
index 1a08156d5011df4464739aef42862277b5f7be74..7ffb5d5b2a70e6f9bbee4d2b3c1a0406a79fc6d0 100644 (file)
@@ -1746,16 +1746,97 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
        msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
 }
 
+static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
+                                 u16 offset,
+                                 enum htt_rx_mpdu_encrypt_type enctype)
+{
+       struct ieee80211_hdr *hdr;
+       u64 pn = 0;
+       u8 *ehdr;
+
+       hdr = (struct ieee80211_hdr *)(skb->data + offset);
+       ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
+
+       if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
+               pn = ehdr[0];
+               pn |= (u64)ehdr[1] << 8;
+               pn |= (u64)ehdr[4] << 16;
+               pn |= (u64)ehdr[5] << 24;
+               pn |= (u64)ehdr[6] << 32;
+               pn |= (u64)ehdr[7] << 40;
+       }
+       return pn;
+}
+
+static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
+                                                struct sk_buff *skb,
+                                                u16 offset)
+{
+       struct ieee80211_hdr *hdr;
+
+       hdr = (struct ieee80211_hdr *)(skb->data + offset);
+       return !is_multicast_ether_addr(hdr->addr1);
+}
+
+static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
+                                         struct sk_buff *skb,
+                                         u16 peer_id,
+                                         u16 offset,
+                                         enum htt_rx_mpdu_encrypt_type enctype)
+{
+       struct ath10k_peer *peer;
+       union htt_rx_pn_t *last_pn, new_pn = {0};
+       struct ieee80211_hdr *hdr;
+       bool more_frags;
+       u8 tid, frag_number;
+       u32 seq;
+
+       peer = ath10k_peer_find_by_id(ar, peer_id);
+       if (!peer) {
+               ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
+               return false;
+       }
+
+       hdr = (struct ieee80211_hdr *)(skb->data + offset);
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               tid = ieee80211_get_tid(hdr);
+       else
+               tid = ATH10K_TXRX_NON_QOS_TID;
+
+       last_pn = &peer->frag_tids_last_pn[tid];
+       new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
+       more_frags = ieee80211_has_morefrags(hdr->frame_control);
+       frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+       seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+       if (frag_number == 0) {
+               last_pn->pn48 = new_pn.pn48;
+               peer->frag_tids_seq[tid] = seq;
+       } else {
+               if (seq != peer->frag_tids_seq[tid])
+                       return false;
+
+               if (new_pn.pn48 != last_pn->pn48 + 1)
+                       return false;
+
+               last_pn->pn48 = new_pn.pn48;
+       }
+
+       return true;
+}
+
 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status,
                                 bool fill_crypt_header,
                                 u8 *rx_hdr,
-                                enum ath10k_pkt_rx_err *err)
+                                enum ath10k_pkt_rx_err *err,
+                                u16 peer_id,
+                                bool frag)
 {
        struct sk_buff *first;
        struct sk_buff *last;
-       struct sk_buff *msdu;
+       struct sk_buff *msdu, *temp;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum htt_rx_mpdu_encrypt_type enctype;
@@ -1768,6 +1849,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
        bool is_decrypted;
        bool is_mgmt;
        u32 attention;
+       bool frag_pn_check = true, multicast_check = true;
 
        if (skb_queue_empty(amsdu))
                return;
@@ -1866,7 +1948,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
        }
 
        skb_queue_walk(amsdu, msdu) {
+               if (frag && !fill_crypt_header && is_decrypted &&
+                   enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+                       frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
+                                                                     msdu,
+                                                                     peer_id,
+                                                                     0,
+                                                                     enctype);
+
+               if (frag)
+                       multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
+                                                                              msdu,
+                                                                              0);
+
+               if (!frag_pn_check || !multicast_check) {
+                       /* Discard the fragment with invalid PN or multicast DA
+                        */
+                       temp = msdu->prev;
+                       __skb_unlink(msdu, amsdu);
+                       dev_kfree_skb_any(msdu);
+                       msdu = temp;
+                       frag_pn_check = true;
+                       multicast_check = true;
+                       continue;
+               }
+
                ath10k_htt_rx_h_csum_offload(msdu);
+
+               if (frag && !fill_crypt_header &&
+                   enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+                       status->flag &= ~RX_FLAG_MMIC_STRIPPED;
+
                ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
                                        is_decrypted);
 
@@ -1884,6 +1996,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 
                hdr = (void *)msdu->data;
                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+               if (frag && !fill_crypt_header &&
+                   enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+                       status->flag &= ~RX_FLAG_IV_STRIPPED &
+                                       ~RX_FLAG_MMIC_STRIPPED;
        }
 }
 
@@ -1991,14 +2108,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
        ath10k_unchain_msdu(amsdu, unchain_cnt);
 }
 
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+                                        struct sk_buff_head *amsdu)
+{
+       u8 *subframe_hdr;
+       struct sk_buff *first;
+       bool is_first, is_last;
+       struct htt_rx_desc *rxd;
+       struct ieee80211_hdr *hdr;
+       size_t hdr_len, crypto_len;
+       enum htt_rx_mpdu_encrypt_type enctype;
+       int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+       first = skb_peek(amsdu);
+
+       rxd = (void *)first->data - sizeof(*rxd);
+       hdr = (void *)rxd->rx_hdr_status;
+
+       is_first = !!(rxd->msdu_end.common.info0 &
+                     __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+       is_last = !!(rxd->msdu_end.common.info0 &
+                    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+       /* Return in case of non-aggregated msdu */
+       if (is_first && is_last)
+               return true;
+
+       /* First msdu flag is not set for the first msdu of the list */
+       if (!is_first)
+               return false;
+
+       enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                    RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+       crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+       subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+                      crypto_len;
+
+       /* Validate if the amsdu has a proper first subframe.
+        * There are chances a single msdu can be received as amsdu when
+        * the unauthenticated amsdu flag of a QoS header
+        * gets flipped in non-SPP AMSDU's, in such cases the first
+        * subframe has llc/snap header in place of a valid da.
+        * return false if the da matches rfc1042 pattern
+        */
+       if (ether_addr_equal(subframe_hdr, rfc1042_header))
+               return false;
+
+       return true;
+}
+
 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
                                        struct sk_buff_head *amsdu,
                                        struct ieee80211_rx_status *rx_status)
 {
-       /* FIXME: It might be a good idea to do some fuzzy-testing to drop
-        * invalid/dangerous frames.
-        */
-
        if (!rx_status->freq) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
                return false;
@@ -2009,6 +2174,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
                return false;
        }
 
+       if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+               ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+               return false;
+       }
+
        return true;
 }
 
@@ -2071,7 +2241,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
                ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
 
        ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
-       ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
+       ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
+                            false);
        msdus_to_queue = skb_queue_len(&amsdu);
        ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
 
@@ -2204,6 +2375,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
        fw_desc = &rx->fw_desc;
        rx_desc_len = fw_desc->len;
 
+       if (fw_desc->u.bits.discard) {
+               ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
+               goto err;
+       }
+
        /* I have not yet seen any case where num_mpdu_ranges > 1.
         * qcacld does not seem handle that case either, so we introduce the
         * same limitiation here as well.
@@ -2509,6 +2685,13 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
        rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
        rx_desc_info = __le32_to_cpu(rx_desc->info);
 
+       hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+
+       if (is_multicast_ether_addr(hdr->addr1)) {
+               /* Discard the fragment with multicast DA */
+               goto err;
+       }
+
        if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
                spin_unlock_bh(&ar->data_lock);
                return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
@@ -2516,8 +2699,6 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
                                                    HTT_RX_NON_TKIP_MIC);
        }
 
-       hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
-
        if (ieee80211_has_retry(hdr->frame_control))
                goto err;
 
@@ -3027,7 +3208,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                        ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
                        ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
-                                            NULL);
+                                            NULL, peer_id, frag);
                        ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
                        break;
                case -EAGAIN:
index f2b6bf8f0d60db404fa4987116817fd25267d141..705b6295e4663bc65dbf5df3a7a752d15f70a51e 100644 (file)
@@ -1282,7 +1282,19 @@ struct fw_rx_desc_base {
 #define FW_RX_DESC_UDP              (1 << 6)
 
 struct fw_rx_desc_hl {
-       u8 info0;
+       union {
+               struct {
+               u8 discard:1,
+                  forward:1,
+                  any_err:1,
+                  dup_err:1,
+                  reserved:1,
+                  inspect:1,
+                  extension:2;
+               } bits;
+               u8 info0;
+       } u;
+
        u8 version;
        u8 len;
        u8 flags;
index 1d9aa1bb6b6e994a97ce6e471972795dac8d5209..603d2f93ac18f73fcf74e9eebdc04053ed1ac172 100644 (file)
@@ -260,6 +260,16 @@ static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
        ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
 }
 
+static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
+                                       struct hal_rx_desc *desc)
+{
+       struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
+
+       return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
+               (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+                __le32_to_cpu(attn->info1)));
+}
+
 static void ath11k_dp_service_mon_ring(struct timer_list *t)
 {
        struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -852,6 +862,24 @@ static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_d
        __skb_queue_purge(&rx_tid->rx_frags);
 }
 
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
+{
+       struct dp_rx_tid *rx_tid;
+       int i;
+
+       lockdep_assert_held(&ar->ab->base_lock);
+
+       for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+               rx_tid = &peer->rx_tid[i];
+
+               spin_unlock_bh(&ar->ab->base_lock);
+               del_timer_sync(&rx_tid->frag_timer);
+               spin_lock_bh(&ar->ab->base_lock);
+
+               ath11k_dp_rx_frags_cleanup(rx_tid, true);
+       }
+}
+
 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
 {
        struct dp_rx_tid *rx_tid;
@@ -3450,6 +3478,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
        u8 tid;
        int ret = 0;
        bool more_frags;
+       bool is_mcbc;
 
        rx_desc = (struct hal_rx_desc *)msdu->data;
        peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
@@ -3457,6 +3486,11 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
        seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
        frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
        more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+       is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+
+       /* Multicast/Broadcast fragments are not expected */
+       if (is_mcbc)
+               return -EINVAL;
 
        if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
            !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
index bf399312b5ff5c26a3823b29fa7a9ca09b14c3a5..623da3bf9dc81035ed7ce5db3258515c172b6f08 100644 (file)
@@ -49,6 +49,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
                                       const u8 *peer_addr,
                                       enum set_key_cmd key_cmd,
                                       struct ieee80211_key_conf *key);
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
 void ath11k_peer_rx_tid_delete(struct ath11k *ar,
                               struct ath11k_peer *peer, u8 tid);
index 4df425dd31a2661bf9ec2715f4cf6d75fc60cf5a..9d0ff150ec30f17304ecf435e9009035b790868a 100644 (file)
@@ -2779,6 +2779,12 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
         */
        spin_lock_bh(&ab->base_lock);
        peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+
+       /* flush the fragments cache during key (re)install to
+        * ensure all frags in the new frag list belong to the same key.
+        */
+       if (peer && cmd == SET_KEY)
+               ath11k_peer_frags_flush(ar, peer);
        spin_unlock_bh(&ab->base_lock);
 
        if (!peer) {
index 7506cea46f58918207255eaf3e9a01fb07076abf..433a047f3747bc01e3c3b604bc42a94a4cecc47c 100644 (file)
@@ -1027,14 +1027,17 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
 {
        struct ath6kl *ar = file->private_data;
        unsigned long lrssi_roam_threshold;
+       int ret;
 
        if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
                return -EINVAL;
 
        ar->lrssi_roam_threshold = lrssi_roam_threshold;
 
-       ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+       ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
 
+       if (ret)
+               return ret;
        return count;
 }
 
index ce8c102df7b3e820b6a4db9e030427fdbe0bf5fd..633d0ab190314bfb84178d9f4a37fdeed19feb05 100644 (file)
@@ -1217,13 +1217,9 @@ static struct sdio_driver brcmf_sdmmc_driver = {
        },
 };
 
-void brcmf_sdio_register(void)
+int brcmf_sdio_register(void)
 {
-       int ret;
-
-       ret = sdio_register_driver(&brcmf_sdmmc_driver);
-       if (ret)
-               brcmf_err("sdio_register_driver failed: %d\n", ret);
+       return sdio_register_driver(&brcmf_sdmmc_driver);
 }
 
 void brcmf_sdio_exit(void)
index 08f9d47f2e5ca68b9e9e01e2e39b85d455222930..3f5da3bb6aa5930b026bdbb0d2737a3e30cd9f0f 100644 (file)
@@ -275,11 +275,26 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
 void brcmf_sdio_exit(void);
-void brcmf_sdio_register(void);
+int brcmf_sdio_register(void);
+#else
+static inline void brcmf_sdio_exit(void) { }
+static inline int brcmf_sdio_register(void) { return 0; }
 #endif
+
 #ifdef CONFIG_BRCMFMAC_USB
 void brcmf_usb_exit(void);
-void brcmf_usb_register(void);
+int brcmf_usb_register(void);
+#else
+static inline void brcmf_usb_exit(void) { }
+static inline int brcmf_usb_register(void) { return 0; }
+#endif
+
+#ifdef CONFIG_BRCMFMAC_PCIE
+void brcmf_pcie_exit(void);
+int brcmf_pcie_register(void);
+#else
+static inline void brcmf_pcie_exit(void) { }
+static inline int brcmf_pcie_register(void) { return 0; }
 #endif
 
 #endif /* BRCMFMAC_BUS_H */
index 838b09b23abff49da1e4e45817741a309ab4123c..cee1682d2333599666080df6c8e5ee16ab021552 100644 (file)
@@ -1518,40 +1518,34 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
        }
 }
 
-static void brcmf_driver_register(struct work_struct *work)
-{
-#ifdef CONFIG_BRCMFMAC_SDIO
-       brcmf_sdio_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
-       brcmf_usb_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
-       brcmf_pcie_register();
-#endif
-}
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
-
 int __init brcmf_core_init(void)
 {
-       if (!schedule_work(&brcmf_driver_work))
-               return -EBUSY;
+       int err;
 
+       err = brcmf_sdio_register();
+       if (err)
+               return err;
+
+       err = brcmf_usb_register();
+       if (err)
+               goto error_usb_register;
+
+       err = brcmf_pcie_register();
+       if (err)
+               goto error_pcie_register;
        return 0;
+
+error_pcie_register:
+       brcmf_usb_exit();
+error_usb_register:
+       brcmf_sdio_exit();
+       return err;
 }
 
 void __exit brcmf_core_exit(void)
 {
-       cancel_work_sync(&brcmf_driver_work);
-
-#ifdef CONFIG_BRCMFMAC_SDIO
        brcmf_sdio_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
        brcmf_usb_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
        brcmf_pcie_exit();
-#endif
 }
 
index ad79e3b7e74a350845494534e88d23679b2e8a00..143a705b5cb3a44fe3fddf491596162979edb05d 100644 (file)
@@ -2140,15 +2140,10 @@ static struct pci_driver brcmf_pciedrvr = {
 };
 
 
-void brcmf_pcie_register(void)
+int brcmf_pcie_register(void)
 {
-       int err;
-
        brcmf_dbg(PCIE, "Enter\n");
-       err = pci_register_driver(&brcmf_pciedrvr);
-       if (err)
-               brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
-                         err);
+       return pci_register_driver(&brcmf_pciedrvr);
 }
 
 
index d026401d200109bcd0cb7d3ef0b035c47d3cd785..8e6c227e8315c8372720ae5df9c7dca121ca1ae3 100644 (file)
@@ -11,9 +11,4 @@ struct brcmf_pciedev {
        struct brcmf_pciedev_info *devinfo;
 };
 
-
-void brcmf_pcie_exit(void);
-void brcmf_pcie_register(void);
-
-
 #endif /* BRCMFMAC_PCIE_H */
index 586f4dfc638b9ecd69f53898f95896af9ea689ce..9fb68c2dc7e39cec6d1ef547115b5d89aa4dfddd 100644 (file)
@@ -1584,12 +1584,8 @@ void brcmf_usb_exit(void)
        usb_deregister(&brcmf_usbdrvr);
 }
 
-void brcmf_usb_register(void)
+int brcmf_usb_register(void)
 {
-       int ret;
-
        brcmf_dbg(USB, "Enter\n");
-       ret = usb_register(&brcmf_usbdrvr);
-       if (ret)
-               brcmf_err("usb_register failed %d\n", ret);
+       return usb_register(&brcmf_usbdrvr);
 }
index f5b78257d551858692af2488b15236c0cdf7016b..c68814841583f562ce3495ee3dec82f7e72a8c21 100644 (file)
@@ -801,24 +801,6 @@ static const struct attribute_group mesh_ie_group = {
        .attrs = mesh_ie_attrs,
 };
 
-static void lbs_persist_config_init(struct net_device *dev)
-{
-       int ret;
-       ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
-       if (ret)
-               pr_err("failed to create boot_opts_group.\n");
-
-       ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
-       if (ret)
-               pr_err("failed to create mesh_ie_group.\n");
-}
-
-static void lbs_persist_config_remove(struct net_device *dev)
-{
-       sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
-       sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
 
 /***************************************************************************
  * Initializing and starting, stopping mesh
@@ -1014,6 +996,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
        SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
 
        mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+       mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
+       mesh_dev->sysfs_groups[1] = &boot_opts_group;
+       mesh_dev->sysfs_groups[2] = &mesh_ie_group;
+
        /* Register virtual mesh interface */
        ret = register_netdev(mesh_dev);
        if (ret) {
@@ -1021,19 +1007,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
                goto err_free_netdev;
        }
 
-       ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
-       if (ret)
-               goto err_unregister;
-
-       lbs_persist_config_init(mesh_dev);
-
        /* Everything successful */
        ret = 0;
        goto done;
 
-err_unregister:
-       unregister_netdev(mesh_dev);
-
 err_free_netdev:
        free_netdev(mesh_dev);
 
@@ -1054,8 +1031,6 @@ void lbs_remove_mesh(struct lbs_private *priv)
 
        netif_stop_queue(mesh_dev);
        netif_carrier_off(mesh_dev);
-       sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
-       lbs_persist_config_remove(mesh_dev);
        unregister_netdev(mesh_dev);
        priv->mesh_dev = NULL;
        kfree(mesh_dev->ieee80211_ptr);
index 977acab0360a3eb4889f45f4c756874aa4c0d3ea..03fe62837557632624ac9789e0e9ccfb8576072d 100644 (file)
@@ -514,10 +514,36 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
 {
        struct sk_buff *skb = phy->rx_amsdu[q].head;
+       struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
        struct mt76_dev *dev = phy->dev;
 
        phy->rx_amsdu[q].head = NULL;
        phy->rx_amsdu[q].tail = NULL;
+
+       /*
+        * Validate if the amsdu has a proper first subframe.
+        * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
+        * flag of the QoS header gets flipped. In such cases, the first
+        * subframe has a LLC/SNAP header in the location of the destination
+        * address.
+        */
+       if (skb_shinfo(skb)->frag_list) {
+               int offset = 0;
+
+               if (!(status->flag & RX_FLAG_8023)) {
+                       offset = ieee80211_get_hdrlen_from_skb(skb);
+
+                       if ((status->flag &
+                            (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
+                           RX_FLAG_DECRYPTED)
+                               offset += 8;
+               }
+
+               if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+                       dev_kfree_skb(skb);
+                       return;
+               }
+       }
        __skb_queue_tail(&dev->rx_skb[q], skb);
 }
 
index 86341d1f82f36cd390fb981ef6a68d5390bc3629..d20f05a7717d014bb36a6de31915ffa1fa22ab03 100644 (file)
@@ -510,7 +510,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
        mutex_init(&dev->pm.mutex);
        init_waitqueue_head(&dev->pm.wait);
        spin_lock_init(&dev->pm.txq_lock);
-       set_bit(MT76_STATE_PM, &dev->mphy.state);
        INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
        INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
        INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
index f81a17d5600889f59c9f559a40e618ee00b5cc4a..e2dcfee6be81e2c2fa5dab80c3c92d158a121dcc 100644 (file)
@@ -1912,8 +1912,9 @@ void mt7615_pm_wake_work(struct work_struct *work)
                        napi_schedule(&dev->mt76.napi[i]);
                mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
                mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
-               ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                            MT7615_WATCHDOG_TIME);
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+                       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+                                                    MT7615_WATCHDOG_TIME);
        }
 
        ieee80211_wake_queues(mphy->hw);
index 17fe4187d1de042a2b004393b5bc58fa95c22dfe..d1be78b0711c934c0222f708053aa30020c8da92 100644 (file)
@@ -51,16 +51,13 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
        return ret;
 }
 
-static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
        struct mt76_phy *mphy = &dev->mt76.phy;
        u32 status;
        int ret;
 
-       if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
-               goto out;
-
        sdio_claim_host(func);
 
        sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
@@ -76,13 +73,21 @@ static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
        }
 
        sdio_release_host(func);
-
-out:
        dev->pm.last_activity = jiffies;
 
        return 0;
 }
 
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+
+       if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+               return __mt7663s_mcu_drv_pmctrl(dev);
+
+       return 0;
+}
+
 static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
@@ -123,7 +128,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
        struct mt7615_mcu_ops *mcu_ops;
        int ret;
 
-       ret = mt7663s_mcu_drv_pmctrl(dev);
+       ret = __mt7663s_mcu_drv_pmctrl(dev);
        if (ret)
                return ret;
 
index c55698f9c49afdc7be9d3e7e9db4408192faf229..028ff432d811fba1859348b667710df3f01771ab 100644 (file)
@@ -55,10 +55,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
 
        dev->mt76.mcu_ops = &mt7663u_mcu_ops,
 
-       /* usb does not support runtime-pm */
-       clear_bit(MT76_STATE_PM, &dev->mphy.state);
        mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
-
        if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
                mt7615_mcu_restart(&dev->mt76);
                if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
index fe0ab5e5ff815ea78526ffba24ebd8c31d7e961f..619561606f96dba570a84304fb28d5b9f0a99eb7 100644 (file)
@@ -721,6 +721,10 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
        phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
        phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
        phy->rcpi = rcpi;
+       phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
+                               sta->ht_cap.ampdu_factor) |
+                    FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
+                               sta->ht_cap.ampdu_density);
 
        tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
        ra_info = (struct sta_rec_ra_info *)tlv;
index 5847f943e8da780e7bfa1983267907ba717f91bd..b795e7245c0750e6f900b90ad3246f611742471b 100644 (file)
@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
        .reconfig_complete = mt76x02_reconfig_complete,
 };
 
-static int mt76x0e_register_device(struct mt76x02_dev *dev)
+static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
 {
        int err;
 
@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
        if (err < 0)
                return err;
 
-       err = mt76x02_dma_init(dev);
-       if (err < 0)
-               return err;
+       if (!resume) {
+               err = mt76x02_dma_init(dev);
+               if (err < 0)
+                       return err;
+       }
 
        err = mt76x0_init_hardware(dev);
        if (err < 0)
@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
        mt76_clear(dev, 0x110, BIT(9));
        mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
 
+       return 0;
+}
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+       int err;
+
+       err = mt76x0e_init_hardware(dev, false);
+       if (err < 0)
+               return err;
+
        err = mt76x0_register_device(dev);
        if (err < 0)
                return err;
@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                return ret;
 
+       mt76_pci_disable_aspm(pdev);
+
        mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
                                 &drv_ops);
        if (!mdev)
@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev)
        mt76_free_device(mdev);
 }
 
+#ifdef CONFIG_PM
+static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int i;
+
+       mt76_worker_disable(&mdev->tx_worker);
+       for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++)
+               mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true);
+       for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++)
+               mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true);
+       napi_disable(&mdev->tx_napi);
+
+       mt76_for_each_q_rx(mdev, i)
+               napi_disable(&mdev->napi[i]);
+
+       mt76x02_dma_disable(dev);
+       mt76x02_mcu_cleanup(dev);
+       mt76x0_chip_onoff(dev, false, false);
+
+       pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+       pci_save_state(pdev);
+
+       return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
+static int mt76x0e_resume(struct pci_dev *pdev)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int err, i;
+
+       err = pci_set_power_state(pdev, PCI_D0);
+       if (err)
+               return err;
+
+       pci_restore_state(pdev);
+
+       mt76_worker_enable(&mdev->tx_worker);
+
+       mt76_for_each_q_rx(mdev, i) {
+               mt76_queue_rx_reset(dev, i);
+               napi_enable(&mdev->napi[i]);
+               napi_schedule(&mdev->napi[i]);
+       }
+
+       napi_enable(&mdev->tx_napi);
+       napi_schedule(&mdev->tx_napi);
+
+       return mt76x0e_init_hardware(dev, true);
+}
+#endif /* CONFIG_PM */
+
 static const struct pci_device_id mt76x0e_device_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) },
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) },
@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = {
        .id_table       = mt76x0e_device_table,
        .probe          = mt76x0e_probe,
        .remove         = mt76x0e_remove,
+#ifdef CONFIG_PM
+       .suspend        = mt76x0e_suspend,
+       .resume         = mt76x0e_resume,
+#endif /* CONFIG_PM */
 };
 
 module_pci_driver(mt76x0e_driver);
index fe28bf4050c41e60bb1ce7b88bd8e03ebbea04db..1763ea0614ce25f4817bfebc82ea5edfe74f008e 100644 (file)
@@ -76,8 +76,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
        struct wiphy *wiphy = hw->wiphy;
 
        hw->queues = 4;
-       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
-       hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+       hw->max_rx_aggregation_subframes = 64;
+       hw->max_tx_aggregation_subframes = 128;
 
        hw->radiotap_timestamp.units_pos =
                IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
index 214bd18597929514a8bebdaf7244c28cb06d6730..decf2d5f0ce3ab6c50a371c52b330b6a498cf147 100644 (file)
@@ -1404,8 +1404,9 @@ void mt7921_pm_wake_work(struct work_struct *work)
                        napi_schedule(&dev->mt76.napi[i]);
                mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
                mt7921_tx_cleanup(dev);
-               ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                            MT7921_WATCHDOG_TIME);
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+                       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+                                                    MT7921_WATCHDOG_TIME);
        }
 
        ieee80211_wake_queues(mphy->hw);
index f4c27aa41048631047739bc7010c4548ef669d10..97a0ef331ac320d450bc14e3f4343b39a1c799bb 100644 (file)
@@ -74,8 +74,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
                                IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
                else if (band == NL80211_BAND_5GHZ)
                        he_cap_elem->phy_cap_info[0] =
-                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
-                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
 
                he_cap_elem->phy_cap_info[1] =
                        IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
index 5f3d56d570a5ed0b9896a5bd6bd4165700b9af9d..67dc4b4cc09450b1cebe71824df4776e99b19286 100644 (file)
@@ -402,20 +402,22 @@ static void
 mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
                          u16 wlan_idx)
 {
-       struct mt7921_mcu_wlan_info_event *wtbl_info =
-               (struct mt7921_mcu_wlan_info_event *)(skb->data);
-       struct rate_info rate = {};
-       u8 curr_idx = wtbl_info->rate_info.rate_idx;
-       u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
-       struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
+       struct mt7921_mcu_wlan_info_event *wtbl_info;
        struct mt76_phy *mphy = &dev->mphy;
        struct mt7921_sta_stats *stats;
+       struct rate_info rate = {};
        struct mt7921_sta *msta;
        struct mt76_wcid *wcid;
+       u8 idx;
 
        if (wlan_idx >= MT76_N_WCIDS)
                return;
 
+       wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
+       idx = wtbl_info->rate_info.rate_idx;
+       if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
+               return;
+
        rcu_read_lock();
 
        wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
@@ -426,7 +428,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
        stats = &msta->stats;
 
        /* current rate */
-       mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
+       mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
+                                le16_to_cpu(wtbl_info->rate_info.rate[idx]));
        stats->tx_rate = rate;
 out:
        rcu_read_unlock();
index 2a7ee90a3f549c1c3a2f2c46b92582be01831676..ffd150ec181fab2ccc9d67606abd29c495800831 100644 (file)
@@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
 static void rtl_fwevt_wq_callback(struct work_struct *work);
 static void rtl_c2hcmd_wq_callback(struct work_struct *work);
 
-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct workqueue_struct *wq;
+
+       wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+       if (!wq)
+               return -ENOMEM;
 
        /* <1> timer */
        timer_setup(&rtlpriv->works.watchdog_timer,
@@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
                    rtl_easy_concurrent_retrytimer_callback, 0);
        /* <2> work queue */
        rtlpriv->works.hw = hw;
-       rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
-       if (unlikely(!rtlpriv->works.rtl_wq)) {
-               pr_err("Failed to allocate work queue\n");
-               return;
-       }
+       rtlpriv->works.rtl_wq = wq;
 
        INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
                          rtl_watchdog_wq_callback);
@@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
                          rtl_swlps_rfon_wq_callback);
        INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
        INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
+       return 0;
 }
 
 void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -564,9 +566,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
        rtlmac->link_state = MAC80211_NOLINK;
 
        /* <6> init deferred work */
-       _rtl_init_deferred_work(hw);
-
-       return 0;
+       return _rtl_init_deferred_work(hw);
 }
 EXPORT_SYMBOL_GPL(rtl_init_core);
 
index 193b723fe3bd73b38bd1681224e0770818794780..c58996c1e2309384d150966819c649a7d6660bf4 100644 (file)
@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
 {
        if (queue->task) {
                kthread_stop(queue->task);
+               put_task_struct(queue->task);
                queue->task = NULL;
        }
 
@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
        if (IS_ERR(task))
                goto kthread_err;
        queue->task = task;
+       /*
+        * Take a reference to the task in order to prevent it from being freed
+        * if the thread function returns before kthread_stop is called.
+        */
+       get_task_struct(task);
 
        task = kthread_run(xenvif_dealloc_kthread, queue,
                           "%s-dealloc", queue->name);
index ee4a339c05fd0bd2e20ed063652c8076acc3e878..058ce77b3cbce2b367962ed797d646d679f69a23 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Marvell NFC driver: Firmware downloader
  *
  * Copyright (C) 2015, Marvell International Ltd.
index 18cd96284b77a3b6827473b56f94dcc3fefdb5c3..c5420616b7bca508eeed80a09f66264673e572a5 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Marvell NFC-over-I2C driver: I2C interface related functions
  *
  * Copyright (C) 2015, Marvell International Ltd.
index de68ff45e49a4e7c228cb15f5e46c1e378736943..e84ee18c73aeb43c5063612e5fe0c3b477ac8fb6 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Marvell NFC driver
  *
  * Copyright (C) 2014-2015, Marvell International Ltd.
index 8e0ddb43477042dfa035c1ebb05b175b39d13796..dec0d3eb3648a5299ea822bbb56d63fdf699af72 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Marvell NFC-over-SPI driver: SPI interface related functions
  *
  * Copyright (C) 2015, Marvell International Ltd.
index e5a622ce4b9517d299170f20d35bde2bd5e003da..7194dd7ef0f1f47dea7382c3b6d45f9e5209ab3c 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Marvell NFC-over-UART driver
  *
  * Copyright (C) 2015, Marvell International Ltd.
index 888e298f610b8ed7140c85c5d0355d55007b6cfc..bcd563cb556ce1199382c689dc235b40f662b80c 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * Marvell NFC-over-USB driver: USB interface related functions
  *
  * Copyright (C) 2014, Marvell International Ltd.
index a44d49d63968a45a4c29f89ac9c7dadcb8890961..494675aeaaad7957e5df634fdb5911762a92328e 100644 (file)
@@ -71,7 +71,8 @@ config NVME_FC
 config NVME_TCP
        tristate "NVM Express over Fabrics TCP host driver"
        depends on INET
-       depends on BLK_DEV_NVME
+       depends on BLOCK
+       select NVME_CORE
        select NVME_FABRICS
        select CRYPTO
        select CRYPTO_CRC32C
index 522c9b229f80e81814345b80394a270ad666e4e3..66973bb5630551911c358e859266dab2da59638e 100644 (file)
@@ -2901,7 +2901,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
                ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
        }
 
-       ret = nvme_mpath_init(ctrl, id);
+       ret = nvme_mpath_init_identify(ctrl, id);
        if (ret < 0)
                goto out_free;
 
@@ -3485,8 +3485,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
        cdev_init(cdev, fops);
        cdev->owner = owner;
        ret = cdev_device_add(cdev, cdev_device);
-       if (ret)
+       if (ret) {
+               put_device(cdev_device);
                ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
+       }
        return ret;
 }
 
@@ -4364,6 +4366,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                min(default_ps_max_latency_us, (unsigned long)S32_MAX));
 
        nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+       nvme_mpath_init_ctrl(ctrl);
 
        return 0;
 out_free_name:
index a2bb7fc63a735d07f0a3e4b7e84629b68bf3eed2..34a84d2086c74f7726b0b7312401f714b4fe0741 100644 (file)
@@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
                        cmd->connect.recfmt);
                break;
 
+       case NVME_SC_HOST_PATH_ERROR:
+               dev_err(ctrl->device,
+                       "Connect command failed: host path error\n");
+               break;
+
        default:
                dev_err(ctrl->device,
                        "Connect command failed, error wo/DNR bit: %d\n",
index d9ab9e7871d0f7dc4b52e18e4c96091cfeac8733..f183f9fa03d0e18223c2e02e4eade52f13401d26 100644 (file)
@@ -2461,6 +2461,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
 static void
 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
 {
+       int q;
+
+       /*
+        * if aborting io, the queues are no longer good, mark them
+        * all as not live.
+        */
+       if (ctrl->ctrl.queue_count > 1) {
+               for (q = 1; q < ctrl->ctrl.queue_count; q++)
+                       clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
+       }
+       clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
        /*
         * If io queues are present, stop them and terminate all outstanding
         * ios on them. As FC allocates FC exchange for each io, the
@@ -3095,6 +3107,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ctrl->ctrl.icdoff) {
                dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
                                ctrl->ctrl.icdoff);
+               ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out_disconnect_admin_queue;
        }
 
@@ -3102,6 +3115,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
                dev_err(ctrl->ctrl.device,
                        "Mandatory sgls are not supported!\n");
+               ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out_disconnect_admin_queue;
        }
 
@@ -3268,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
                return;
 
-       if (portptr->port_state == FC_OBJSTATE_ONLINE)
+       if (portptr->port_state == FC_OBJSTATE_ONLINE) {
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
                        ctrl->cnum, status);
-       else if (time_after_eq(jiffies, rport->dev_loss_end))
+               if (status > 0 && (status & NVME_SC_DNR))
+                       recon = false;
+       } else if (time_after_eq(jiffies, rport->dev_loss_end))
                recon = false;
 
        if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
@@ -3286,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 
                queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
        } else {
-               if (portptr->port_state == FC_OBJSTATE_ONLINE)
-                       dev_warn(ctrl->ctrl.device,
-                               "NVME-FC{%d}: Max reconnect attempts (%d) "
-                               "reached.\n",
-                               ctrl->cnum, ctrl->ctrl.nr_reconnects);
-               else
+               if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+                       if (status > 0 && (status & NVME_SC_DNR))
+                               dev_warn(ctrl->ctrl.device,
+                                        "NVME-FC{%d}: reconnect failure\n",
+                                        ctrl->cnum);
+                       else
+                               dev_warn(ctrl->ctrl.device,
+                                        "NVME-FC{%d}: Max reconnect attempts "
+                                        "(%d) reached.\n",
+                                        ctrl->cnum, ctrl->ctrl.nr_reconnects);
+               } else
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: dev_loss_tmo (%d) expired "
                                "while waiting for remoteport connectivity.\n",
index 0551796517e61bfa82ac122e26dc7b9ae7002ade..f81871c7128a03d521e85513d067fe8c30e97229 100644 (file)
@@ -781,9 +781,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
        put_disk(head->disk);
 }
 
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
 {
-       int error;
+       mutex_init(&ctrl->ana_lock);
+       timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+       INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+}
+
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+       size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
+       size_t ana_log_size;
+       int error = 0;
 
        /* check if multipath is enabled and we have the capability */
        if (!multipath || !ctrl->subsys ||
@@ -795,37 +804,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
        ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
        ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
 
-       mutex_init(&ctrl->ana_lock);
-       timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
-       ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
-               ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
-       ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
-
-       if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
+       ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+               ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
+               ctrl->max_namespaces * sizeof(__le32);
+       if (ana_log_size > max_transfer_size) {
                dev_err(ctrl->device,
-                       "ANA log page size (%zd) larger than MDTS (%d).\n",
-                       ctrl->ana_log_size,
-                       ctrl->max_hw_sectors << SECTOR_SHIFT);
+                       "ANA log page size (%zd) larger than MDTS (%zd).\n",
+                       ana_log_size, max_transfer_size);
                dev_err(ctrl->device, "disabling ANA support.\n");
-               return 0;
+               goto out_uninit;
        }
-
-       INIT_WORK(&ctrl->ana_work, nvme_ana_work);
-       kfree(ctrl->ana_log_buf);
-       ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-       if (!ctrl->ana_log_buf) {
-               error = -ENOMEM;
-               goto out;
+       if (ana_log_size > ctrl->ana_log_size) {
+               nvme_mpath_stop(ctrl);
+               kfree(ctrl->ana_log_buf);
+               ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+               if (!ctrl->ana_log_buf)
+                       return -ENOMEM;
        }
-
+       ctrl->ana_log_size = ana_log_size;
        error = nvme_read_ana_log(ctrl);
        if (error)
-               goto out_free_ana_log_buf;
+               goto out_uninit;
        return 0;
-out_free_ana_log_buf:
-       kfree(ctrl->ana_log_buf);
-       ctrl->ana_log_buf = NULL;
-out:
+
+out_uninit:
+       nvme_mpath_uninit(ctrl);
        return error;
 }
 
index 05f31a2c64bb2c3772974bbfb3af8c5f1aa38556..0015860ec12bfdc547dfd48305bca588d5f5dd48 100644 (file)
@@ -712,7 +712,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -780,7 +781,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
-static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
                struct nvme_id_ctrl *id)
 {
        if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
index 37943dc4c2c11eefacb684d20e9cfd4739d4841c..4697a94c09459ca924c9bbf9b5a9dbe00f54da07 100644 (file)
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
                int count)
 {
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-       struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
        struct ib_sge *sge = &req->sge[1];
+       struct scatterlist *sgl;
        u32 len = 0;
        int i;
 
-       for (i = 0; i < count; i++, sgl++, sge++) {
+       for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
                sge->addr = sg_dma_address(sgl);
                sge->length = sg_dma_len(sgl);
                sge->lkey = queue->device->pd->local_dma_lkey;
                len += sge->length;
+               sge++;
        }
 
        sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
index 0222e23f5936da8032c75d5045efb368aa5fe74d..34f4b3402f7c1961a52cda389f14cca5e543a0d7 100644 (file)
@@ -943,7 +943,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                if (ret <= 0)
                        return ret;
 
-               nvme_tcp_advance_req(req, ret);
                if (queue->data_digest)
                        nvme_tcp_ddgst_update(queue->snd_hash, page,
                                        offset, ret);
@@ -960,6 +959,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                        }
                        return 1;
                }
+               nvme_tcp_advance_req(req, ret);
        }
        return -EAGAIN;
 }
@@ -1140,7 +1140,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
                                pending = true;
                        else if (unlikely(result < 0))
                                break;
-               }
+               } else
+                       pending = !llist_empty(&queue->req_list);
 
                result = nvme_tcp_try_recv(queue);
                if (result > 0)
index e7a367cf6d367d3270b776e8cdb9cb1bef818cf6..dcd49a72f2f3c13dfa70589ad09058afcbe4c712 100644 (file)
@@ -975,10 +975,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                return 0;
+       default:
+               return nvmet_report_invalid_opcode(req);
        }
-
-       pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
-              req->sq->qid);
-       req->error_loc = offsetof(struct nvme_common_command, opcode);
-       return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 }
index 25cc2ee8de3f10aa3658b6a769bbb024d965c280..b20b8d0a114416b3e9643dcc7bbf65ea6a01e910 100644 (file)
@@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 {
        struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvmet_ctrl, ka_work);
-       bool cmd_seen = ctrl->cmd_seen;
+       bool reset_tbkas = ctrl->reset_tbkas;
 
-       ctrl->cmd_seen = false;
-       if (cmd_seen) {
+       ctrl->reset_tbkas = false;
+       if (reset_tbkas) {
                pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                        ctrl->cntlid);
                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
@@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
        percpu_ref_exit(&sq->ref);
 
        if (ctrl) {
+               /*
+                * The teardown flow may take some time, and the host may not
+                * send us keep-alive during this period, hence reset the
+                * traffic based keep-alive timer so we don't trigger a
+                * controller teardown as a result of a keep-alive expiration.
+                */
+               ctrl->reset_tbkas = true;
                nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
@@ -952,7 +959,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
        }
 
        if (sq->ctrl)
-               sq->ctrl->cmd_seen = true;
+               sq->ctrl->reset_tbkas = true;
 
        return true;
 
@@ -998,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
        return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+               struct nvmet_req *req)
 {
-       req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+       req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
                        nvmet_data_transfer_len(req));
        if (!req->sg)
                goto out_err;
 
        if (req->metadata_len) {
-               req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+               req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
                                &req->metadata_sg_cnt, req->metadata_len);
                if (!req->metadata_sg)
                        goto out_free_sg;
        }
+
+       req->p2p_dev = p2p_dev;
+
        return 0;
 out_free_sg:
        pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1018,25 +1029,19 @@ out_err:
        return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-       if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-               return false;
-
-       if (req->sq->ctrl && req->sq->qid && req->ns) {
-               req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-                                                req->ns->nsid);
-               if (req->p2p_dev)
-                       return true;
-       }
-
-       req->p2p_dev = NULL;
-       return false;
+       if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+           !req->sq->ctrl || !req->sq->qid || !req->ns)
+               return NULL;
+       return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-       if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+       struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+       if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
                return 0;
 
        req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1065,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
                pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
                if (req->metadata_sg)
                        pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+               req->p2p_dev = NULL;
        } else {
                sgl_free(req->sg);
                if (req->metadata_sg)
@@ -1372,7 +1378,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
                goto out_free_changed_ns_list;
 
        if (subsys->cntlid_min > subsys->cntlid_max)
-               goto out_free_changed_ns_list;
+               goto out_free_sqs;
 
        ret = ida_simple_get(&cntlid_ida,
                             subsys->cntlid_min, subsys->cntlid_max,
index 4845d12e374acd795fb4da2bf6f5fd66856260aa..fc3645fc2c24988c808d3ebbd26437afe4091e7c 100644 (file)
@@ -379,7 +379,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
                req->execute = nvmet_execute_disc_identify;
                return 0;
        default:
-               pr_err("unhandled cmd %d\n", cmd->common.opcode);
+               pr_debug("unhandled cmd %d\n", cmd->common.opcode);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
index 1420a8e3e0b10173b1a106dc285a125005929d40..7d0f3523fdab2f3bdf97582bdc6ae0e84c39d3aa 100644 (file)
@@ -94,7 +94,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
                req->execute = nvmet_execute_prop_get;
                break;
        default:
-               pr_err("received unknown capsule type 0x%x\n",
+               pr_debug("received unknown capsule type 0x%x\n",
                        cmd->fabrics.fctype);
                req->error_loc = offsetof(struct nvmf_common_command, fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
@@ -284,13 +284,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
        struct nvme_command *cmd = req->cmd;
 
        if (!nvme_is_fabrics(cmd)) {
-               pr_err("invalid command 0x%x on unconnected queue.\n",
+               pr_debug("invalid command 0x%x on unconnected queue.\n",
                        cmd->fabrics.opcode);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
        if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-               pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+               pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
                        cmd->fabrics.fctype);
                req->error_loc = offsetof(struct nvmf_common_command, fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
index 9a8b3726a37c455a9e96056331e179b367e6fc9f..429263ca9b978a6f6ebe282be1a30d19198cd6db 100644 (file)
@@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 
        sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
 
-       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+       if (nvmet_use_inline_bvec(req)) {
                bio = &req->b.inline_bio;
                bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        } else {
index 715d4376c997975a743bedc908218b23325a7653..7fdbdc496597d5304eb7f6938b5cc12f1d890507 100644 (file)
@@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 
        ns->file = filp_open(ns->device_path, flags, 0);
        if (IS_ERR(ns->file)) {
-               pr_err("failed to open file %s: (%ld)\n",
-                               ns->device_path, PTR_ERR(ns->file));
-               return PTR_ERR(ns->file);
+               ret = PTR_ERR(ns->file);
+               pr_err("failed to open file %s: (%d)\n",
+                       ns->device_path, ret);
+               ns->file = NULL;
+               return ret;
        }
 
        ret = nvmet_file_ns_revalidate(ns);
index 74b3b150e1a57f9cc836275316b1b22e2a38c201..a5c4a186502639cd6daec64ff1f02a3ab5954f0c 100644 (file)
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+       if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+               return;
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
        }
+       ctrl->ctrl.queue_count = 1;
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        return 0;
 
 out_cleanup_queue:
+       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_loop_shutdown_ctrl(ctrl);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-               /* state change failure should never happen */
-               WARN_ON_ONCE(1);
+               if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+                   ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+                       /* state change failure for non-deleted ctrl? */
+                       WARN_ON_ONCE(1);
                return;
        }
 
@@ -590,8 +595,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 
        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
-       if (ret)
+       if (ret) {
+               kfree(ctrl);
                goto out;
+       }
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
                WARN_ON_ONCE(1);
index 5566ed403576ef3210b7e411423c47370238e724..53aea9a8056e73d1bff57a3c516e47370c90ff10 100644 (file)
@@ -167,7 +167,7 @@ struct nvmet_ctrl {
        struct nvmet_subsys     *subsys;
        struct nvmet_sq         **sqs;
 
-       bool                    cmd_seen;
+       bool                    reset_tbkas;
 
        struct mutex            lock;
        u64                     cap;
@@ -616,4 +616,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
        return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
 }
 
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+       return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+              req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
 #endif /* _NVMET_H */
index 2798944899b7360e93dd659718cd73e14c7e84c4..39b1473f7204eb78cc17f7988233ccbd05f37791 100644 (file)
@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
        if (req->sg_cnt > BIO_MAX_VECS)
                return -EINVAL;
 
-       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+       if (nvmet_use_inline_bvec(req)) {
                bio = &req->p.inline_bio;
                bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        } else {
index 6c1f3ab7649c78b3e6aa9cd8b266e29bcbeba3ad..7d607f435e36664f7bc36233ae8ab3af96575e63 100644 (file)
@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
        struct nvmet_rdma_rsp *rsp =
                container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
-       struct nvmet_rdma_queue *queue = cq->cq_context;
+       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
 
        nvmet_rdma_release_rsp(rsp);
 
@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
 {
        struct nvmet_rdma_rsp *rsp =
                container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
-       struct nvmet_rdma_queue *queue = cq->cq_context;
+       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
        struct rdma_cm_id *cm_id = rsp->queue->cm_id;
        u16 status;
 
index f9f34f6caf5e8c4d7f7bfa9a2af8687bb464f221..d8aceef83284680fbfeae752154eeea37b6f2960 100644 (file)
@@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
                 * nvmet_req_init is completed.
                 */
                if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
-                   len && len < cmd->req.port->inline_data_size &&
+                   len && len <= cmd->req.port->inline_data_size &&
                    nvme_is_write(cmd->req.cmd))
                        return;
        }
index da5b414d585abee91751b91901e21d4e2771605e..85dcb7097da4c99ba215970f5f60db444abb8a6f 100644 (file)
@@ -103,6 +103,13 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
 #endif
 }
 
+bool pci_host_of_has_msi_map(struct device *dev)
+{
+       if (dev && dev->of_node)
+               return of_get_property(dev->of_node, "msi-map", NULL);
+       return false;
+}
+
 static inline int __of_pci_pci_compare(struct device_node *node,
                                       unsigned int data)
 {
index 3a62d09b8869e438175707ee519dfbc42c7d0ecc..275204646c68c08b27ef7cdb9596cc638940836e 100644 (file)
@@ -925,7 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
        device_enable_async_suspend(bus->bridge);
        pci_set_bus_of_node(bus);
        pci_set_bus_msi_domain(bus);
-       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
+           !pci_host_of_has_msi_map(parent))
                bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 
        if (!parent)
index 899b9eb43fad69fabba135c37f857cef8a275953..a39f30fa2e9912f2d809e5ef8b24e44b5020c273 100644 (file)
@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
         * Other architectures (e.g., ARM) either do not support big endian, or
         * else leave I/O in little endian mode.
         */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(addr);
        else
                return readl_relaxed(addr);
@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
 static inline void brcm_usb_writel(u32 val, void __iomem *addr)
 {
        /* See brcmnand_readl() comments */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(val, addr);
        else
                writel_relaxed(val, addr);
index 5c68e31c5939985a80c1415c3fef9817a3803fb3..e93818e3991fd6145a81f82c7aba3245a351c2b3 100644 (file)
@@ -940,6 +940,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
        sp->nsubnodes = node;
 
        if (sp->num_lanes > SIERRA_MAX_LANES) {
+               ret = -EINVAL;
                dev_err(dev, "Invalid lane configuration\n");
                goto put_child2;
        }
index cdbcc49f71152cf479ab1a35eabeb8b663469f10..731c483a04dea37a8457bf28046588405a614296 100644 (file)
@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
                break;
        default:
                dev_err(tphy->dev, "incompatible PHY type\n");
+               clk_disable_unprepare(instance->ref_clk);
+               clk_disable_unprepare(instance->da_ref_clk);
                return -EINVAL;
        }
 
index c8a7d0927ced10d26ef906c9ee95048117cd4a61..4076580fc2cd974c04e231ad7ae8d17c87206221 100644 (file)
@@ -2470,6 +2470,10 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
        priv->coreclock = clock;
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!iores) {
+               dev_err(priv->dev, "Invalid resource\n");
+               return -EINVAL;
+       }
        iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
        if (IS_ERR(iomem)) {
                dev_err(priv->dev, "Unable to get serdes registers: %s\n",
index 753cb5bab9308f71b9ea41cff320f610d42d7995..2a9465f4bb3a923e18452f726aed8401c4af328e 100644 (file)
@@ -341,7 +341,7 @@ static struct platform_driver mt7621_pci_phy_driver = {
        .probe = mt7621_pci_phy_probe,
        .driver = {
                .name = "mt7621-pci-phy",
-               .of_match_table = of_match_ptr(mt7621_pci_phy_ids),
+               .of_match_table = mt7621_pci_phy_ids,
        },
 };
 
index 9eb6d37c907ead08f83b1b92bf14e6a75219bed5..126f5b8735cc1afffe8dbf6deeaa1dedc3486934 100644 (file)
@@ -1212,6 +1212,7 @@ static int wiz_probe(struct platform_device *pdev)
 
                if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
                    wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+                       ret = -EINVAL;
                        dev_err(dev, "Invalid typec-dir-debounce property\n");
                        goto err_addr_to_resource;
                }
index 996ebcba4d3867bb190473d0c40a8c2878ee3093..4c0d26606b6cc7d636718deeb3801bb2044b9aa5 100644 (file)
@@ -2702,8 +2702,8 @@ static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
 }
 
 /**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g5_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
  *
  * @ctx: The pinmux context
  * @expr: The expression associated with the function whose signal is to be
index 5c1a109842a76054e369873b2386875ab567b2a4..eeab093a78159558d9e19c6b95a50ba939a7a98d 100644 (file)
@@ -2611,8 +2611,8 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
 };
 
 /**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g6_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
  *
  * @ctx: The pinmux context
  * @expr: The expression associated with the function whose signal is to be
index 9c65d560d48f711eb1a7abf9dd613971997465a7..9bbfe5c14b3687e7d4e61be0e38fcb10a3ed5c4d 100644 (file)
@@ -108,7 +108,8 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
 }
 
 /**
- * Disable a signal on a pin by disabling all provided signal expressions.
+ * aspeed_disable_sig() - Disable a signal on a pin by disabling all provided
+ * signal expressions.
  *
  * @ctx: The pinmux context
  * @exprs: The list of signal expressions (from a priority level on a pin)
index 57305ca838a7c800fb93cf5377bf1ecb5a9bc562..894e2efd3be76c232729cead82f3fa0cb5c7217a 100644 (file)
@@ -21,7 +21,8 @@ static inline void aspeed_sig_desc_print_val(
 }
 
 /**
- * Query the enabled or disabled state of a signal descriptor
+ * aspeed_sig_desc_eval() - Query the enabled or disabled state of a signal
+ * descriptor.
  *
  * @desc: The signal descriptor of interest
  * @enabled: True to query the enabled state, false to query disabled state
index 25d2f7f7f3b68e180c93dc20bad56f51d4989c52..11e967dbb44bb9e9b1bd8fb09b02196516f74177 100644 (file)
@@ -223,7 +223,7 @@ config PINCTRL_SC7280
 config PINCTRL_SC8180X
        tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
        depends on GPIOLIB && (OF || ACPI)
-       select PINCTRL_MSM
+       depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
index 5aaf57b40407f9da80c35ab82de0c084d847b1c0..0bb4931cec59e520c70a89cd8a0787b9e7244ce2 100644 (file)
@@ -410,15 +410,15 @@ static const char * const gpio_groups[] = {
        "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
        "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
        "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
-       "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
-       "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
-       "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
-       "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
-       "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
-       "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
-       "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
-       "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
-       "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+       "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+       "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+       "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+       "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+       "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+       "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+       "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+       "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+       "gpio105", "gpio106", "gpio107",
 };
 
 static const char * const qdss_stm_groups[] = {
index 1f4bca854add5516e5d2bef3f20270f07aa193e6..a9b511c7e850086ecec2873ffbad1068b8002cc0 100644 (file)
@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
        if (p->groups[group].enabled) {
                dev_err(p->dev, "%s is already enabled\n",
                        p->groups[group].name);
-               return -EBUSY;
+               return 0;
        }
 
        p->groups[group].enabled = 1;
index bbc4e71a16ff8372f69977a74e5dd7899beabba7..38800e86ed8ad47371abe6f4459b259137a85c0f 100644 (file)
@@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
        if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
                return NULL;
 
+       /* Make sure 'avail->idx' is visible already. */
+       virtio_rmb(false);
+
        idx = vring->next_avail % vr->num;
        head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
        if (WARN_ON(head >= vr->num))
@@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
         * done or not. Add a memory barrier here to make sure the update above
         * completes before updating the idx.
         */
-       mb();
+       virtio_mb(false);
        vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
 }
 
@@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
                desc = NULL;
                fifo->vring[is_rx] = NULL;
 
+               /*
+                * Make sure the load/store are in order before
+                * returning back to virtio.
+                */
+               virtio_mb(false);
+
                /* Notify upper layer that packet is done. */
                spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
                vring_interrupt(0, vring->vq);
index a9db2f32658f2bef0f255e60a5db91b1ddf6f1d2..b013445147dd5692e688a8808ab01a533b00bc4a 100644 (file)
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
 
        err = devm_request_irq(&pdev->dev, priv->irq,
                               mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
-                              | IRQF_SHARED | IRQF_NO_AUTOEN,
-                              "mlxreg-hotplug", priv);
+                              | IRQF_SHARED, "mlxreg-hotplug", priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
                return err;
        }
 
+       disable_irq(priv->irq);
        spin_lock_init(&priv->lock);
        INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
        dev_set_drvdata(&pdev->dev, priv);
index 69e86cd599d38dce783d91b061ab5d3d6dbdf10b..a06964aa96e770d183009ea08a31ab9fa5d913f4 100644 (file)
@@ -1907,7 +1907,7 @@ static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
 {
        int status;
 
-       status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+       status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
 
        if (status < 0 && status != -EINVAL) {
                ssam_err(ctrl,
@@ -2483,8 +2483,7 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
         * interrupt, and let the SAM resume callback during the controller
         * resume process clear it.
         */
-       const int irqf = IRQF_SHARED | IRQF_ONESHOT |
-                        IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
+       const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
 
        gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
        if (IS_ERR(gpiod))
index 8dc2c267bcd68f10a192f3acb337d6dddf2376df..517f774a6e60f17efc935dcc7b10016741525c08 100644 (file)
@@ -621,8 +621,8 @@ static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
 
 static int ssam_serial_hub_probe(struct serdev_device *serdev)
 {
+       struct acpi_device *ssh = ACPI_COMPANION(&serdev->dev);
        struct ssam_controller *ctrl;
-       acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
        acpi_status astatus;
        int status;
 
@@ -652,7 +652,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
        if (status)
                goto err_devopen;
 
-       astatus = ssam_serdev_setup_via_acpi(ssh, serdev);
+       astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev);
        if (ACPI_FAILURE(astatus)) {
                status = -ENXIO;
                goto err_devinit;
@@ -706,7 +706,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
         *       For now let's thus default power/wakeup to false.
         */
        device_set_wakeup_capable(&serdev->dev, true);
-       acpi_walk_dep_device_list(ssh);
+       acpi_dev_clear_dependencies(ssh);
 
        return 0;
 
index cc4f9cba68563c137c1c71785e8d6c26479ef2c6..dea82aa1abd489acf49717aee3bc4a0442a02653 100644 (file)
@@ -446,12 +446,12 @@ mshw0011_space_handler(u32 function, acpi_physical_address command,
 
 static int mshw0011_install_space_handler(struct i2c_client *client)
 {
-       acpi_handle handle;
+       struct acpi_device *adev;
        struct mshw0011_handler_data *data;
        acpi_status status;
 
-       handle = ACPI_HANDLE(&client->dev);
-       if (!handle)
+       adev = ACPI_COMPANION(&client->dev);
+       if (!adev)
                return -ENODEV;
 
        data = kzalloc(sizeof(struct mshw0011_handler_data),
@@ -460,25 +460,25 @@ static int mshw0011_install_space_handler(struct i2c_client *client)
                return -ENOMEM;
 
        data->client = client;
-       status = acpi_bus_attach_private_data(handle, (void *)data);
+       status = acpi_bus_attach_private_data(adev->handle, (void *)data);
        if (ACPI_FAILURE(status)) {
                kfree(data);
                return -ENOMEM;
        }
 
-       status = acpi_install_address_space_handler(handle,
-                               ACPI_ADR_SPACE_GSBUS,
-                               &mshw0011_space_handler,
-                               NULL,
-                               data);
+       status = acpi_install_address_space_handler(adev->handle,
+                                                   ACPI_ADR_SPACE_GSBUS,
+                                                   &mshw0011_space_handler,
+                                                   NULL,
+                                                   data);
        if (ACPI_FAILURE(status)) {
                dev_err(&client->dev, "Error installing i2c space handler\n");
-               acpi_bus_detach_private_data(handle);
+               acpi_bus_detach_private_data(adev->handle);
                kfree(data);
                return -ENOMEM;
        }
 
-       acpi_walk_dep_device_list(handle);
+       acpi_dev_clear_dependencies(adev);
        return 0;
 }
 
index ef9c1f8e8336ff7594f72b59676029644fa1054a..8339988d95c1b08ec91298180bcfa6aa5b4fb410 100644 (file)
@@ -798,7 +798,7 @@ static int san_consumer_links_setup(struct platform_device *pdev)
 
 static int san_probe(struct platform_device *pdev)
 {
-       acpi_handle san = ACPI_HANDLE(&pdev->dev);
+       struct acpi_device *san = ACPI_COMPANION(&pdev->dev);
        struct ssam_controller *ctrl;
        struct san_data *data;
        acpi_status astatus;
@@ -821,7 +821,8 @@ static int san_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, data);
 
-       astatus = acpi_install_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
+       astatus = acpi_install_address_space_handler(san->handle,
+                                                    ACPI_ADR_SPACE_GSBUS,
                                                     &san_opreg_handler, NULL,
                                                     &data->info);
        if (ACPI_FAILURE(astatus))
@@ -835,7 +836,7 @@ static int san_probe(struct platform_device *pdev)
        if (status)
                goto err_install_dev;
 
-       acpi_walk_dep_device_list(san);
+       acpi_dev_clear_dependencies(san);
        return 0;
 
 err_install_dev:
index 685d37a7add1411f63e065151cca445ffbcd832c..ef83461fa536d0a67c5522fd7ba2495706db8269 100644 (file)
@@ -156,7 +156,7 @@ static const struct software_node *ssam_node_group_sl2[] = {
        NULL,
 };
 
-/* Devices for Surface Laptop 3. */
+/* Devices for Surface Laptop 3 and 4. */
 static const struct software_node *ssam_node_group_sl3[] = {
        &ssam_node_root,
        &ssam_node_bat_ac,
@@ -521,9 +521,12 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
        /* Surface Laptop 3 (13", Intel) */
        { "MSHW0114", (unsigned long)ssam_node_group_sl3 },
 
-       /* Surface Laptop 3 (15", AMD) */
+       /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
        { "MSHW0110", (unsigned long)ssam_node_group_sl3 },
 
+       /* Surface Laptop 4 (13", Intel) */
+       { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+
        /* Surface Laptop Go 1 */
        { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
 
index 63ce587e79e3ba2470778b2dfc5386dfcb0d8ea4..1203b9a829939abb954e67b4371d7ff0c66fbe82 100644 (file)
@@ -427,6 +427,7 @@ static int surface_dtx_open(struct inode *inode, struct file *file)
         */
        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
                up_write(&ddev->client_lock);
+               mutex_destroy(&client->read_lock);
                sdtx_device_put(client->ddev);
                kfree(client);
                return -ENODEV;
@@ -527,20 +528,14 @@ static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt
        struct sdtx_client *client = file->private_data;
        __poll_t events = 0;
 
-       if (down_read_killable(&client->ddev->lock))
-               return -ERESTARTSYS;
-
-       if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
-               up_read(&client->ddev->lock);
+       if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
                return EPOLLHUP | EPOLLERR;
-       }
 
        poll_wait(file, &client->ddev->waitq, pt);
 
        if (!kfifo_is_empty(&client->buffer))
                events |= EPOLLIN | EPOLLRDNORM;
 
-       up_read(&client->ddev->lock);
        return events;
 }
 
index 2714f7c3843e3d3ea34117b95a2cab422bd95076..60592fb88e7a0a347d718fb9ae66ec2525bde198 100644 (file)
@@ -711,7 +711,7 @@ config INTEL_HID_EVENT
 
 config INTEL_INT0002_VGPIO
        tristate "Intel ACPI INT0002 Virtual GPIO driver"
-       depends on GPIOLIB && ACPI
+       depends on GPIOLIB && ACPI && PM_SLEEP
        select GPIOLIB_IRQCHIP
        help
          Some peripherals on Bay Trail and Cherry Trail platforms signal a
index a1753485159ca6164206665fbb7d1e53a08eb442..33f823772733568a4f0aa59ab60efc102129bbbf 100644 (file)
@@ -270,7 +270,8 @@ int init_dell_smbios_wmi(void)
 
 void exit_dell_smbios_wmi(void)
 {
-       wmi_driver_unregister(&dell_smbios_wmi_driver);
+       if (wmi_supported)
+               wmi_driver_unregister(&dell_smbios_wmi_driver);
 }
 
 MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
index 13d57434e60f26ee1686bd6286964301faee8ee6..5529d7b0abea353b3148510e10f0ed5c1bd063d2 100644 (file)
@@ -133,31 +133,21 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
        return r;
 }
 
+#define DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME(name) \
+       { .matches = { \
+               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), \
+               DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+       }}
+
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
-       { .matches = {
-               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
-               DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550 GAMING X V2"),
-       }},
-       { .matches = {
-               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
-               DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M AORUS PRO-P"),
-       }},
-       { .matches = {
-               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
-               DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M DS3H"),
-       }},
-       { .matches = {
-               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
-               DMI_EXACT_MATCH(DMI_BOARD_NAME, "Z390 I AORUS PRO WIFI-CF"),
-       }},
-       { .matches = {
-               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
-               DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 AORUS ELITE"),
-       }},
-       { .matches = {
-               DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
-               DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 I AORUS PRO WIFI"),
-       }},
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
        { }
 };
 
index 12c31fd5d5ae25e25416a5ba1ca587f377c3100c..0753ef18e7211c6274f2bb41e2390a5ad3df99b5 100644 (file)
@@ -17,12 +17,14 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alex Hung");
 MODULE_ALIAS("acpi*:HPQ6001:*");
 MODULE_ALIAS("acpi*:WSTADEF:*");
+MODULE_ALIAS("acpi*:AMDI0051:*");
 
 static struct input_dev *hpwl_input_dev;
 
 static const struct acpi_device_id hpwl_ids[] = {
        {"HPQ6001", 0},
        {"WSTADEF", 0},
+       {"AMDI0051", 0},
        {"", 0},
 };
 
index 799cbe2ffcf36fe6d9ac3ab7094964bfb2a412bd..8c0867bda8280e3340f28f1ff821db03db0f3937 100644 (file)
@@ -88,6 +88,9 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
 static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
 {
        struct acpi_device *dev = lis3->bus_priv;
+       if (!lis3->init_required)
+               return 0;
+
        if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
                                 NULL, NULL) != AE_OK)
                return -EINVAL;
@@ -356,6 +359,7 @@ static int lis3lv02d_add(struct acpi_device *device)
        }
 
        /* call the core layer do its init */
+       lis3_dev.init_required = true;
        ret = lis3lv02d_init_device(&lis3_dev);
        if (ret)
                return ret;
@@ -403,11 +407,27 @@ static int lis3lv02d_suspend(struct device *dev)
 
 static int lis3lv02d_resume(struct device *dev)
 {
+       lis3_dev.init_required = false;
+       lis3lv02d_poweron(&lis3_dev);
+       return 0;
+}
+
+static int lis3lv02d_restore(struct device *dev)
+{
+       lis3_dev.init_required = true;
        lis3lv02d_poweron(&lis3_dev);
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+static const struct dev_pm_ops hp_accel_pm = {
+       .suspend = lis3lv02d_suspend,
+       .resume = lis3lv02d_resume,
+       .freeze = lis3lv02d_suspend,
+       .thaw = lis3lv02d_resume,
+       .poweroff = lis3lv02d_suspend,
+       .restore = lis3lv02d_restore,
+};
+
 #define HP_ACCEL_PM (&hp_accel_pm)
 #else
 #define HP_ACCEL_PM NULL
index 6cb5ad4be231d387385b32b648aa4aa152eb6ca5..3878172909219043b4f9e341cd2e9ad6ff2cd07a 100644 (file)
@@ -57,8 +57,8 @@ enum {
 };
 
 enum {
-       SMBC_CONSERVATION_ON  = 3,
-       SMBC_CONSERVATION_OFF = 5,
+       SBMC_CONSERVATION_ON  = 3,
+       SBMC_CONSERVATION_OFF = 5,
 };
 
 enum {
@@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
        return eval_int(handle, "GBMD", res);
 }
 
-static int exec_smbc(acpi_handle handle, unsigned long arg)
+static int exec_sbmc(acpi_handle handle, unsigned long arg)
 {
-       return exec_simple_method(handle, "SMBC", arg);
+       return exec_simple_method(handle, "SBMC", arg);
 }
 
 static int eval_hals(acpi_handle handle, unsigned long *res)
@@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
        if (err)
                return err;
 
-       err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
+       err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
        if (err)
                return err;
 
@@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
 {
        struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
        struct ideapad_private *priv = dytc->priv;
+       unsigned long output;
        int err;
 
        err = mutex_lock_interruptible(&dytc->mutex);
@@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
 
                /* Determine if we are in CQL mode. This alters the commands we do */
                err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
-                                      NULL);
+                                      &output);
                if (err)
                        goto unlock;
        }
index 289c6655d425d4a549cbe294212289777a697e27..569342aa8926eef258bd624f4f446b1faaceca82 100644 (file)
 #define GPE0A_STS_PORT                 0x420
 #define GPE0A_EN_PORT                  0x428
 
+struct int0002_data {
+       struct gpio_chip chip;
+       int parent_irq;
+       int wake_enable_count;
+};
+
 /*
  * As this is not a real GPIO at all, but just a hack to model an event in
  * ACPI the get / set functions are dummy functions.
@@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
 static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
 {
        struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
-       struct platform_device *pdev = to_platform_device(chip->parent);
-       int irq = platform_get_irq(pdev, 0);
+       struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
 
-       /* Propagate to parent irq */
+       /*
+        * Applying of the wakeup flag to our parent IRQ is delayed till system
+        * suspend, because we only want to do this when using s2idle.
+        */
        if (on)
-               enable_irq_wake(irq);
+               int0002->wake_enable_count++;
        else
-               disable_irq_wake(irq);
+               int0002->wake_enable_count--;
 
        return 0;
 }
@@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
        return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
 }
 
-static struct irq_chip int0002_byt_irqchip = {
+static struct irq_chip int0002_irqchip = {
        .name                   = DRV_NAME,
        .irq_ack                = int0002_irq_ack,
        .irq_mask               = int0002_irq_mask,
@@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
        .irq_set_wake           = int0002_irq_set_wake,
 };
 
-static struct irq_chip int0002_cht_irqchip = {
-       .name                   = DRV_NAME,
-       .irq_ack                = int0002_irq_ack,
-       .irq_mask               = int0002_irq_mask,
-       .irq_unmask             = int0002_irq_unmask,
-       /*
-        * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
-        * and we don't want to mess with the ACPI SCI irq settings.
-        */
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
-};
-
 static const struct x86_cpu_id int0002_cpu_ids[] = {
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,     &int0002_byt_irqchip),
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,        &int0002_cht_irqchip),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
        {}
 };
 
@@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        const struct x86_cpu_id *cpu_id;
-       struct gpio_chip *chip;
+       struct int0002_data *int0002;
        struct gpio_irq_chip *girq;
+       struct gpio_chip *chip;
        int irq, ret;
 
        /* Menlow has a different INT0002 device? <sigh> */
@@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
        if (irq < 0)
                return irq;
 
-       chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
-       if (!chip)
+       int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
+       if (!int0002)
                return -ENOMEM;
 
+       int0002->parent_irq = irq;
+
+       chip = &int0002->chip;
        chip->label = DRV_NAME;
        chip->parent = dev;
        chip->owner = THIS_MODULE;
@@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
        }
 
        girq = &chip->irq;
-       girq->chip = (struct irq_chip *)cpu_id->driver_data;
+       girq->chip = &int0002_irqchip;
        /* This let us handle the parent IRQ in the driver */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
@@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
 
        acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
        device_init_wakeup(dev, true);
+       dev_set_drvdata(dev, int0002);
        return 0;
 }
 
@@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int int0002_suspend(struct device *dev)
+{
+       struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+       /*
+        * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ, don't
+        * muck with it when firmware based suspend is used, otherwise we may
+        * cause spurious wakeups from firmware managed suspend.
+        */
+       if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+               enable_irq_wake(int0002->parent_irq);
+
+       return 0;
+}
+
+static int int0002_resume(struct device *dev)
+{
+       struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+       if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+               disable_irq_wake(int0002->parent_irq);
+
+       return 0;
+}
+
+static const struct dev_pm_ops int0002_pm_ops = {
+       .suspend = int0002_suspend,
+       .resume = int0002_resume,
+};
+
 static const struct acpi_device_id int0002_acpi_ids[] = {
        { "INT0002", 0 },
        { },
@@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
        .driver = {
                .name                   = DRV_NAME,
                .acpi_match_table       = int0002_acpi_ids,
+               .pm                     = &int0002_pm_ops,
        },
        .probe  = int0002_probe,
        .remove = int0002_remove,
index 05cced59e251a98bfbf4c58f9f5541726f310184..f58b8543f6ac576243bc1734f3a0ba52a7107dac 100644 (file)
@@ -312,6 +312,7 @@ static const struct acpi_device_id punit_ipc_acpi_ids[] = {
        { "INT34D4", 0 },
        { }
 };
+MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
 
 static struct platform_driver intel_punit_ipc_driver = {
        .probe = intel_punit_ipc_probe,
index dd60c9397d3521aacb36b03b25100abed8b8f689..edd71e744d2750f1743914498b632ffb30e12e10 100644 (file)
@@ -8853,6 +8853,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (2nd gen) */
        TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (3nd gen) */
        TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),  /* P15 (1st gen) / P15v (1st gen) */
+       TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),  /* X1 Carbon (9th gen) */
 };
 
 static int __init fan_init(struct ibm_init_struct *iibm)
index 90fe4f8f3c2c796b8257bf3733d6b65c84d4c955..bde740d6120e1e36c2929a5fe7626a6ea2f45bb1 100644 (file)
@@ -115,6 +115,32 @@ static const struct ts_dmi_data chuwi_hi10_plus_data = {
        .properties     = chuwi_hi10_plus_props,
 };
 
+static const struct property_entry chuwi_hi10_pro_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1912),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1272),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_pro_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl1680-chuwi-hi10-pro.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 42504,
+               .sha256 = { 0xdb, 0x92, 0x68, 0xa8, 0xdb, 0x81, 0x31, 0x00,
+                           0x1f, 0x58, 0x89, 0xdb, 0x19, 0x1b, 0x15, 0x8c,
+                           0x05, 0x14, 0xf4, 0x95, 0xba, 0x15, 0x45, 0x98,
+                           0x42, 0xa3, 0xbb, 0x65, 0xe3, 0x30, 0xa5, 0x93 },
+       },
+       .acpi_name      = "MSSL1680:00",
+       .properties     = chuwi_hi10_pro_props,
+};
+
 static const struct property_entry chuwi_vi8_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -915,6 +941,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
                },
        },
+       {
+               /* Chuwi Hi10 Prus (CWI597) */
+               .driver_data = (void *)&chuwi_hi10_pro_data,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+               },
+       },
        {
                /* Chuwi Vi8 (CWI506) */
                .driver_data = (void *)&chuwi_vi8_data,
@@ -1096,6 +1131,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
                },
        },
+       {
+               /* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
+               .driver_data = (void *)&trekstor_surftab_wintron70_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "WinPad 7 W10 - WPW700"),
+               },
+       },
        {
                /* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
                .driver_data = (void *)&trekstor_primebook_c11_data,
index 530e5f90095e6cf2d879376744d531254a02dc3b..0d1034e3ed0f2d2ef9c0adf83a7fa232e1b4d91c 100644 (file)
@@ -324,7 +324,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (!bp->base) {
                dev_err(&pdev->dev, "io_remap bar0\n");
                err = -ENOMEM;
-               goto out;
+               goto out_release_regions;
        }
        bp->reg = bp->base + OCP_REGISTER_OFFSET;
        bp->tod = bp->base + TOD_REGISTER_OFFSET;
@@ -347,6 +347,8 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 
 out:
+       pci_iounmap(pdev, bp->base);
+out_release_regions:
        pci_release_regions(pdev);
 out_disable:
        pci_disable_device(pdev);
index 50ec53d67a4c0ed3e78a261647538d01ada3cd60..db4c265287ae6e1fdaadb62e291157cacbcb5251 100644 (file)
@@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
                return -ENODEV;
        }
 
+       cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+       if (!cm->rx_wq) {
+               rio_release_inb_mbox(mport, cmbox);
+               rio_release_outb_mbox(mport, cmbox);
+               kfree(cm);
+               return -ENOMEM;
+       }
+
        /*
         * Allocate and register inbound messaging buffers to be ready
         * to receive channel and system management requests
@@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
        cm->rx_slots = RIOCM_RX_RING_SIZE;
        mutex_init(&cm->rx_lock);
        riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
-       cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
-       if (!cm->rx_wq) {
-               riocm_error("failed to allocate IBMBOX_%d on %s",
-                           cmbox, mport->name);
-               rio_release_outb_mbox(mport, cmbox);
-               kfree(cm);
-               return -ENOMEM;
-       }
-
        INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
 
        cm->tx_slot = 0;
index 9d84d9245490e8ebbf7dd151ea96873064ff1e54..3e7a38525cb3f63cd75bb1b05705bbdbdaf25bb9 100644 (file)
@@ -1031,7 +1031,7 @@ config REGULATOR_RT5033
          current source, LDO and Buck.
 
 config REGULATOR_RTMV20
-       tristate "RTMV20 Laser Diode Regulator"
+       tristate "Richtek RTMV20 Laser Diode Regulator"
        depends on I2C
        select REGMAP_I2C
        help
index d8b429955d33fe288e7d97db091f637da5dfea1e..05147d2c384289e0bb90fb513c252c01d4907569 100644 (file)
@@ -28,16 +28,16 @@ static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
 
 static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
        REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
-       REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
+       REGULATOR_LINEAR_RANGE(2100000, 0, 12, 100000),
 };
 
 static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
        REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
-       REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
+       REGULATOR_LINEAR_RANGE(2100000, 0, 11, 100000),
 };
 
 static const unsigned int atc260x_ldo_voltage_range_sel[] = {
-       0x0, 0x1,
+       0x0, 0x20,
 };
 
 static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -411,7 +411,7 @@ enum atc2609a_reg_ids {
        .owner = THIS_MODULE, \
 }
 
-#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
+#define atc2609a_reg_desc_ldo_range_pick(num, n_range, n_volt) { \
        .name = "LDO"#num, \
        .supply_name = "ldo"#num, \
        .of_match = of_match_ptr("ldo"#num), \
@@ -421,6 +421,7 @@ enum atc2609a_reg_ids {
        .type = REGULATOR_VOLTAGE, \
        .linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
        .n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
+       .n_voltages = n_volt, \
        .vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
        .vsel_mask = GENMASK(4, 1), \
        .vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
@@ -458,12 +459,12 @@ static const struct regulator_desc atc2609a_reg[] = {
        atc2609a_reg_desc_ldo_bypass(0),
        atc2609a_reg_desc_ldo_bypass(1),
        atc2609a_reg_desc_ldo_bypass(2),
-       atc2609a_reg_desc_ldo_range_pick(3, 0),
-       atc2609a_reg_desc_ldo_range_pick(4, 0),
+       atc2609a_reg_desc_ldo_range_pick(3, 0, 29),
+       atc2609a_reg_desc_ldo_range_pick(4, 0, 29),
        atc2609a_reg_desc_ldo(5),
-       atc2609a_reg_desc_ldo_range_pick(6, 1),
-       atc2609a_reg_desc_ldo_range_pick(7, 0),
-       atc2609a_reg_desc_ldo_range_pick(8, 0),
+       atc2609a_reg_desc_ldo_range_pick(6, 1, 28),
+       atc2609a_reg_desc_ldo_range_pick(7, 0, 29),
+       atc2609a_reg_desc_ldo_range_pick(8, 0, 29),
        atc2609a_reg_desc_ldo_fixed(9),
 };
 
index e61295b30503f28c0eda8b03037f93520de45474..b1eb46961993040ca2032860e291b20fdd50bee3 100644 (file)
@@ -334,7 +334,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
            NULL);
 
 BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
+           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
            regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
            NULL);
 /*
index f192bf19492ed5f19022aacaeffabe5613ef4a91..e20e77e4c159df50f838978fcbe1762caedae52c 100644 (file)
@@ -1425,6 +1425,12 @@ static int set_machine_constraints(struct regulator_dev *rdev)
         * and we have control then make sure it is enabled.
         */
        if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+               /* If we want to enable this regulator, make sure that we know
+                * the supplying regulator.
+                */
+               if (rdev->supply_name && !rdev->supply)
+                       return -EPROBE_DEFER;
+
                if (rdev->supply) {
                        ret = regulator_enable(rdev->supply);
                        if (ret < 0) {
index eb3fc1db4edc8118b0be4133e559e10e1f7a233d..c4754f3cf2337790bda44700d4cbe56b87fadb6f 100644 (file)
@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
 
        drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
        if (IS_ERR(drvdata->dev)) {
+               ret = PTR_ERR(drvdata->dev);
                dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
-               return PTR_ERR(drvdata->dev);
+               return ret;
        }
 
        platform_set_drvdata(pdev, drvdata);
index 08cbf688e14d3fc8bc29b0519e1da693ec91d19e..e66925090258021f75ebd1c994efccaa492f3282 100644 (file)
@@ -280,7 +280,7 @@ static unsigned int da9121_map_mode(unsigned int mode)
        case DA9121_BUCK_MODE_FORCE_PFM:
                return REGULATOR_MODE_STANDBY;
        default:
-               return -EINVAL;
+               return REGULATOR_MODE_INVALID;
        }
 }
 
@@ -317,7 +317,7 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
 {
        struct da9121 *chip = rdev_get_drvdata(rdev);
        int id = rdev_get_id(rdev);
-       unsigned int val;
+       unsigned int val, mode;
        int ret = 0;
 
        ret = regmap_read(chip->regmap, da9121_mode_field[id].reg, &val);
@@ -326,7 +326,11 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
                return -EINVAL;
        }
 
-       return da9121_map_mode(val & da9121_mode_field[id].msk);
+       mode = da9121_map_mode(val & da9121_mode_field[id].msk);
+       if (mode == REGULATOR_MODE_INVALID)
+               return -EINVAL;
+
+       return mode;
 }
 
 static const struct regulator_ops da9121_buck_ops = {
index f3918f03aaf3dfd82d5c71788dc1f85d3fbb8825..26f06f685b1b687e809d4802271632a9ef645667 100644 (file)
@@ -55,7 +55,6 @@
 
 #define FAN53555_NVOLTAGES     64      /* Numbers of voltages */
 #define FAN53526_NVOLTAGES     128
-#define TCS4525_NVOLTAGES      127     /* Numbers of voltages */
 
 #define TCS_VSEL_NSEL_MASK     0x7f
 #define TCS_VSEL0_MODE         (1 << 7)
@@ -376,7 +375,7 @@ static int fan53555_voltages_setup_tcs(struct fan53555_device_info *di)
        /* Init voltage range and step */
        di->vsel_min = 600000;
        di->vsel_step = 6250;
-       di->vsel_count = TCS4525_NVOLTAGES;
+       di->vsel_count = FAN53526_NVOLTAGES;
 
        return 0;
 }
index e83eb4fb1876a04cfcf7523c3f3f37a2ff32664e..1684faf82ed2598e8bd11f783e9afdbd82104252 100644 (file)
@@ -51,6 +51,7 @@ static const struct regulator_ops fan53880_ops = {
                      REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
                },                                                      \
                .n_linear_ranges = 2,                                   \
+               .n_voltages =      0x74,                                \
                .vsel_reg =        FAN53880_LDO ## _num ## VOUT,        \
                .vsel_mask =       0x7f,                                \
                .enable_reg =      FAN53880_ENABLE,                     \
@@ -76,6 +77,7 @@ static const struct regulator_desc fan53880_regulators[] = {
                      REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
                },
                .n_linear_ranges = 2,
+               .n_voltages =      0xf8,
                .vsel_reg =        FAN53880_BUCKVOUT,
                .vsel_mask =       0x7f,
                .enable_reg =      FAN53880_ENABLE,
@@ -95,6 +97,7 @@ static const struct regulator_desc fan53880_regulators[] = {
                      REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
                },
                .n_linear_ranges = 2,
+               .n_voltages =      0x71,
                .vsel_reg =        FAN53880_BOOSTVOUT,
                .vsel_mask =       0x7f,
                .enable_reg =      FAN53880_ENABLE_BOOST,
index 02ad83153e19afabddadc3a4125ebd12c71abcfb..34e255c235d4c16110dd0ac18439c8f87ead388e 100644 (file)
@@ -88,10 +88,15 @@ static int reg_domain_disable(struct regulator_dev *rdev)
 {
        struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
        struct device *dev = rdev->dev.parent;
+       int ret;
+
+       ret = dev_pm_genpd_set_performance_state(dev, 0);
+       if (ret)
+               return ret;
 
        priv->enable_counter--;
 
-       return dev_pm_genpd_set_performance_state(dev, 0);
+       return 0;
 }
 
 static int reg_is_enabled(struct regulator_dev *rdev)
index 0e16e31c968f124c7b254e4549c994b0c8a49f72..ad2237a95572a7b21d1b11d324da1f5e9e86ad9d 100644 (file)
@@ -948,7 +948,7 @@ int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
        int ret;
        unsigned int sel;
 
-       if (!rdev->desc->n_ramp_values)
+       if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
                return -EINVAL;
 
        ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
index f6a14e9c3cbfef7cb3d0aa8fdad3ee16a14a58a4..d6340bb49296740b6d85f41e71194b886ccb51f4 100644 (file)
@@ -3,7 +3,7 @@
 // Device driver for regulators in Hisi IC
 //
 // Copyright (c) 2013 Linaro Ltd.
-// Copyright (c) 2011 Hisilicon.
+// Copyright (c) 2011 HiSilicon Ltd.
 // Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
 //
 // Guodong Xu <guodong.xu@linaro.org>
@@ -83,7 +83,7 @@ static const unsigned int ldo34_voltages[] = {
                        .owner          = THIS_MODULE,                         \
                        .volt_table     = vtable,                              \
                        .n_voltages     = ARRAY_SIZE(vtable),                  \
-                       .vsel_mask      = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
+                       .vsel_mask      = ARRAY_SIZE(vtable) - 1,              \
                        .vsel_reg       = vreg,                                \
                        .enable_reg     = ereg,                                \
                        .enable_mask    = emask,                               \
index ac2ee2030211a3df1cabd9d1a62deb21603dd55b..68cdb173196d6946c8a12f81dc7d3d7fe06d4f77 100644 (file)
@@ -2,7 +2,7 @@
 //
 // Device driver for regulators in Hi655x IC
 //
-// Copyright (c) 2016 Hisilicon.
+// Copyright (c) 2016 HiSilicon Ltd.
 //
 // Authors:
 // Chen Feng <puck.chen@hisilicon.com>
index 8d9731e4052bf21f23b032d33499b751d439deba..3cf8f085170a0a482e063855b40f9fe7e2f6b4a8 100644 (file)
@@ -814,6 +814,13 @@ static int max77620_regulator_probe(struct platform_device *pdev)
        config.dev = dev;
        config.driver_data = pmic;
 
+       /*
+        * Set of_node_reuse flag to prevent driver core from attempting to
+        * claim any pinmux resources already claimed by the parent device.
+        * Otherwise PMIC driver will fail to re-probe.
+        */
+       device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
        for (id = 0; id < MAX77620_NUM_REGS; id++) {
                struct regulator_dev *rdev;
                struct regulator_desc *rdesc;
@@ -839,12 +846,10 @@ static int max77620_regulator_probe(struct platform_device *pdev)
                        return ret;
 
                rdev = devm_regulator_register(dev, rdesc, &config);
-               if (IS_ERR(rdev)) {
-                       ret = PTR_ERR(rdev);
-                       dev_err(dev, "Regulator registration %s failed: %d\n",
-                               rdesc->name, ret);
-                       return ret;
-               }
+               if (IS_ERR(rdev))
+                       return dev_err_probe(dev, PTR_ERR(rdev),
+                                            "Regulator registration %s failed\n",
+                                            rdesc->name);
        }
 
        return 0;
index 9edc34981ee0a21c7adcec13e801f5b5890c1bc1..6b8be52c3772a00147878351ed2f0f0679ffc92b 100644 (file)
@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
        REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
 };
 
-static unsigned int mt6315_map_mode(u32 mode)
+static unsigned int mt6315_map_mode(unsigned int mode)
 {
        switch (mode) {
        case MT6315_BUCK_MODE_AUTO:
index 2055a9cb13ba549ddcd940035e41fb8437b2468f..7a87788d3f0922399d54d88a4e35573aa5931001 100644 (file)
@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
        struct gpio_descs *gpios = priv->enable_gpios;
        int id = rdev_get_id(rdev), ret;
 
-       if (gpios->ndescs <= id) {
+       if (!gpios || gpios->ndescs <= id) {
                dev_warn(&rdev->dev, "no dedicated gpio can control\n");
                goto bypass_gpio;
        }
@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
        struct gpio_descs *gpios = priv->enable_gpios;
        int id = rdev_get_id(rdev);
 
-       if (gpios->ndescs <= id) {
+       if (!gpios || gpios->ndescs <= id) {
                dev_warn(&rdev->dev, "no dedicated gpio can control\n");
                goto bypass_gpio;
        }
index 852fb2596ffdadf42a199a2736c90a8bbacbeb72..4bca64de0f672d0f7e4ed5b722379a2182985b3d 100644 (file)
@@ -27,6 +27,7 @@
 #define RTMV20_REG_LDIRQ       0x30
 #define RTMV20_REG_LDSTAT      0x40
 #define RTMV20_REG_LDMASK      0x50
+#define RTMV20_MAX_REGS                (RTMV20_REG_LDMASK + 1)
 
 #define RTMV20_VID_MASK                GENMASK(7, 4)
 #define RICHTEK_VID            0x80
@@ -103,9 +104,47 @@ static int rtmv20_lsw_disable(struct regulator_dev *rdev)
        return 0;
 }
 
+static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
+                                       int max_uA)
+{
+       int sel;
+
+       if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
+               return -EINVAL;
+
+       if (max_uA > RTMV20_LSW_MAXUA)
+               max_uA = RTMV20_LSW_MAXUA;
+
+       sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
+
+       /* Ensure the selected setting is still in range */
+       if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
+               return -EINVAL;
+
+       sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+                                 rdev->desc->csel_mask, sel);
+}
+
+static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
+{
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+       if (ret)
+               return ret;
+
+       val &= rdev->desc->csel_mask;
+       val >>= ffs(rdev->desc->csel_mask) - 1;
+
+       return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
+}
+
 static const struct regulator_ops rtmv20_regulator_ops = {
-       .set_current_limit = regulator_set_current_limit_regmap,
-       .get_current_limit = regulator_get_current_limit_regmap,
+       .set_current_limit = rtmv20_lsw_set_current_limit,
+       .get_current_limit = rtmv20_lsw_get_current_limit,
        .enable = rtmv20_lsw_enable,
        .disable = rtmv20_lsw_disable,
        .is_enabled = regulator_is_enabled_regmap,
@@ -275,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
        .max_register = RTMV20_REG_LDMASK,
+       .num_reg_defaults_raw = RTMV20_MAX_REGS,
 
        .writeable_reg = rtmv20_is_accessible_reg,
        .readable_reg = rtmv20_is_accessible_reg,
index bbadf72b94e8c19b28b449465094f40d7b875fbc..1f02f60ad1366fb983006c7460dde762a72d05af 100644 (file)
@@ -173,7 +173,7 @@ scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
                sreg->desc.uV_step =
                        vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
                sreg->desc.linear_min_sel = 0;
-               sreg->desc.n_voltages = delta_uV / sreg->desc.uV_step;
+               sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
                sreg->desc.ops = &scmi_reg_linear_ops;
        }
 
index 1b9e1442e6a50f003efabc4977bdae8899399cf3..fd42a5fffaed1bc2c8e76918f397820a1a93e394 100644 (file)
@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 }
 
+static int dasd_diag_pe_handler(struct dasd_device *device,
+                               __u8 tbvpm, __u8 fcsecpm)
+{
+       return dasd_generic_verify_path(device, tbvpm);
+}
+
 static struct dasd_discipline dasd_diag_discipline = {
        .owner = THIS_MODULE,
        .name = "DIAG",
        .ebcname = "DIAG",
        .check_device = dasd_diag_check_device,
-       .verify_path = dasd_generic_verify_path,
+       .pe_handler = dasd_diag_pe_handler,
        .fill_geometry = dasd_diag_fill_geometry,
        .setup_blk_queue = dasd_diag_setup_blk_queue,
        .start_IO = dasd_start_diag,
index 4789410885e4fba8b8d9e6acf7f1a14d70a6a7bb..3ad319aee51edb470b970ce8dad903ab30cb954c 100644 (file)
@@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
+static int dasd_fba_pe_handler(struct dasd_device *device,
+                              __u8 tbvpm, __u8 fcsecpm)
+{
+       return dasd_generic_verify_path(device, tbvpm);
+}
+
 static struct dasd_discipline dasd_fba_discipline = {
        .owner = THIS_MODULE,
        .name = "FBA ",
        .ebcname = "FBA ",
        .check_device = dasd_fba_check_characteristics,
        .do_analysis = dasd_fba_do_analysis,
-       .verify_path = dasd_generic_verify_path,
+       .pe_handler = dasd_fba_pe_handler,
        .setup_blk_queue = dasd_fba_setup_blk_queue,
        .fill_geometry = dasd_fba_fill_geometry,
        .start_IO = dasd_start_IO,
index 1c59b0e86a9f073a82831c68cd4259f2a988de20..155428bfed8ac12b3c4d9b7da1af6752c827173b 100644 (file)
@@ -297,7 +297,6 @@ struct dasd_discipline {
         * e.g. verify that new path is compatible with the current
         * configuration.
         */
-       int (*verify_path)(struct dasd_device *, __u8);
        int (*pe_handler)(struct dasd_device *, __u8, __u8);
 
        /*
index b9febc581b1f4cf82a8013ef09b75d619847b834..8d1b2771c1aa0252df0196e08189691a474c7066 100644 (file)
@@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
        int ret;
 
+       /* this is an error in the caller */
+       if (cp->initialized)
+               return -EBUSY;
+
        /*
         * We only support prefetching the channel program. We assume all channel
         * programs executed by supported guests likewise support prefetching.
index 8c625b530035f536248ba2ad0260c82342144630..9b61e9b131ade02da6be5f87be69068353881272 100644 (file)
@@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;
+       bool cp_is_finished = false;
 
        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;
@@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
-               if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
+               if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
                        cp_free(&private->cp);
+                       cp_is_finished = true;
+               }
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);
 
-       if (private->mdev && is_final)
+       /*
+        * Reset to IDLE only if processing of a channel program
+        * has finished. Do not overwrite a possible processing
+        * state if the final interrupt was for HSCH or CSCH.
+        */
+       if (private->mdev && cp_is_finished)
                private->state = VFIO_CCW_STATE_IDLE;
 
        if (private->io_trigger)
index 23e61aa638e4eb3048ee6fb84584c6591afa7d48..e435a9cd92dacf2b14fd30013e4fa1e839c2a180 100644 (file)
@@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
        }
 
 err_out:
+       private->state = VFIO_CCW_STATE_IDLE;
        trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
                                      io_region->ret_code, errstr);
 }
index 491a64c61fff1a6b350a949b6988b46270e16b2b..c57d2a7f0919759a661bf01021d7746c4058fea3 100644 (file)
@@ -279,8 +279,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
        }
 
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
-       if (region->ret_code != 0)
-               private->state = VFIO_CCW_STATE_IDLE;
        ret = (region->ret_code != 0) ? region->ret_code : count;
 
 out_unlock:
index 3ee46a843cb5f2d36e2d35707402fa2fbddb6773..adddcd5899416c5704efb7b957290846d894f386 100644 (file)
@@ -2926,11 +2926,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
                ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
                ccb->datalen = count * sizeof(struct blogic_sg_seg);
                if (blogic_multimaster_type(adapter))
-                       ccb->data = (void *)((unsigned int) ccb->dma_handle +
+                       ccb->data = (unsigned int) ccb->dma_handle +
                                        ((unsigned long) &ccb->sglist -
-                                       (unsigned long) ccb));
+                                       (unsigned long) ccb);
                else
-                       ccb->data = ccb->sglist;
+                       ccb->data = virt_to_32bit_virt(ccb->sglist);
 
                scsi_for_each_sg(command, sg, count, i) {
                        ccb->sglist[i].segbytes = sg_dma_len(sg);
index a8e4a19788a77641778e16e994aa6d234da86f8c..7d1ec10f243052023f6de6512815451b6f88fc21 100644 (file)
@@ -806,7 +806,7 @@ struct blogic_ccb {
        unsigned char cdblen;                           /* Byte 2 */
        unsigned char sense_datalen;                    /* Byte 3 */
        u32 datalen;                                    /* Bytes 4-7 */
-       void *data;                                     /* Bytes 8-11 */
+       u32 data;                                       /* Bytes 8-11 */
        unsigned char:8;                                /* Byte 12 */
        unsigned char:8;                                /* Byte 13 */
        enum blogic_adapter_status adapter_status;      /* Byte 14 */
index 924d55a8acbfced66e23f8851f182b8e2c3c7439..65182ad9cdf82adcdda3a85b213da22117ddf806 100644 (file)
@@ -58,7 +58,6 @@
 #include "aicasm_symbol.h"
 #include "aicasm_insformat.h"
 
-int yylineno;
 char *yyfilename;
 char stock_prefix[] = "aic_";
 char *prefix = stock_prefix;
index 7bf7fd5953ac97aeb767fa87249abed6e246d975..ed3bdd43c29763c2f4160fddb1880fffc9e26dd0 100644 (file)
@@ -108,7 +108,7 @@ struct macro_arg {
        regex_t arg_regex;
        char   *replacement_text;
 };
-STAILQ_HEAD(macro_arg_list, macro_arg) args;
+STAILQ_HEAD(macro_arg_list, macro_arg);
 
 struct macro_info {
        struct macro_arg_list args;
index a7515c3039edb672d718f285914d01064fa3bb3f..53343a6d8ae19a8c13202763fed3ec79863117a4 100644 (file)
@@ -3,6 +3,17 @@
  * $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
  */
 
+/* Messages (1 byte) */                     /* I/T (M)andatory or (O)ptional */
+#define MSG_SAVEDATAPOINTER    0x02 /* O/O */
+#define MSG_RESTOREPOINTERS    0x03 /* O/O */
+#define MSG_DISCONNECT         0x04 /* O/O */
+#define MSG_MESSAGE_REJECT     0x07 /* M/M */
+#define MSG_NOOP               0x08 /* M/M */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG       0x20 /* O/O */
+#define MSG_IGN_WIDE_RESIDUE   0x23 /* O/O */
+
 /* Identify message */              /* M/M */  
 #define MSG_IDENTIFYFLAG       0x80 
 #define MSG_IDENTIFY_DISCFLAG  0x40 
index 1a0dc18d6915567d3be040a7fbe8212e6cc85990..ed300a279a387f16d669fdee54d712eee21e6c14 100644 (file)
@@ -1220,6 +1220,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                   was a result from the ABTS request rather than the CLEANUP
                   request */
                set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+               rc = FAILED;
                goto done;
        }
 
index 499c770d405c78fcc13d0dfb1f64b7b53f37190e..e954083140786dfae2be7e1c02122da8f1a22a57 100644 (file)
@@ -4811,14 +4811,14 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
 {
        int i;
 
-       free_irq(pci_irq_vector(pdev, 1), hisi_hba);
-       free_irq(pci_irq_vector(pdev, 2), hisi_hba);
-       free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+       devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
+       devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
+       devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
        for (i = 0; i < hisi_hba->cq_nvecs; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                int nr = hisi_sas_intr_conv ? 16 : 16 + i;
 
-               free_irq(pci_irq_vector(pdev, nr), cq);
+               devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
        }
        pci_free_irq_vectors(pdev);
 }
index 697c09ef259b3fb4230cdc60e8131057f3f1cc76..cd52664920e1aa17c49d3b491b50a4a58d2af24b 100644 (file)
@@ -254,12 +254,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
        device_enable_async_suspend(&shost->shost_dev);
 
+       get_device(&shost->shost_gendev);
        error = device_add(&shost->shost_dev);
        if (error)
                goto out_del_gendev;
 
-       get_device(&shost->shost_gendev);
-
        if (shost->transportt->host_size) {
                shost->shost_data = kzalloc(shost->transportt->host_size,
                                         GFP_KERNEL);
@@ -278,33 +277,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
                if (!shost->work_q) {
                        error = -EINVAL;
-                       goto out_free_shost_data;
+                       goto out_del_dev;
                }
        }
 
        error = scsi_sysfs_add_host(shost);
        if (error)
-               goto out_destroy_host;
+               goto out_del_dev;
 
        scsi_proc_host_add(shost);
        scsi_autopm_put_host(shost);
        return error;
 
- out_destroy_host:
-       if (shost->work_q)
-               destroy_workqueue(shost->work_q);
- out_free_shost_data:
-       kfree(shost->shost_data);
+       /*
+        * Any host allocation in this function will be freed in
+        * scsi_host_dev_release().
+        */
  out_del_dev:
        device_del(&shost->shost_dev);
  out_del_gendev:
+       /*
+        * Host state is SHOST_RUNNING so we have to explicitly release
+        * ->shost_dev.
+        */
+       put_device(&shost->shost_dev);
        device_del(&shost->shost_gendev);
  out_disable_runtime_pm:
        device_disable_async_suspend(&shost->shost_gendev);
        pm_runtime_disable(&shost->shost_gendev);
        pm_runtime_set_suspended(&shost->shost_gendev);
        pm_runtime_put_noidle(&shost->shost_gendev);
-       scsi_mq_destroy_tags(shost);
  fail:
        return error;
 }
@@ -345,7 +347,7 @@ static void scsi_host_dev_release(struct device *dev)
 
        ida_simple_remove(&host_index_ida, shost->host_no);
 
-       if (parent)
+       if (shost->shost_state != SHOST_CREATED)
                put_device(parent);
        kfree(shost);
 }
@@ -388,8 +390,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        mutex_init(&shost->scan_mutex);
 
        index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
-       if (index < 0)
-               goto fail_kfree;
+       if (index < 0) {
+               kfree(shost);
+               return NULL;
+       }
        shost->host_no = index;
 
        shost->dma_channel = 0xff;
@@ -481,7 +485,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
                shost_printk(KERN_WARNING, shost,
                        "error handler thread failed to spawn, error = %ld\n",
                        PTR_ERR(shost->ehandler));
-               goto fail_index_remove;
+               goto fail;
        }
 
        shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -490,17 +494,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        if (!shost->tmf_work_q) {
                shost_printk(KERN_WARNING, shost,
                             "failed to create tmf workq\n");
-               goto fail_kthread;
+               goto fail;
        }
        scsi_proc_hostdir_add(shost->hostt);
        return shost;
+ fail:
+       /*
+        * Host state is still SHOST_CREATED and that is enough to release
+        * ->shost_gendev. scsi_host_dev_release() will free
+        * dev_name(&shost->shost_dev).
+        */
+       put_device(&shost->shost_gendev);
 
- fail_kthread:
-       kthread_stop(shost->ehandler);
- fail_index_remove:
-       ida_simple_remove(&host_index_ida, shost->host_no);
- fail_kfree:
-       kfree(shost);
        return NULL;
 }
 EXPORT_SYMBOL(scsi_host_alloc);
index 19cf418928faae07fb58a6e911c9297c153a79fe..e3d03d744713d126966f635be71adee28a32741a 100644 (file)
@@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
 
 static void sas_resume_port(struct asd_sas_phy *phy)
 {
-       struct domain_device *dev;
+       struct domain_device *dev, *n;
        struct asd_sas_port *port = phy->port;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
@@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
         * 1/ presume every device came back
         * 2/ force the next revalidation to check all expander phys
         */
-       list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+       list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
                int i, rc;
 
                rc = sas_notify_lldd_dev_found(dev);
index 573c8599d71c2d3e155fdb64b268ef3296a0aacd..fc3682f15f5099d94feb5602cfb1bcceae9c9230 100644 (file)
@@ -20589,10 +20589,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        abtswqe = &abtsiocb->wqe;
        memset(abtswqe, 0, sizeof(*abtswqe));
 
-       if (lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba))
                bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
-       else
-               bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
        bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
        abtswqe->abort_cmd.rsrvd5 = 0;
        abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
index ecd06d2d7e81624553b67f4da111d7603dca6341..71aa6af08340612e28454018d98d274b1bf31e69 100644 (file)
@@ -3765,11 +3765,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case HW_EVENT_PHY_START_STATUS:
                pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
                           status);
-               if (status == 0) {
+               if (status == 0)
                        phy->phy_state = 1;
-                       if (pm8001_ha->flags == PM8001F_RUN_TIME &&
-                                       phy->enable_completion != NULL)
-                               complete(phy->enable_completion);
+
+               if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+                               phy->enable_completion != NULL) {
+                       complete(phy->enable_completion);
+                       phy->enable_completion = NULL;
                }
                break;
        case HW_EVENT_SAS_PHY_UP:
index 390c33df03578e7bc876e266ef9d3ad98a4ab57d..af09bd282cb94d80861c37a297d7b1ac2c38767a 100644 (file)
@@ -1151,8 +1151,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
                goto err_out_shost;
        }
        list_add_tail(&pm8001_ha->list, &hba_list);
-       scsi_scan_host(pm8001_ha->shost);
        pm8001_ha->flags = PM8001F_RUN_TIME;
+       scsi_scan_host(pm8001_ha->shost);
        return 0;
 
 err_out_shost:
index d28af413b93a227014e529addec26d2ff54ee68b..335cf37e6cb94053f6cade51993afebb1ded3e22 100644 (file)
@@ -264,12 +264,17 @@ void pm8001_scan_start(struct Scsi_Host *shost)
        int i;
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       DECLARE_COMPLETION_ONSTACK(completion);
        pm8001_ha = sha->lldd_ha;
        /* SAS_RE_INITIALIZATION not available in SPCv/ve */
        if (pm8001_ha->chip_id == chip_8001)
                PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
-       for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+       for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
+               pm8001_ha->phy[i].enable_completion = &completion;
                PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+               wait_for_completion(&completion);
+               msleep(300);
+       }
 }
 
 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
index 4e980830f9f59f137f1a80cefaba49199ee6894c..700530e969ac0d125170a3a038b692d8928cdcd0 100644 (file)
@@ -3487,13 +3487,13 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_dbg(pm8001_ha, INIT,
                   "phy start resp status:0x%x, phyid:0x%x\n",
                   status, phy_id);
-       if (status == 0) {
+       if (status == 0)
                phy->phy_state = PHY_LINK_DOWN;
-               if (pm8001_ha->flags == PM8001F_RUN_TIME &&
-                               phy->enable_completion != NULL) {
-                       complete(phy->enable_completion);
-                       phy->enable_completion = NULL;
-               }
+
+       if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+                       phy->enable_completion != NULL) {
+               complete(phy->enable_completion);
+               phy->enable_completion = NULL;
        }
        return 0;
 
index 69f7784233f938ce9d6ebda1b0262da86fe49a02..b92570a7c309d32ae4a1d6493b564b986b682d34 100644 (file)
@@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
        if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
 
-       fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+       if (lport->host && lport->host->shost_data)
+               fc_host_supported_speeds(lport->host) =
+                       lport->link_supported_speeds;
 }
 
 static void qedf_bw_update(void *dev)
@@ -1825,22 +1827,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
                fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
                QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
                           "WWPN (0x%s) already exists.\n", buf);
-               goto err1;
+               return rc;
        }
 
        if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
                QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
                           "because link is not up.\n");
-               rc = -EIO;
-               goto err1;
+               return -EIO;
        }
 
        vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
        if (!vn_port) {
                QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
                           "for vport.\n");
-               rc = -ENOMEM;
-               goto err1;
+               return -ENOMEM;
        }
 
        fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1864,7 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        if (rc) {
                QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
                    "for lport stats.\n");
-               goto err2;
+               goto err;
        }
 
        fc_set_wwnn(vn_port, vport->node_name);
@@ -1882,7 +1882,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        if (rc) {
                QEDF_WARN(&base_qedf->dbg_ctx,
                          "Error adding Scsi_Host rc=0x%x.\n", rc);
-               goto err2;
+               goto err;
        }
 
        /* Set default dev_loss_tmo based on module parameter */
@@ -1923,9 +1923,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
        vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
 
-err2:
+       return 0;
+
+err:
        scsi_host_put(vn_port->host);
-err1:
        return rc;
 }
 
@@ -1966,8 +1967,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
        fc_lport_free_stats(vn_port);
 
        /* Release Scsi_Host */
-       if (vn_port->host)
-               scsi_host_put(vn_port->host);
+       scsi_host_put(vn_port->host);
 
 out:
        return 0;
index 0677295957bc5cb8cf89ea765011e7c6e106faaa..615e44af1ca604309fd845ea4b5b0405d1b25d6d 100644 (file)
@@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
                return ret;
        }
 
-       if (qla82xx_flash_set_write_enable(ha))
+       ret = qla82xx_flash_set_write_enable(ha);
+       if (ret < 0)
                goto done_write;
 
        qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
index b2008fb1dd386a103598739b17c04577c69e9445..12a6848ade435eaa1407fa2a238e9444bd4bf773 100644 (file)
@@ -1563,10 +1563,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
                return;
        }
 
+       mutex_lock(&tgt->ha->optrom_mutex);
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        tgt->tgt_stop = 0;
        tgt->tgt_stopped = 1;
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
+       mutex_unlock(&tgt->ha->optrom_mutex);
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
            tgt);
index d92cec12454cb2b4ccb2321f4733a6b961574c34..d33355ab6e14505d9678627e929e5bf27711aa39 100644 (file)
@@ -184,6 +184,7 @@ static struct {
        {"HP", "C3323-300", "4269", BLIST_NOTQ},
        {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
        {"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+       {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
        {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
        {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
        {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
index 0aa58131e7915add4d8bc3fca57c8290817de7ac..d0626773eb3863a1194509db2d98fc125ba26900 100644 (file)
@@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
        host->hba = hba;
        ufshcd_set_variant(hba, host);
 
-       host->rst  = devm_reset_control_get(dev, "rst");
+       host->rst = devm_reset_control_get(dev, "rst");
        if (IS_ERR(host->rst)) {
                dev_err(dev, "%s: failed to get reset control\n", __func__);
-               return PTR_ERR(host->rst);
+               err = PTR_ERR(host->rst);
+               goto error;
        }
 
        ufs_hisi_set_pm_lvl(hba);
 
        err = ufs_hisi_get_resource(host);
-       if (err) {
-               ufshcd_set_variant(hba, NULL);
-               return err;
-       }
+       if (err)
+               goto error;
 
        return 0;
+
+error:
+       ufshcd_set_variant(hba, NULL);
+       return err;
 }
 
 static int ufs_hi3660_init(struct ufs_hba *hba)
index a981f261b3043cae5861ce82e8d56c4003c50fab..0a84ec9e7cea02ae17c71a09c61bb2938544e7a4 100644 (file)
@@ -603,11 +603,23 @@ static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
 
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
        if (!ret) {
-               if (ver >= UFS_UNIPRO_VER_1_8)
+               if (ver >= UFS_UNIPRO_VER_1_8) {
                        host->hw_ver.major = 3;
+                       /*
+                        * Fix HCI version for some platforms with
+                        * incorrect version
+                        */
+                       if (hba->ufs_version < ufshci_version(3, 0))
+                               hba->ufs_version = ufshci_version(3, 0);
+               }
        }
 }
 
+static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
+{
+       return hba->ufs_version;
+}
+
 /**
  * ufs_mtk_init - find other essential mmio bases
  * @hba: host controller instance
@@ -922,6 +934,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
        int err;
+       struct arm_smccc_res res;
 
        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_lpm(hba);
@@ -941,6 +954,9 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                        goto fail;
        }
 
+       if (ufshcd_is_link_off(hba))
+               ufs_mtk_device_reset_ctrl(0, res);
+
        return 0;
 fail:
        /*
@@ -1044,6 +1060,7 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
+       .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
index 3eb54937f1d8a7057d393e51ff8c50aca3b9afad..72fd41bfbd54bf38983953cc5f6059a93ff69c3e 100644 (file)
@@ -2842,7 +2842,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
  * ufshcd_exec_dev_cmd - API for sending device management requests
  * @hba: UFS hba
  * @cmd_type: specifies the type (NOP, Query...)
- * @timeout: time in seconds
+ * @timeout: timeout in milliseconds
  *
  * NOTE: Since there is only one available tag for device management commands,
  * it is expected you hold the hba->dev_cmd.lock mutex.
@@ -2872,6 +2872,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
        }
        tag = req->tag;
        WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+       /* Set the timeout such that the SCSI error handler is not activated. */
+       req->timeout = msecs_to_jiffies(2 * timeout);
+       blk_mq_start_request(req);
 
        init_completion(&wait);
        lrbp = &hba->lrb[tag];
index 8a79605d9652150b9433c81b8cfd811eb06f13b1..b9969fce6b4d14d0f9cd933cefe145da558b718b 100644 (file)
@@ -585,7 +585,13 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
                case BTSTAT_SUCCESS:
                case BTSTAT_LINKED_COMMAND_COMPLETED:
                case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
-                       /* If everything went fine, let's move on..  */
+                       /*
+                        * Commands like INQUIRY may transfer less data than
+                        * requested by the initiator via bufflen. Set residual
+                        * count to make upper layer aware of the actual amount
+                        * of data returned.
+                        */
+                       scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
                        cmd->result = (DID_OK << 16);
                        break;
 
index e1957476a0068baccce205b5f0459fee489c25ec..6dd190270123fa32f17b6b995359de85c708f822 100644 (file)
@@ -626,10 +626,8 @@ static int meson_msr_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(base)) {
-               dev_err(&pdev->dev, "io resource mapping failed\n");
+       if (IS_ERR(base))
                return PTR_ERR(base);
-       }
 
        priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
                                             &meson_clk_msr_regmap_config);
index 2827085a323b85078641c055019ea2346c89c203..0ef79d60e88e6dedded0db63be62b90f8f49f80e 100644 (file)
@@ -1150,8 +1150,16 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
 
        ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
                                        bp_mode, nports);
-       if (ret)
-               return ret;
+       if (ret) {
+               u32 version;
+
+               ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version);
+
+               if (version <= 0x01030000)
+                       memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+               else
+                       return ret;
+       }
 
        memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
        of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports);
index 8b161ec4943bfe05d4b406c7eb96bd04c6273100..f4481fe48bf0698fbf5d371353060316473e48b4 100644 (file)
@@ -65,7 +65,7 @@ config SPI_ALTERA
          This is the driver for the Altera SPI Controller.
 
 config SPI_ALTERA_CORE
-       tristate "Altera SPI Controller core code"
+       tristate "Altera SPI Controller core code" if COMPILE_TEST
        select REGMAP
        help
          "The core code for the Altera SPI Controller"
index 8965fe61c8b44deebfcdfd617a6dfc4b0c72db65..fe40626e45aa82b54ca45e48202e5f34c9765b5c 100644 (file)
@@ -68,7 +68,7 @@
 #define BCM2835_SPI_FIFO_SIZE          64
 #define BCM2835_SPI_FIFO_SIZE_3_4      48
 #define BCM2835_SPI_DMA_MIN_LENGTH     96
-#define BCM2835_SPI_NUM_CS               /* raise as necessary */
+#define BCM2835_SPI_NUM_CS             24  /* raise as necessary */
 #define BCM2835_SPI_MODE_BITS  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
                                | SPI_NO_CS | SPI_3WIRE)
 
@@ -1195,6 +1195,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
        struct gpio_chip *chip;
        u32 cs;
 
+       if (spi->chip_select >= BCM2835_SPI_NUM_CS) {
+               dev_err(&spi->dev, "only %d chip-selects supported\n",
+                       BCM2835_SPI_NUM_CS - 1);
+               return -EINVAL;
+       }
+
        /*
         * Precalculate SPI slave's CS register value for ->prepare_message():
         * The driver always uses software-controlled GPIO chip select, hence
@@ -1288,7 +1294,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
        ctlr->use_gpio_descriptors = true;
        ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
-       ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
+       ctlr->num_chipselect = 3;
        ctlr->setup = bcm2835_spi_setup;
        ctlr->transfer_one = bcm2835_spi_transfer_one;
        ctlr->handle_err = bcm2835_spi_handle_err;
index 6a6af85aebfd800cecdd1edcab171a5a92992ada..27d0087f8688415e46a4eea7f4b337d2ea4022ad 100644 (file)
@@ -184,6 +184,8 @@ int spi_bitbang_setup(struct spi_device *spi)
 {
        struct spi_bitbang_cs   *cs = spi->controller_state;
        struct spi_bitbang      *bitbang;
+       bool                    initial_setup = false;
+       int                     retval;
 
        bitbang = spi_master_get_devdata(spi->master);
 
@@ -192,22 +194,30 @@ int spi_bitbang_setup(struct spi_device *spi)
                if (!cs)
                        return -ENOMEM;
                spi->controller_state = cs;
+               initial_setup = true;
        }
 
        /* per-word shift register access, in hardware or bitbanging */
        cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
-       if (!cs->txrx_word)
-               return -EINVAL;
+       if (!cs->txrx_word) {
+               retval = -EINVAL;
+               goto err_free;
+       }
 
        if (bitbang->setup_transfer) {
-               int retval = bitbang->setup_transfer(spi, NULL);
+               retval = bitbang->setup_transfer(spi, NULL);
                if (retval < 0)
-                       return retval;
+                       goto err_free;
        }
 
        dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
 
        return 0;
+
+err_free:
+       if (initial_setup)
+               kfree(cs);
+       return retval;
 }
 EXPORT_SYMBOL_GPL(spi_bitbang_setup);
 
index 0287366874882de8bd941b8617b9625288088772..fb45e6af6638136ed071c282c74572697a32d198 100644 (file)
@@ -1375,11 +1375,13 @@ poll_mode:
        ret = spi_register_controller(ctlr);
        if (ret != 0) {
                dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
-               goto out_free_irq;
+               goto out_release_dma;
        }
 
        return ret;
 
+out_release_dma:
+       dspi_release_dma(dspi);
 out_free_irq:
        if (dspi->irq)
                free_irq(dspi->irq, dspi);
index d0e5aa18b7bad7f156b2018aa383df30e4fb91cc..bdf94cc7be1afe18c4939e39262c47a752c6d945 100644 (file)
@@ -440,6 +440,7 @@ static int fsl_spi_setup(struct spi_device *spi)
 {
        struct mpc8xxx_spi *mpc8xxx_spi;
        struct fsl_spi_reg __iomem *reg_base;
+       bool initial_setup = false;
        int retval;
        u32 hw_mode;
        struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -452,6 +453,7 @@ static int fsl_spi_setup(struct spi_device *spi)
                if (!cs)
                        return -ENOMEM;
                spi_set_ctldata(spi, cs);
+               initial_setup = true;
        }
        mpc8xxx_spi = spi_master_get_devdata(spi->master);
 
@@ -475,6 +477,8 @@ static int fsl_spi_setup(struct spi_device *spi)
        retval = fsl_spi_setup_transfer(spi, NULL);
        if (retval < 0) {
                cs->hw_mode = hw_mode; /* Restore settings */
+               if (initial_setup)
+                       kfree(cs);
                return retval;
        }
 
index 71402f71ddd850110327106787d54c8706343e19..df28c6664aba683ff85e49c79675683e6aeaac0f 100644 (file)
@@ -424,15 +424,22 @@ done:
 static int uwire_setup(struct spi_device *spi)
 {
        struct uwire_state *ust = spi->controller_state;
+       bool initial_setup = false;
+       int status;
 
        if (ust == NULL) {
                ust = kzalloc(sizeof(*ust), GFP_KERNEL);
                if (ust == NULL)
                        return -ENOMEM;
                spi->controller_state = ust;
+               initial_setup = true;
        }
 
-       return uwire_setup_transfer(spi, NULL);
+       status = uwire_setup_transfer(spi, NULL);
+       if (status && initial_setup)
+               kfree(ust);
+
+       return status;
 }
 
 static void uwire_cleanup(struct spi_device *spi)
index 999c2273641642434f65349dd91b6e24192e3ec3..ede7f05e5ced7bf51c3c6160e171c8108a0d6eae 100644 (file)
@@ -1032,8 +1032,22 @@ static void omap2_mcspi_release_dma(struct spi_master *master)
        }
 }
 
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+       struct omap2_mcspi_cs   *cs;
+
+       if (spi->controller_state) {
+               /* Unlink controller state from context save list */
+               cs = spi->controller_state;
+               list_del(&cs->node);
+
+               kfree(cs);
+       }
+}
+
 static int omap2_mcspi_setup(struct spi_device *spi)
 {
+       bool                    initial_setup = false;
        int                     ret;
        struct omap2_mcspi      *mcspi = spi_master_get_devdata(spi->master);
        struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1051,35 +1065,28 @@ static int omap2_mcspi_setup(struct spi_device *spi)
                spi->controller_state = cs;
                /* Link this to context save list */
                list_add_tail(&cs->node, &ctx->cs);
+               initial_setup = true;
        }
 
        ret = pm_runtime_get_sync(mcspi->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(mcspi->dev);
+               if (initial_setup)
+                       omap2_mcspi_cleanup(spi);
 
                return ret;
        }
 
        ret = omap2_mcspi_setup_transfer(spi, NULL);
+       if (ret && initial_setup)
+               omap2_mcspi_cleanup(spi);
+
        pm_runtime_mark_last_busy(mcspi->dev);
        pm_runtime_put_autosuspend(mcspi->dev);
 
        return ret;
 }
 
-static void omap2_mcspi_cleanup(struct spi_device *spi)
-{
-       struct omap2_mcspi_cs   *cs;
-
-       if (spi->controller_state) {
-               /* Unlink controller state from context save list */
-               cs = spi->controller_state;
-               list_del(&cs->node);
-
-               kfree(cs);
-       }
-}
-
 static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
 {
        struct omap2_mcspi *mcspi = data;
index 5e59ba075bc7aefd582987bd88d7f233d3bf1c1f..8ee0cc071777406243ba8c8fc937c89907674093 100644 (file)
@@ -1254,6 +1254,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
                chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
 
                err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
+               if (err)
+                       gpiod_put(chip->gpiod_cs);
        }
 
        return err;
@@ -1267,6 +1269,7 @@ static int setup(struct spi_device *spi)
        struct driver_data *drv_data =
                spi_controller_get_devdata(spi->controller);
        uint tx_thres, tx_hi_thres, rx_thres;
+       int err;
 
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
@@ -1413,7 +1416,11 @@ static int setup(struct spi_device *spi)
        if (drv_data->ssp_type == CE4100_SSP)
                return 0;
 
-       return setup_cs(spi, chip, chip_info);
+       err = setup_cs(spi, chip, chip_info);
+       if (err)
+               kfree(chip);
+
+       return err;
 }
 
 static void cleanup(struct spi_device *spi)
index 297c512069a5780f3d130563363eb7fa3577a33f..5d27ee4822376b9ac9702930420e525c3b20ae6b 100644 (file)
@@ -174,7 +174,7 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
 static int sc18is602_check_transfer(struct spi_device *spi,
                                    struct spi_transfer *t, int tlen)
 {
-       if (t && t->len + tlen > SC18IS602_BUFSIZ)
+       if (t && t->len + tlen > SC18IS602_BUFSIZ + 1)
                return -EINVAL;
 
        return 0;
@@ -219,6 +219,11 @@ static int sc18is602_transfer_one(struct spi_master *master,
        return status;
 }
 
+static size_t sc18is602_max_transfer_size(struct spi_device *spi)
+{
+       return SC18IS602_BUFSIZ;
+}
+
 static int sc18is602_setup(struct spi_device *spi)
 {
        struct sc18is602 *hw = spi_master_get_devdata(spi->master);
@@ -293,6 +298,8 @@ static int sc18is602_probe(struct i2c_client *client,
        master->bits_per_word_mask = SPI_BPW_MASK(8);
        master->setup = sc18is602_setup;
        master->transfer_one_message = sc18is602_transfer_one;
+       master->max_transfer_size = sc18is602_max_transfer_size;
+       master->max_message_size = sc18is602_max_transfer_size;
        master->dev.of_node = np;
        master->min_speed_hz = hw->freq / 128;
        master->max_speed_hz = hw->freq / 4;
index b41a75749b498ed36d1361c5d2da1e4f857cb584..28e70db9bbba852d40ab53d211aada4353cf93a2 100644 (file)
@@ -1068,6 +1068,7 @@ static const struct of_device_id sprd_spi_of_match[] = {
        { .compatible = "sprd,sc9860-spi", },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
 
 static struct platform_driver sprd_spi_driver = {
        .driver = {
index 7e640ccc7e77427cbffa669c621ed5f81c9df9da..594f6413620863d9e77a4a14274a1720bbd1a8e4 100644 (file)
@@ -294,7 +294,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
        int err = 0;
 
        if (!op->data.nbytes)
-               return stm32_qspi_wait_nobusy(qspi);
+               goto wait_nobusy;
 
        if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
                goto out;
@@ -315,6 +315,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
 out:
        /* clear flags */
        writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+wait_nobusy:
+       if (!err)
+               err = stm32_qspi_wait_nobusy(qspi);
 
        return err;
 }
index 5d8a5ee62fa236bbf207f24d778209f089cb077d..9262c6418463b6914aabad1bb0cfea00ce4f48e7 100644 (file)
@@ -367,7 +367,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
 }
 
 /**
- * zynq_qspi_setup - Configure the QSPI controller
+ * zynq_qspi_setup_op - Configure the QSPI controller
  * @spi:       Pointer to the spi_device structure
  *
  * Sets the operational mode of QSPI controller for the next QSPI transfer, baud
@@ -528,18 +528,17 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
        struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
        int err = 0, i;
        u8 *tmpbuf;
-       u8 opcode = op->cmd.opcode;
 
        dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
-               opcode, op->cmd.buswidth, op->addr.buswidth,
+               op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
                op->dummy.buswidth, op->data.buswidth);
 
        zynq_qspi_chipselect(mem->spi, true);
        zynq_qspi_config_op(xqspi, mem->spi);
 
-       if (op->cmd.nbytes) {
+       if (op->cmd.opcode) {
                reinit_completion(&xqspi->data_completion);
-               xqspi->txbuf = &opcode;
+               xqspi->txbuf = (u8 *)&op->cmd.opcode;
                xqspi->rxbuf = NULL;
                xqspi->tx_bytes = op->cmd.nbytes;
                xqspi->rx_bytes = op->cmd.nbytes;
@@ -679,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        xqspi->irq = platform_get_irq(pdev, 0);
        if (xqspi->irq <= 0) {
                ret = -ENXIO;
-               goto remove_master;
+               goto clk_dis_all;
        }
        ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
                               0, pdev->name, xqspi);
        if (ret != 0) {
                ret = -ENXIO;
                dev_err(&pdev->dev, "request_irq failed\n");
-               goto remove_master;
+               goto clk_dis_all;
        }
 
        ret = of_property_read_u32(np, "num-cs",
@@ -694,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        if (ret < 0) {
                ctlr->num_chipselect = 1;
        } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+               ret = -EINVAL;
                dev_err(&pdev->dev, "only 2 chip selects are available\n");
-               goto remove_master;
+               goto clk_dis_all;
        } else {
                ctlr->num_chipselect = num_cs;
        }
index ba425b9c77007ab9989d70c5be143ffebf69caa3..e353b7a9e54ebad4f5aa3d3262f474c289e3dc57 100644 (file)
@@ -47,10 +47,6 @@ static void spidev_release(struct device *dev)
 {
        struct spi_device       *spi = to_spi_device(dev);
 
-       /* spi controllers may cleanup for released devices */
-       if (spi->controller->cleanup)
-               spi->controller->cleanup(spi);
-
        spi_controller_put(spi->controller);
        kfree(spi->driver_override);
        kfree(spi);
@@ -558,6 +554,12 @@ static int spi_dev_check(struct device *dev, void *data)
        return 0;
 }
 
+static void spi_cleanup(struct spi_device *spi)
+{
+       if (spi->controller->cleanup)
+               spi->controller->cleanup(spi);
+}
+
 /**
  * spi_add_device - Add spi_device allocated with spi_alloc_device
  * @spi: spi_device to register
@@ -622,11 +624,13 @@ int spi_add_device(struct spi_device *spi)
 
        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
-       if (status < 0)
+       if (status < 0) {
                dev_err(dev, "can't add %s, status %d\n",
                                dev_name(&spi->dev), status);
-       else
+               spi_cleanup(spi);
+       } else {
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+       }
 
 done:
        mutex_unlock(&spi_add_lock);
@@ -717,7 +721,9 @@ void spi_unregister_device(struct spi_device *spi)
        if (ACPI_COMPANION(&spi->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
        device_remove_software_node(&spi->dev);
-       device_unregister(&spi->dev);
+       device_del(&spi->dev);
+       spi_cleanup(spi);
+       put_device(&spi->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_device);
 
@@ -814,15 +820,29 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 
        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
                if (!(spi->mode & SPI_NO_CS)) {
-                       if (spi->cs_gpiod)
-                               /* polarity handled by gpiolib */
-                               gpiod_set_value_cansleep(spi->cs_gpiod, activate);
-                       else
+                       if (spi->cs_gpiod) {
+                               /*
+                                * Historically ACPI has no means of the GPIO polarity and
+                                * thus the SPISerialBus() resource defines it on the per-chip
+                                * basis. In order to avoid a chain of negations, the GPIO
+                                * polarity is considered being Active High. Even for the cases
+                                * when _DSD() is involved (in the updated versions of ACPI)
+                                * the GPIO CS polarity must be defined Active High to avoid
+                                * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
+                                * into account.
+                                */
+                               if (has_acpi_companion(&spi->dev))
+                                       gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+                               else
+                                       /* Polarity handled by GPIO library */
+                                       gpiod_set_value_cansleep(spi->cs_gpiod, activate);
+                       } else {
                                /*
                                 * invert the enable line, as active low is
                                 * default for SPI.
                                 */
                                gpio_set_value_cansleep(spi->cs_gpio, !enable);
+                       }
                }
                /* Some SPI masters need both GPIO CS & slave_select */
                if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -3451,9 +3471,12 @@ int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
 
        if (spi->controller->set_cs_timing &&
            !(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
+               mutex_lock(&spi->controller->io_mutex);
+
                if (spi->controller->auto_runtime_pm) {
                        status = pm_runtime_get_sync(parent);
                        if (status < 0) {
+                               mutex_unlock(&spi->controller->io_mutex);
                                pm_runtime_put_noidle(parent);
                                dev_err(&spi->controller->dev, "Failed to power device: %d\n",
                                        status);
@@ -3464,11 +3487,13 @@ int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
                                                                hold, inactive);
                        pm_runtime_mark_last_busy(parent);
                        pm_runtime_put_autosuspend(parent);
-                       return status;
                } else {
-                       return spi->controller->set_cs_timing(spi, setup, hold,
+                       status = spi->controller->set_cs_timing(spi, setup, hold,
                                                              inactive);
                }
+
+               mutex_unlock(&spi->controller->io_mutex);
+               return status;
        }
 
        if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
index 741147a4f0fe4ee262eb38e0d144ddd914e91a7f..ecc5c9da902704a5a13bd91493df9a97d6d6e64e 100644 (file)
@@ -2064,7 +2064,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
                        struct nbu2ss_ep *ep,
                        int status)
 {
-       struct nbu2ss_req *req;
+       struct nbu2ss_req *req, *n;
 
        /* Endpoint Disable */
        _nbu2ss_epn_exit(udc, ep);
@@ -2076,7 +2076,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
                return 0;
 
        /* called with irqs blocked */
-       list_for_each_entry(req, &ep->queue, queue) {
+       list_for_each_entry_safe(req, n, &ep->queue, queue) {
                _nbu2ss_ep_done(ep, req, status);
        }
 
index dfd71e99e872ed27c089496a1b9826b9a0fe6a62..eab534dc4bcc0dbfad60de19bcc43000d3791aa3 100644 (file)
@@ -700,7 +700,6 @@ static int ad7746_probe(struct i2c_client *client,
                indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
        else
                indio_dev->num_channels =  ARRAY_SIZE(ad7746_channels) - 2;
-       indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
        indio_dev->modes = INDIO_DIRECT_MODE;
 
        if (pdata) {
index 33e28ccf4d855adc1856d00b5e7e990f774f3a01..b5229bc6eae5bc29518c6747e5a88e896b2b03cc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
  *  GDMA4740 DMAC support
  */
 
@@ -914,6 +913,5 @@ static struct platform_driver gdma_dma_driver = {
 };
 module_platform_driver(gdma_dma_driver);
 
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("Ralink/MTK DMA driver");
 MODULE_LICENSE("GPL v2");
index c1dac6eec59f5bf777ee45c88ee4b2b7c668964d..ffa1cf4f9a826e5474c10effba39e24e51a048b0 100644 (file)
@@ -527,6 +527,9 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
        struct security_priv *psecuritypriv =  &(padapter->securitypriv);
        struct sta_priv *pstapriv = &padapter->stapriv;
+       char *grpkey = padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey;
+       char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
+       char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
 
        param->u.crypt.err = 0;
        param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -609,7 +612,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
                {
                        if (strcmp(param->u.crypt.alg, "WEP") == 0)
                        {
-                               memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                               memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
                                if (param->u.crypt.key_len == 13)
@@ -622,12 +625,12 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
                        {
                                psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
 
-                               memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                               memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
                                /* set mic key */
-                               memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
-                               memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+                               memcpy(txkey, &(param->u.crypt.key[16]), 8);
+                               memcpy(rxkey, &(param->u.crypt.key[24]), 8);
 
                                psecuritypriv->busetkipkey = true;
 
@@ -636,7 +639,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
                        {
                                psecuritypriv->dot118021XGrpPrivacy = _AES_;
 
-                               memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                               memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
                        }
                        else
                        {
@@ -713,7 +716,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
                        {
                                if (strcmp(param->u.crypt.alg, "WEP") == 0)
                                {
-                                       memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                                       memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                        psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
                                        if (param->u.crypt.key_len == 13)
@@ -725,12 +728,12 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
                                {
                                        psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
 
-                                       memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                                       memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                        /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
                                        /* set mic key */
-                                       memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
-                                       memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+                                       memcpy(txkey, &(param->u.crypt.key[16]), 8);
+                                       memcpy(rxkey, &(param->u.crypt.key[24]), 8);
 
                                        psecuritypriv->busetkipkey = true;
 
@@ -739,7 +742,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
                                {
                                        psecuritypriv->dot118021XGrpPrivacy = _AES_;
 
-                                       memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                                       memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
                                }
                                else
                                {
@@ -2088,7 +2091,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
        struct net_device *ndev = padapter->pnetdev;
 
        {
-               struct station_info sinfo;
+               struct station_info sinfo = {};
                u8 ie_offset;
                if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
                        ie_offset = _ASOCREQ_IE_OFFSET_;
index e98e5388d5c7b8c8828b0fd4ab74616cd8b719f9..5088c3731b6df0337ae9a13bf68b15577b911770 100644 (file)
@@ -2963,6 +2963,9 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
        struct security_priv *psecuritypriv = &(padapter->securitypriv);
        struct sta_priv *pstapriv = &padapter->stapriv;
+       char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
+       char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
+       char *grpkey = psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey;
 
        param->u.crypt.err = 0;
        param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -3064,7 +3067,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
        if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /*  group key */
                if (param->u.crypt.set_tx == 1) {
                        if (strcmp(param->u.crypt.alg, "WEP") == 0) {
-                               memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                               memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
                                if (param->u.crypt.key_len == 13)
@@ -3073,11 +3076,11 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
                        } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
                                psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
 
-                               memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                               memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
                                /* set mic key */
-                               memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+                               memcpy(txkey, &(param->u.crypt.key[16]), 8);
                                memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
 
                                psecuritypriv->busetkipkey = true;
@@ -3086,7 +3089,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
                        else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
                                psecuritypriv->dot118021XGrpPrivacy = _AES_;
 
-                               memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                               memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
                        } else {
                                psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
                        }
@@ -3142,7 +3145,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
 
                        } else { /* group key??? */
                                if (strcmp(param->u.crypt.alg, "WEP") == 0) {
-                                       memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                                       memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                        psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
                                        if (param->u.crypt.key_len == 13)
@@ -3150,19 +3153,19 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
                                } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
                                        psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
 
-                                       memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                                       memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
 
                                        /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
                                        /* set mic key */
-                                       memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
-                                       memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+                                       memcpy(txkey, &(param->u.crypt.key[16]), 8);
+                                       memcpy(rxkey, &(param->u.crypt.key[24]), 8);
 
                                        psecuritypriv->busetkipkey = true;
 
                                } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
                                        psecuritypriv->dot118021XGrpPrivacy = _AES_;
 
-                                       memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+                                       memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
                                } else {
                                        psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
                                }
index d6fdd1c61f90374957a2bdff0abb26b94f894abf..a526f9678c34bde71d001f3896d2ff201861ea50 100644 (file)
@@ -204,11 +204,11 @@ static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
        struct iblock_dev_plug *ib_dev_plug;
 
        /*
-        * Each se_device has a per cpu work this can be run from. Wwe
+        * Each se_device has a per cpu work this can be run from. We
         * shouldn't have multiple threads on the same cpu calling this
         * at the same time.
         */
-       ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()];
+       ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
        if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
                return NULL;
 
index 8fbfe75c5744a8164efd9a9a24650131c29b6b8b..7e35eddd9eb700f36d52db375dd17b0d35ecee83 100644 (file)
@@ -1416,7 +1416,7 @@ void __target_init_cmd(
        cmd->orig_fe_lun = unpacked_lun;
 
        if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
-               cmd->cpuid = smp_processor_id();
+               cmd->cpuid = raw_smp_processor_id();
 
        cmd->state_active = false;
 }
@@ -3121,9 +3121,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
 {
-
-       assert_spin_locked(&cmd->t_state_lock);
-       WARN_ON_ONCE(!irqs_disabled());
+       lockdep_assert_held(&cmd->t_state_lock);
 
        if (fabric_stop)
                cmd->transport_state |= CMD_T_FABRIC_STOP;
index 198d25ae482ab6fcd83a675b6b73c1ac4ab956e3..4bba10e7755aa726282a7eec30efc70026474447 100644 (file)
@@ -516,8 +516,10 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
        dpi = dbi * udev->data_pages_per_blk;
        /* Count the number of already allocated pages */
        xas_set(&xas, dpi);
+       rcu_read_lock();
        for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
                cnt++;
+       rcu_read_unlock();
 
        for (i = cnt; i < page_cnt; i++) {
                /* try to get new page from the mm */
@@ -699,11 +701,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
                                  struct scatterlist *sg, unsigned int sg_nents,
                                  struct iovec **iov, size_t data_len)
 {
-       XA_STATE(xas, &udev->data_pages, 0);
        /* start value of dbi + 1 must not be a valid dbi */
        int dbi = -2;
        size_t page_remaining, cp_len;
-       int page_cnt, page_inx;
+       int page_cnt, page_inx, dpi;
        struct sg_mapping_iter sg_iter;
        unsigned int sg_flags;
        struct page *page;
@@ -726,9 +727,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
                if (page_cnt > udev->data_pages_per_blk)
                        page_cnt = udev->data_pages_per_blk;
 
-               xas_set(&xas, dbi * udev->data_pages_per_blk);
-               for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
-                       page = xas_next(&xas);
+               dpi = dbi * udev->data_pages_per_blk;
+               for (page_inx = 0; page_inx < page_cnt && data_len;
+                    page_inx++, dpi++) {
+                       page = xa_load(&udev->data_pages, dpi);
 
                        if (direction == TCMU_DATA_AREA_TO_SG)
                                flush_dcache_page(page);
index 337c8d82f74ebfcc0f90e1d080a4a04456fa8156..6d0f7062bb870749e3a5117cf6dd98fe9d944a4b 100644 (file)
@@ -21,6 +21,7 @@
 #define TEEC_SUCCESS                   0x00000000
 #define TEEC_ERROR_GENERIC             0xFFFF0000
 #define TEEC_ERROR_BAD_PARAMETERS      0xFFFF0006
+#define TEEC_ERROR_OUT_OF_MEMORY       0xFFFF000C
 #define TEEC_ERROR_COMMUNICATION       0xFFFF000E
 
 #define TEEC_ORIGIN_COMMS              0x00000002
@@ -93,6 +94,18 @@ struct amdtee_shm_data {
        u32     buf_id;
 };
 
+/**
+ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
+ *                        Processor
+ * @ta_handle: Handle to TA loaded in TEE
+ * @refcount:  Reference count for the loaded TA
+ */
+struct amdtee_ta_data {
+       struct list_head list_node;
+       u32 ta_handle;
+       u32 refcount;
+};
+
 #define LOWER_TWO_BYTE_MASK    0x0000FFFF
 
 /**
index 096dd4d92d39ceac19a85a8dc504ddc2cd280c78..07f36ac834c888a428683b4df7def47684cfb27f 100644 (file)
@@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
        return ret;
 }
 
+static DEFINE_MUTEX(ta_refcount_mutex);
+static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
+
+static u32 get_ta_refcount(u32 ta_handle)
+{
+       struct amdtee_ta_data *ta_data;
+       u32 count = 0;
+
+       /* Caller must hold a mutex */
+       list_for_each_entry(ta_data, &ta_list, list_node)
+               if (ta_data->ta_handle == ta_handle)
+                       return ++ta_data->refcount;
+
+       ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
+       if (ta_data) {
+               ta_data->ta_handle = ta_handle;
+               ta_data->refcount = 1;
+               count = ta_data->refcount;
+               list_add(&ta_data->list_node, &ta_list);
+       }
+
+       return count;
+}
+
+static u32 put_ta_refcount(u32 ta_handle)
+{
+       struct amdtee_ta_data *ta_data;
+       u32 count = 0;
+
+       /* Caller must hold a mutex */
+       list_for_each_entry(ta_data, &ta_list, list_node)
+               if (ta_data->ta_handle == ta_handle) {
+                       count = --ta_data->refcount;
+                       if (count == 0) {
+                               list_del(&ta_data->list_node);
+                               kfree(ta_data);
+                               break;
+                       }
+               }
+
+       return count;
+}
+
 int handle_unload_ta(u32 ta_handle)
 {
        struct tee_cmd_unload_ta cmd = {0};
-       u32 status;
+       u32 status, count;
        int ret;
 
        if (!ta_handle)
                return -EINVAL;
 
+       mutex_lock(&ta_refcount_mutex);
+
+       count = put_ta_refcount(ta_handle);
+
+       if (count) {
+               pr_debug("unload ta: not unloading %u count %u\n",
+                        ta_handle, count);
+               ret = -EBUSY;
+               goto unlock;
+       }
+
        cmd.ta_handle = ta_handle;
 
        ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
@@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
        if (!ret && status != 0) {
                pr_err("unload ta: status = 0x%x\n", status);
                ret = -EBUSY;
+       } else {
+               pr_debug("unloaded ta handle %u\n", ta_handle);
        }
 
+unlock:
+       mutex_unlock(&ta_refcount_mutex);
        return ret;
 }
 
@@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
 
 int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
 {
-       struct tee_cmd_load_ta cmd = {0};
+       struct tee_cmd_unload_ta unload_cmd = {};
+       struct tee_cmd_load_ta load_cmd = {};
        phys_addr_t blob;
        int ret;
 
@@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
                return -EINVAL;
        }
 
-       cmd.hi_addr = upper_32_bits(blob);
-       cmd.low_addr = lower_32_bits(blob);
-       cmd.size = size;
+       load_cmd.hi_addr = upper_32_bits(blob);
+       load_cmd.low_addr = lower_32_bits(blob);
+       load_cmd.size = size;
 
-       ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
-                                 sizeof(cmd), &arg->ret);
+       mutex_lock(&ta_refcount_mutex);
+
+       ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
+                                 sizeof(load_cmd), &arg->ret);
        if (ret) {
                arg->ret_origin = TEEC_ORIGIN_COMMS;
                arg->ret = TEEC_ERROR_COMMUNICATION;
-       } else {
-               set_session_id(cmd.ta_handle, 0, &arg->session);
+       } else if (arg->ret == TEEC_SUCCESS) {
+               ret = get_ta_refcount(load_cmd.ta_handle);
+               if (!ret) {
+                       arg->ret_origin = TEEC_ORIGIN_COMMS;
+                       arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+                       /* Unload the TA on error */
+                       unload_cmd.ta_handle = load_cmd.ta_handle;
+                       psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+                                           (void *)&unload_cmd,
+                                           sizeof(unload_cmd), &ret);
+               } else {
+                       set_session_id(load_cmd.ta_handle, 0, &arg->session);
+               }
        }
+       mutex_unlock(&ta_refcount_mutex);
 
        pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
-                cmd.ta_handle, arg->ret_origin, arg->ret);
+                load_cmd.ta_handle, arg->ret_origin, arg->ret);
 
        return 0;
 }
index 8a6a8f30bb427ab060b4a132c1cbd0af06f1184a..da6b88e80dc07e0afd1fc1d7eb9235c27a53a60b 100644 (file)
@@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
                        continue;
 
                handle_close_session(sess->ta_handle, sess->session_info[i]);
+               handle_unload_ta(sess->ta_handle);
        }
 
-       /* Unload Trusted Application once all sessions are closed */
-       handle_unload_ta(sess->ta_handle);
        kfree(sess);
 }
 
@@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
        struct amdtee_session *sess = container_of(ref, struct amdtee_session,
                                                   refcount);
 
-       /* Unload the TA from TEE */
-       handle_unload_ta(sess->ta_handle);
        mutex_lock(&session_list_mutex);
        list_del(&sess->list_node);
        mutex_unlock(&session_list_mutex);
@@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
 {
        struct amdtee_context_data *ctxdata = ctx->data;
        struct amdtee_session *sess = NULL;
-       u32 session_info;
+       u32 session_info, ta_handle;
        size_t ta_size;
        int rc, i;
        void *ta;
@@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
        if (arg->ret != TEEC_SUCCESS)
                goto out;
 
+       ta_handle = get_ta_handle(arg->session);
+
        mutex_lock(&session_list_mutex);
        sess = alloc_session(ctxdata, arg->session);
        mutex_unlock(&session_list_mutex);
 
        if (!sess) {
+               handle_unload_ta(ta_handle);
                rc = -ENOMEM;
                goto out;
        }
@@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
 
        if (i >= TEE_NUM_SESSIONS) {
                pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+               handle_unload_ta(ta_handle);
                kref_put(&sess->refcount, destroy_session);
                rc = -ENOMEM;
                goto out;
@@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
                spin_lock(&sess->lock);
                clear_bit(i, sess->sess_mask);
                spin_unlock(&sess->lock);
+               handle_unload_ta(ta_handle);
                kref_put(&sess->refcount, destroy_session);
                goto out;
        }
 
        sess->session_info[i] = session_info;
-       set_session_id(sess->ta_handle, i, &arg->session);
+       set_session_id(ta_handle, i, &arg->session);
 out:
        free_pages((u64)ta, get_order(ta_size));
        return rc;
@@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
 
        /* Close the session */
        handle_close_session(ta_handle, session_info);
+       handle_unload_ta(ta_handle);
 
        kref_put(&sess->refcount, destroy_session);
 
index 6132cc8d014c08512b487a31041844f65dbd6830..6e6eb836e9b62c8a1d4ebe4548641a898c282055 100644 (file)
@@ -220,6 +220,7 @@ int optee_open_session(struct tee_context *ctx,
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess = NULL;
+       uuid_t client_uuid;
 
        /* +2 for the meta parameters added below */
        shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
@@ -240,10 +241,11 @@ int optee_open_session(struct tee_context *ctx,
        memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
        msg_arg->params[1].u.value.c = arg->clnt_login;
 
-       rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
-                                         arg->clnt_login, arg->clnt_uuid);
+       rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
+                                         arg->clnt_uuid);
        if (rc)
                goto out;
+       export_uuid(msg_arg->params[1].u.octets, &client_uuid);
 
        rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
        if (rc)
index 81ff593ac4ec22ff9ac6fb735fa5eec1e79ee7c1..e3d72d09c4848c6dfecb9945e297abf6e25d956a 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/types.h>
 
 /*
- * This file defines the OP-TEE message protocol used to communicate
+ * This file defines the OP-TEE message protocol (ABI) used to communicate
  * with an instance of OP-TEE running in secure world.
  *
  * This file is divided into two sections.
@@ -144,9 +144,10 @@ struct optee_msg_param_value {
  * @tmem:      parameter by temporary memory reference
  * @rmem:      parameter by registered memory reference
  * @value:     parameter by opaque value
+ * @octets:    parameter by octet string
  *
  * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
  * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
  * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
  * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
@@ -157,6 +158,7 @@ struct optee_msg_param {
                struct optee_msg_param_tmem tmem;
                struct optee_msg_param_rmem rmem;
                struct optee_msg_param_value value;
+               u8 octets[24];
        } u;
 };
 
index d1248ba943a4eb07660633a529eae0550c4e5675..62c0aa5d0783770baac479928c3a31a922b1c117 100644 (file)
@@ -237,6 +237,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
        if (ACPI_FAILURE(status))
                trip_cnt = 0;
        else {
+               int i;
+
                int34x_thermal_zone->aux_trips =
                        kcalloc(trip_cnt,
                                sizeof(*int34x_thermal_zone->aux_trips),
@@ -247,6 +249,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
                }
                trip_mask = BIT(trip_cnt) - 1;
                int34x_thermal_zone->aux_trip_nr = trip_cnt;
+               for (i = 0; i < trip_cnt; ++i)
+                       int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID;
        }
 
        trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
index f8e882592ba5d351b9311cf9ba47cb10e8bdcd23..99abdc03c44cef1361cdd58e143835b67b848965 100644 (file)
@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void)
        return atomic_read(&therm_throt_en);
 }
 
+void __init therm_lvt_init(void)
+{
+       /*
+        * This function is only called on boot CPU. Save the init thermal
+        * LVT value on BSP and use that value to restore APs' thermal LVT
+        * entry BIOS programmed later
+        */
+       if (intel_thermal_supported(&boot_cpu_data))
+               lvtthmr_init = apic_read(APIC_LVTTHMR);
+}
+
 void intel_init_thermal(struct cpuinfo_x86 *c)
 {
        unsigned int cpu = smp_processor_id();
@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
        if (!intel_thermal_supported(c))
                return;
 
-       /* On the BSP? */
-       if (c == &boot_cpu_data)
-               lvtthmr_init = apic_read(APIC_LVTTHMR);
-
        /*
         * First check if its enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
index 295742e839602f3d030affc4578b8c943e91e642..4d8edc61a78b22ac24ded8cff26a90d05d481a07 100644 (file)
@@ -166,7 +166,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
        if (thres_reg_value)
                *temp = zonedev->tj_max - thres_reg_value * 1000;
        else
-               *temp = 0;
+               *temp = THERMAL_TEMP_INVALID;
        pr_debug("sys_get_trip_temp %d\n", *temp);
 
        return 0;
index b460b56e981cc673ebd92fecbf6f9886f540d69e..232fd0b333251c3c7b4bfecf7289c82585002970 100644 (file)
@@ -441,7 +441,7 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
 
        if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL) {
                dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan);
-               return ret;
+               return -EINVAL;
        }
        channel->adc_channel = args.args[0];
 
index ebe7cb70bfb6334990f3306411949f04b5f5a783..ea0603b59309f5f0b0218d6e8398006cb41e6bbf 100644 (file)
@@ -770,7 +770,7 @@ static int ti_bandgap_tshut_init(struct ti_bandgap *bgp,
 }
 
 /**
- * ti_bandgap_alert_init() - setup and initialize talert handling
+ * ti_bandgap_talert_init() - setup and initialize talert handling
  * @bgp: pointer to struct ti_bandgap
  * @pdev: pointer to device struct platform_device
  *
index 7288aaf01ae6a494148ecb11df8b01f3b3d663d9..5631319f7b205676382166bcce2568aa7119b3ce 100644 (file)
@@ -366,15 +366,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
                        void *buf, size_t size)
 {
        unsigned int retries = DMA_PORT_RETRIES;
-       unsigned int offset;
-
-       offset = address & 3;
-       address = address & ~3;
 
        do {
-               u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+               unsigned int offset;
+               size_t nbytes;
                int ret;
 
+               offset = address & 3;
+               nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
+
                ret = dma_port_flash_read_block(dma, address, dma->buf,
                                                ALIGN(nbytes, 4));
                if (ret) {
@@ -386,6 +386,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
                        return ret;
                }
 
+               nbytes -= offset;
                memcpy(buf, dma->buf + offset, nbytes);
 
                size -= nbytes;
index 680bc738dd66dbaed41a67d0cf144426329da61c..671d72af8ba13b62fcbd04c46ec3bf67a266140f 100644 (file)
@@ -68,15 +68,15 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
        unsigned int retries = USB4_DATA_RETRIES;
        unsigned int offset;
 
-       offset = address & 3;
-       address = address & ~3;
-
        do {
-               size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
                unsigned int dwaddress, dwords;
                u8 data[USB4_DATA_DWORDS * 4];
+               size_t nbytes;
                int ret;
 
+               offset = address & 3;
+               nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
+
                dwaddress = address / 4;
                dwords = ALIGN(nbytes, 4) / 4;
 
@@ -87,6 +87,7 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
                        return ret;
                }
 
+               nbytes -= offset;
                memcpy(buf, data + offset, nbytes);
 
                size -= nbytes;
index 52bb21205bb682555ba6f22ce89b2ef388f0cc79..6473361525d1f792868db6117bd7c4555845a319 100644 (file)
@@ -7,6 +7,7 @@
  *  Copyright (C) 2001 Russell King.
  */
 
+#include <linux/bits.h>
 #include <linux/serial_8250.h>
 #include <linux/serial_reg.h>
 #include <linux/dmaengine.h>
@@ -70,24 +71,25 @@ struct serial8250_config {
        unsigned int    flags;
 };
 
-#define UART_CAP_FIFO  (1 << 8)        /* UART has FIFO */
-#define UART_CAP_EFR   (1 << 9)        /* UART has EFR */
-#define UART_CAP_SLEEP (1 << 10)       /* UART has IER sleep */
-#define UART_CAP_AFE   (1 << 11)       /* MCR-based hw flow control */
-#define UART_CAP_UUE   (1 << 12)       /* UART needs IER bit 6 set (Xscale) */
-#define UART_CAP_RTOIE (1 << 13)       /* UART needs IER bit 4 set (Xscale, Tegra) */
-#define UART_CAP_HFIFO (1 << 14)       /* UART has a "hidden" FIFO */
-#define UART_CAP_RPM   (1 << 15)       /* Runtime PM is active while idle */
-#define UART_CAP_IRDA  (1 << 16)       /* UART supports IrDA line discipline */
-#define UART_CAP_MINI  (1 << 17)       /* Mini UART on BCM283X family lacks:
+#define UART_CAP_FIFO  BIT(8)  /* UART has FIFO */
+#define UART_CAP_EFR   BIT(9)  /* UART has EFR */
+#define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */
+#define UART_CAP_AFE   BIT(11) /* MCR-based hw flow control */
+#define UART_CAP_UUE   BIT(12) /* UART needs IER bit 6 set (Xscale) */
+#define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */
+#define UART_CAP_RPM   BIT(15) /* Runtime PM is active while idle */
+#define UART_CAP_IRDA  BIT(16) /* UART supports IrDA line discipline */
+#define UART_CAP_MINI  BIT(17) /* Mini UART on BCM283X family lacks:
                                         * STOP PARITY EPAR SPAR WLEN5 WLEN6
                                         */
 
-#define UART_BUG_QUOT  (1 << 0)        /* UART has buggy quot LSB */
-#define UART_BUG_TXEN  (1 << 1)        /* UART has buggy TX IIR status */
-#define UART_BUG_NOMSR (1 << 2)        /* UART has buggy MSR status bits (Au1x00) */
-#define UART_BUG_THRE  (1 << 3)        /* UART has buggy THRE reassertion */
-#define UART_BUG_PARITY        (1 << 4)        /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_QUOT  BIT(0)  /* UART has buggy quot LSB */
+#define UART_BUG_TXEN  BIT(1)  /* UART has buggy TX IIR status */
+#define UART_BUG_NOMSR BIT(2)  /* UART has buggy MSR status bits (Au1x00) */
+#define UART_BUG_THRE  BIT(3)  /* UART has buggy THRE reassertion */
+#define UART_BUG_PARITY        BIT(4)  /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_TXRACE        BIT(5)  /* UART Tx fails to set remote DR */
 
 
 #ifdef CONFIG_SERIAL_8250_SHARE_IRQ
index 61550f24a2d377e45e0a2a718c659b43eab9384c..d035d08cb98714186c0143458dc90814e6b91fc2 100644 (file)
@@ -437,6 +437,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
        port.port.status = UPSTAT_SYNC_FIFO;
        port.port.dev = &pdev->dev;
        port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+       port.bugs |= UART_BUG_TXRACE;
 
        rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
        if (rc < 0)
index 9e204f9b799a1c868a21086e32ea5f4cd272c3f2..a3a0154da567da4a37fb9cc7a8fb1ba631b64bc8 100644 (file)
@@ -714,6 +714,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
        { "APMC0D08", 0},
        { "AMD0020", 0 },
        { "AMDI0020", 0 },
+       { "AMDI0022", 0 },
        { "BRCM2032", 0 },
        { "HISI0031", 0 },
        { },
index 2f49c580139b86276952cb31443e17d9eac7e00c..bd4e9f6ac29ce6e1dcd25be85b3d69408b1e5477 100644 (file)
@@ -553,7 +553,11 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
 {
        struct exar8250 *priv = pci_get_drvdata(pcidev);
        struct uart_8250_port *port = serial8250_get_port(priv->line[0]);
-       struct platform_device *pdev = port->port.private_data;
+       struct platform_device *pdev;
+
+       pdev = port->port.private_data;
+       if (!pdev)
+               return;
 
        device_remove_software_node(&pdev->dev);
        platform_device_unregister(pdev);
index 689d8227f95f7dfbc04a27fa39a63f6b85eae2b7..780cc99732b6239e980072756d17c17699bfc8fa 100644 (file)
@@ -56,6 +56,8 @@ struct serial_private {
        int                     line[];
 };
 
+#define PCI_DEVICE_ID_HPE_PCI_SERIAL   0x37e
+
 static const struct pci_device_id pci_use_msi[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
                         0xA000, 0x1000) },
@@ -63,6 +65,8 @@ static const struct pci_device_id pci_use_msi[] = {
                         0xA000, 0x1000) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
                         0xA000, 0x1000) },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+                        PCI_ANY_ID, PCI_ANY_ID) },
        { }
 };
 
@@ -1997,6 +2001,16 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .init           = pci_hp_diva_init,
                .setup          = pci_hp_diva_setup,
        },
+       /*
+        * HPE PCI serial device
+        */
+       {
+               .vendor         = PCI_VENDOR_ID_HP_3PAR,
+               .device         = PCI_DEVICE_ID_HPE_PCI_SERIAL,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_hp_diva_setup,
+       },
        /*
         * Intel
         */
@@ -3944,21 +3958,26 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
        uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
        uart.port.uartclk = board->base_baud * 16;
 
-       if (pci_match_id(pci_use_msi, dev)) {
-               dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
-               pci_set_master(dev);
-               rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (board->flags & FL_NOIRQ) {
+               uart.port.irq = 0;
        } else {
-               dev_dbg(&dev->dev, "Using legacy interrupts\n");
-               rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
-       }
-       if (rc < 0) {
-               kfree(priv);
-               priv = ERR_PTR(rc);
-               goto err_deinit;
+               if (pci_match_id(pci_use_msi, dev)) {
+                       dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
+                       pci_set_master(dev);
+                       rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+               } else {
+                       dev_dbg(&dev->dev, "Using legacy interrupts\n");
+                       rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+               }
+               if (rc < 0) {
+                       kfree(priv);
+                       priv = ERR_PTR(rc);
+                       goto err_deinit;
+               }
+
+               uart.port.irq = pci_irq_vector(dev, 0);
        }
 
-       uart.port.irq = pci_irq_vector(dev, 0);
        uart.port.dev = &dev->dev;
 
        for (i = 0; i < nr_ports; i++) {
@@ -4973,6 +4992,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
        {       PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                pbn_b2_1_115200 },
+       /* HPE PCI serial device */
+       {       PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_b1_1_115200 },
 
        {       PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
index d45dab1ab316473d9e9af8138db33a4ec02b2316..fc5ab203228213c4b0524d59f9478db813d3f687 100644 (file)
@@ -1809,6 +1809,18 @@ void serial8250_tx_chars(struct uart_8250_port *up)
        count = up->tx_loadsz;
        do {
                serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+               if (up->bugs & UART_BUG_TXRACE) {
+                       /*
+                        * The Aspeed BMC virtual UARTs have a bug where data
+                        * may get stuck in the BMC's Tx FIFO from bursts of
+                        * writes on the APB interface.
+                        *
+                        * Delay back-to-back writes by a read cycle to avoid
+                        * stalling the VUART. Read a register that won't have
+                        * side-effects and discard the result.
+                        */
+                       serial_in(up, UART_SCR);
+               }
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                port->icount.tx++;
                if (uart_circ_empty(xmit))
index 8534d6e45a1d7b65c149a130526aa5314203b66b..3cbc757d7be76cdc6ebb105da44916069226874f 100644 (file)
@@ -1519,6 +1519,8 @@ static int __init max310x_uart_init(void)
 
 #ifdef CONFIG_SPI_MASTER
        ret = spi_register_driver(&max310x_spi_driver);
+       if (ret)
+               uart_unregister_driver(&max310x_uart);
 #endif
 
        return ret;
index e0c00a1b07639b5a12a583059af4150c3f8d44aa..51b0ecabf2ec963ffefcd33999b08f1d9830931e 100644 (file)
@@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       if (!match)
-               return -ENODEV;
-
        /* Assume that all UART ports have a DT alias or none has */
        id = of_alias_get_id(pdev->dev.of_node, "serial");
        if (!pdev->dev.of_node || id < 0)
index d60abffab70eca80e450174378c50a25d8fe0861..6689d8add8f7aa66926f80cd9d316e5a4a5882e2 100644 (file)
@@ -195,7 +195,6 @@ struct rp2_card {
        void __iomem                    *bar0;
        void __iomem                    *bar1;
        spinlock_t                      card_lock;
-       struct completion               fw_loaded;
 };
 
 #define RP_ID(prod) PCI_VDEVICE(RP, (prod))
@@ -662,17 +661,10 @@ static void rp2_remove_ports(struct rp2_card *card)
        card->initialized_ports = 0;
 }
 
-static void rp2_fw_cb(const struct firmware *fw, void *context)
+static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw)
 {
-       struct rp2_card *card = context;
        resource_size_t phys_base;
-       int i, rc = -ENOENT;
-
-       if (!fw) {
-               dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
-                       RP2_FW_NAME);
-               goto no_fw;
-       }
+       int i, rc = 0;
 
        phys_base = pci_resource_start(card->pdev, 1);
 
@@ -718,23 +710,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context)
                card->initialized_ports++;
        }
 
-       release_firmware(fw);
-no_fw:
-       /*
-        * rp2_fw_cb() is called from a workqueue long after rp2_probe()
-        * has already returned success.  So if something failed here,
-        * we'll just leave the now-dormant device in place until somebody
-        * unbinds it.
-        */
-       if (rc)
-               dev_warn(&card->pdev->dev, "driver initialization failed\n");
-
-       complete(&card->fw_loaded);
+       return rc;
 }
 
 static int rp2_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
 {
+       const struct firmware *fw;
        struct rp2_card *card;
        struct rp2_uart_port *ports;
        void __iomem * const *bars;
@@ -745,7 +727,6 @@ static int rp2_probe(struct pci_dev *pdev,
                return -ENOMEM;
        pci_set_drvdata(pdev, card);
        spin_lock_init(&card->card_lock);
-       init_completion(&card->fw_loaded);
 
        rc = pcim_enable_device(pdev);
        if (rc)
@@ -778,21 +759,23 @@ static int rp2_probe(struct pci_dev *pdev,
                return -ENOMEM;
        card->ports = ports;
 
-       rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
-                             IRQF_SHARED, DRV_NAME, card);
-       if (rc)
+       rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "cannot find '%s' firmware image\n",
+                       RP2_FW_NAME);
                return rc;
+       }
 
-       /*
-        * Only catastrophic errors (e.g. ENOMEM) are reported here.
-        * If the FW image is missing, we'll find out in rp2_fw_cb()
-        * and print an error message.
-        */
-       rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
-                                    GFP_KERNEL, card, rp2_fw_cb);
+       rc = rp2_load_firmware(card, fw);
+
+       release_firmware(fw);
+       if (rc < 0)
+               return rc;
+
+       rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+                             IRQF_SHARED, DRV_NAME, card);
        if (rc)
                return rc;
-       dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
 
        return 0;
 }
@@ -801,7 +784,6 @@ static void rp2_remove(struct pci_dev *pdev)
 {
        struct rp2_card *card = pci_get_drvdata(pdev);
 
-       wait_for_completion(&card->fw_loaded);
        rp2_remove_ports(card);
 }
 
index bbae072a125db880e87c19db1432017455e8fedf..222032792d6c29633904d91feaad89a90a82243f 100644 (file)
@@ -338,7 +338,7 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
 
        do {
                lsr = tegra_uart_read(tup, UART_LSR);
-               if ((lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
+               if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
                        break;
                udelay(1);
        } while (--tmout);
index 87f7127b57e6b1ca5d64414ab76bab5bb3ed9c15..18ff85a83f8067bcff0b8d3fb56acf839b19b0e6 100644 (file)
@@ -863,9 +863,11 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
                goto check_and_exit;
        }
 
-       retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
-       if (retval && (change_irq || change_port))
-               goto exit;
+       if (change_irq || change_port) {
+               retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
+               if (retval)
+                       goto exit;
+       }
 
        /*
         * Ask the low level driver to verify the settings.
index ef37fdf37612f82f1ce280d885775f3178820fe1..4baf1316ea729931eb8675e433144103eabd6c19 100644 (file)
@@ -1023,10 +1023,10 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
 {
        unsigned int bits;
 
+       if (rx_trig >= port->fifosize)
+               rx_trig = port->fifosize - 1;
        if (rx_trig < 1)
                rx_trig = 1;
-       if (rx_trig >= port->fifosize)
-               rx_trig = port->fifosize;
 
        /* HSCIF can be set to an arbitrary level. */
        if (sci_getreg(port, HSRTRGR)->size) {
index 01645e87b3d5c3bb8fb961965eaeba7f3fe6a00d..fa1548d4f94bed40716daac47f50d6ae272ab198 100644 (file)
@@ -1171,7 +1171,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
        /* Resizes the resolution of the display adapater */
        int err = 0;
 
-       if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
+       if (vc->vc_sw->con_resize)
                err = vc->vc_sw->con_resize(vc, width, height, user);
 
        return err;
index 89aeaf3c1bca695fe47e15143ab96ac7da34c1ed..0e0cd9e9e589ecd1d9139a0e42d7ced5d4caab42 100644 (file)
@@ -671,21 +671,58 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
        if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
                return -EFAULT;
 
-       if (v.v_vlin)
-               pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
-       if (v.v_clin)
-               pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
+       /* FIXME: Should check the copies properly */
+       if (!v.v_vlin)
+               v.v_vlin = vc->vc_scan_lines;
+
+       if (v.v_clin) {
+               int rows = v.v_vlin / v.v_clin;
+               if (v.v_rows != rows) {
+                       if (v.v_rows) /* Parameters don't add up */
+                               return -EINVAL;
+                       v.v_rows = rows;
+               }
+       }
+
+       if (v.v_vcol && v.v_ccol) {
+               int cols = v.v_vcol / v.v_ccol;
+               if (v.v_cols != cols) {
+                       if (v.v_cols)
+                               return -EINVAL;
+                       v.v_cols = cols;
+               }
+       }
+
+       if (v.v_clin > 32)
+               return -EINVAL;
 
-       console_lock();
        for (i = 0; i < MAX_NR_CONSOLES; i++) {
-               vc = vc_cons[i].d;
+               struct vc_data *vcp;
 
-               if (vc) {
-                       vc->vc_resize_user = 1;
-                       vc_resize(vc, v.v_cols, v.v_rows);
+               if (!vc_cons[i].d)
+                       continue;
+               console_lock();
+               vcp = vc_cons[i].d;
+               if (vcp) {
+                       int ret;
+                       int save_scan_lines = vcp->vc_scan_lines;
+                       int save_cell_height = vcp->vc_cell_height;
+
+                       if (v.v_vlin)
+                               vcp->vc_scan_lines = v.v_vlin;
+                       if (v.v_clin)
+                               vcp->vc_cell_height = v.v_clin;
+                       vcp->vc_resize_user = 1;
+                       ret = vc_resize(vcp, v.v_cols, v.v_rows);
+                       if (ret) {
+                               vcp->vc_scan_lines = save_scan_lines;
+                               vcp->vc_cell_height = save_cell_height;
+                               console_unlock();
+                               return ret;
+                       }
                }
+               console_unlock();
        }
-       console_unlock();
 
        return 0;
 }
index 0330ba99730e2b2a21054c52b72cdf14bebfea9d..652fe2547587888d38d1b66c561ea7701586dcd8 100644 (file)
@@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
        pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
        if (pdata->recv_buf == NULL) {
                ret = -ENOMEM;
-               goto fail_close;
+               goto fail_free_ring;
        }
 
        ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
                                    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
-       if (ret)
+       if (ret) {
+               vfree(pdata->recv_buf);
                goto fail_close;
+       }
 
        /* put Global Physical Address Label in name */
        snprintf(pdata->recv_name, sizeof(pdata->recv_name),
@@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
 
        ret = vmbus_establish_gpadl(channel, pdata->send_buf,
                                    SEND_BUFFER_SIZE, &pdata->send_gpadl);
-       if (ret)
+       if (ret) {
+               vfree(pdata->send_buf);
                goto fail_close;
+       }
 
        snprintf(pdata->send_name, sizeof(pdata->send_name),
                 "send:%u", pdata->send_gpadl);
@@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
 
 fail_close:
        hv_uio_cleanup(dev, pdata);
+fail_free_ring:
+       vmbus_free_ring(dev->channel);
 
        return ret;
 }
index c7d681fef198d6735dcbe0d2afcdaaed611a4b99..3bb0b0075467931c3b4d2e56fd915a1d56cf7efc 100644 (file)
@@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
        }
 
        if (pdev->irq && !pci_intx_mask_supported(pdev))
-               return -ENOMEM;
+               return -ENODEV;
 
        gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
        if (!gdev)
index 9b1bd417cec0393c1097f96800abd9e8f5ad8f28..5281f8d3fb3d130e36fd7099c62b1b5d0ed9e4bb 100644 (file)
@@ -2007,7 +2007,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
                else
                        mask = BIT(priv_ep->num);
 
-               if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+               if (priv_ep->type != USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
                        cdns3_set_register_bit(&regs->tdl_from_trb, mask);
                        cdns3_set_register_bit(&regs->tdl_beh, mask);
                        cdns3_set_register_bit(&regs->tdl_beh2, mask);
@@ -2046,15 +2046,13 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        case USB_ENDPOINT_XFER_INT:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
 
-               if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
-                   priv_dev->dev_ver > DEV_VER_V2)
+               if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
                        ep_cfg |= EP_CFG_TDL_CHK;
                break;
        case USB_ENDPOINT_XFER_BULK:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
 
-               if ((priv_dev->dev_ver == DEV_VER_V2  && !priv_ep->dir) ||
-                   priv_dev->dev_ver > DEV_VER_V2)
+               if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
                        ep_cfg |= EP_CFG_TDL_CHK;
                break;
        default:
@@ -3268,8 +3266,10 @@ static int __cdns3_gadget_init(struct cdns *cdns)
        pm_runtime_get_sync(cdns->dev);
 
        ret = cdns3_gadget_start(cdns);
-       if (ret)
+       if (ret) {
+               pm_runtime_put_sync(cdns->dev);
                return ret;
+       }
 
        /*
         * Because interrupt line can be shared with other components in
index 56707b6b0f57ca2c5ac6ebff23e62fe5459aeb56..c083985e387b2db123309fb5ce1e6846b6a2b6bb 100644 (file)
@@ -422,17 +422,17 @@ unmap:
 int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
 {
        struct cdnsp_device *pdev = pep->pdev;
-       int ret;
+       int ret_stop = 0;
+       int ret_rem;
 
        trace_cdnsp_request_dequeue(preq);
 
-       if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
-               ret = cdnsp_cmd_stop_ep(pdev, pep);
-               if (ret)
-                       return ret;
-       }
+       if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
+               ret_stop = cdnsp_cmd_stop_ep(pdev, pep);
+
+       ret_rem = cdnsp_remove_request(pdev, preq, pep);
 
-       return cdnsp_remove_request(pdev, preq, pep);
+       return ret_rem ? ret_rem : ret_stop;
 }
 
 static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
index 5f0513c96c04ec768a8ca3f29854d6cebf717535..68972746e3636fadee03ad009a7c615e716db75d 100644 (file)
@@ -1517,13 +1517,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
 {
        struct cdnsp_device *pdev = (struct cdnsp_device *)data;
        union cdnsp_trb *event_ring_deq;
+       unsigned long flags;
        int counter = 0;
 
-       spin_lock(&pdev->lock);
+       spin_lock_irqsave(&pdev->lock, flags);
 
        if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
                cdnsp_died(pdev);
-               spin_unlock(&pdev->lock);
+               spin_unlock_irqrestore(&pdev->lock, flags);
                return IRQ_HANDLED;
        }
 
@@ -1539,7 +1540,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
 
        cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
 
-       spin_unlock(&pdev->lock);
+       spin_unlock_irqrestore(&pdev->lock, flags);
 
        return IRQ_HANDLED;
 }
index c16d900cdaee3ce88bc33f605362561f86850fb5..393f216b916153c832b6f95eec764ac681c23171 100644 (file)
@@ -2061,6 +2061,7 @@ static int udc_start(struct ci_hdrc *ci)
        ci->gadget.name         = ci->platdata->name;
        ci->gadget.otg_caps     = otg_caps;
        ci->gadget.sg_supported = 1;
+       ci->gadget.irq          = ci->irq;
 
        if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
                ci->gadget.quirk_avoids_skb_reserve = 1;
index 508b1c3f8b731bae17acb90b39a9ab64f683adb0..d1e4a7379bebdb0bf73b00b6adb11f64b069c29b 100644 (file)
@@ -321,12 +321,23 @@ exit:
 
 }
 
-static void kill_urbs(struct wdm_device *desc)
+static void poison_urbs(struct wdm_device *desc)
 {
        /* the order here is essential */
-       usb_kill_urb(desc->command);
-       usb_kill_urb(desc->validity);
-       usb_kill_urb(desc->response);
+       usb_poison_urb(desc->command);
+       usb_poison_urb(desc->validity);
+       usb_poison_urb(desc->response);
+}
+
+static void unpoison_urbs(struct wdm_device *desc)
+{
+       /*
+        *  the order here is not essential
+        *  it is symmetrical just to be nice
+        */
+       usb_unpoison_urb(desc->response);
+       usb_unpoison_urb(desc->validity);
+       usb_unpoison_urb(desc->command);
 }
 
 static void free_urbs(struct wdm_device *desc)
@@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
        if (!desc->count) {
                if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
                        dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
-                       kill_urbs(desc);
+                       poison_urbs(desc);
                        spin_lock_irq(&desc->iuspin);
                        desc->resp_count = 0;
                        spin_unlock_irq(&desc->iuspin);
                        desc->manage_power(desc->intf, 0);
+                       unpoison_urbs(desc);
                } else {
                        /* must avoid dev_printk here as desc->intf is invalid */
                        pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
@@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
        wake_up_all(&desc->wait);
        mutex_lock(&desc->rlock);
        mutex_lock(&desc->wlock);
+       poison_urbs(desc);
        cancel_work_sync(&desc->rxwork);
        cancel_work_sync(&desc->service_outs_intr);
-       kill_urbs(desc);
        mutex_unlock(&desc->wlock);
        mutex_unlock(&desc->rlock);
 
@@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
                set_bit(WDM_SUSPENDING, &desc->flags);
                spin_unlock_irq(&desc->iuspin);
                /* callback submits work - order is essential */
-               kill_urbs(desc);
+               poison_urbs(desc);
                cancel_work_sync(&desc->rxwork);
                cancel_work_sync(&desc->service_outs_intr);
+               unpoison_urbs(desc);
        }
        if (!PMSG_IS_AUTO(message)) {
                mutex_unlock(&desc->wlock);
@@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
        wake_up_all(&desc->wait);
        mutex_lock(&desc->rlock);
        mutex_lock(&desc->wlock);
-       kill_urbs(desc);
+       poison_urbs(desc);
        cancel_work_sync(&desc->rxwork);
        cancel_work_sync(&desc->service_outs_intr);
        return 0;
@@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
        struct wdm_device *desc = wdm_find_device(intf);
        int rv;
 
+       unpoison_urbs(desc);
        clear_bit(WDM_OVERFLOW, &desc->flags);
        clear_bit(WDM_RESETTING, &desc->flags);
        rv = recover_from_urb_loss(desc);
index 533236366a03b8d35b8c6a724946dff6d9fe44b3..2218941d35a3fb1df5d3541951f4b64e28bb7c0d 100644 (file)
@@ -1218,7 +1218,12 @@ static int do_proc_bulk(struct usb_dev_state *ps,
        ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
        if (ret)
                return ret;
-       tbuf = kmalloc(len1, GFP_KERNEL);
+
+       /*
+        * len1 can be almost arbitrarily large.  Don't WARN if it's
+        * too big, just fail the request.
+        */
+       tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
        if (!tbuf) {
                ret = -ENOMEM;
                goto done;
@@ -1696,7 +1701,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        if (num_sgs) {
                as->urb->sg = kmalloc_array(num_sgs,
                                            sizeof(struct scatterlist),
-                                           GFP_KERNEL);
+                                           GFP_KERNEL | __GFP_NOWARN);
                if (!as->urb->sg) {
                        ret = -ENOMEM;
                        goto error;
@@ -1731,7 +1736,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                                        (uurb_start - as->usbm->vm_start);
                } else {
                        as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
-                                       GFP_KERNEL);
+                                       GFP_KERNEL | __GFP_NOWARN);
                        if (!as->urb->transfer_buffer) {
                                ret = -ENOMEM;
                                goto error;
index b2bc4b7c428953b8e7dca0e9c872d3d31619e1f9..fc7d6cdacf16b91921aa4bdb3cf427516044c995 100644 (file)
@@ -3642,9 +3642,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
                 * sequence.
                 */
                status = hub_port_status(hub, port1, &portstatus, &portchange);
-
-               /* TRSMRCY = 10 msec */
-               msleep(10);
        }
 
  SuspendCleared:
@@ -3659,6 +3656,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
                                usb_clear_port_feature(hub->hdev, port1,
                                                USB_PORT_FEAT_C_SUSPEND);
                }
+
+               /* TRSMRCY = 10 msec */
+               msleep(10);
        }
 
        if (udev->persist_enabled)
index da5ac4a4595b6c7b2133d12b62885adefcdc7c1f..ab6b815e0089ca1196fb6bf6911387154fc24bf1 100644 (file)
@@ -113,6 +113,7 @@ struct dwc2_hsotg_req;
  * @debugfs: File entry for debugfs file for this endpoint.
  * @dir_in: Set to true if this endpoint is of the IN direction, which
  *          means that it is sending data to the Host.
+ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
  * @index: The index for the endpoint registers.
  * @mc: Multi Count - number of transactions per microframe
  * @interval: Interval for periodic endpoints, in frames or microframes.
@@ -162,6 +163,7 @@ struct dwc2_hsotg_ep {
        unsigned short          fifo_index;
 
        unsigned char           dir_in;
+       unsigned char           map_dir;
        unsigned char           index;
        unsigned char           mc;
        u16                     interval;
index e6bb1bdb2760390592a42cd7423b19b3395e06ee..184964174dc0c0f1723b6376c495ee862e94b2d4 100644 (file)
@@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
 {
        struct usb_request *req = &hs_req->req;
 
-       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
 }
 
 /*
@@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
 {
        int ret;
 
+       hs_ep->map_dir = hs_ep->dir_in;
        ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
        if (ret)
                goto dma_error;
index 3024785d84cb8284454ea3ab3d20c49644f73d90..520a0beef77ca9f51e6838b4842981e9b24c5ac6 100644 (file)
@@ -776,7 +776,3 @@ static struct platform_driver dwc2_platform_driver = {
 };
 
 module_platform_driver(dwc2_platform_driver);
-
-MODULE_DESCRIPTION("DESIGNWARE HS OTG Platform Glue");
-MODULE_AUTHOR("Matthijs Kooijman <matthijs@stdin.nl>");
-MODULE_LICENSE("Dual BSD/GPL");
index b6e53d8212cd874788785c425ba8f27b8069f46c..21129d357f29578b50814f41414c90f21e3b61a0 100644 (file)
@@ -1690,11 +1690,6 @@ static int dwc3_remove(struct platform_device *pdev)
        return 0;
 }
 
-static void dwc3_shutdown(struct platform_device *pdev)
-{
-       dwc3_remove(pdev);
-}
-
 #ifdef CONFIG_PM
 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
 {
@@ -2012,7 +2007,6 @@ MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
 static struct platform_driver dwc3_driver = {
        .probe          = dwc3_probe,
        .remove         = dwc3_remove,
-       .shutdown   = dwc3_shutdown,
        .driver         = {
                .name   = "dwc3",
                .of_match_table = of_match_ptr(of_dwc3_match),
index b1e875c58f20f69813f206a117f738743a0fa366..c5d5760cdf53e63fdcbb6bed12b2cfccab084e01 100644 (file)
@@ -57,7 +57,7 @@
 #define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE   3
 #define DWC3_DEVICE_EVENT_WAKEUP               4
 #define DWC3_DEVICE_EVENT_HIBER_REQ            5
-#define DWC3_DEVICE_EVENT_EOPF                 6
+#define DWC3_DEVICE_EVENT_SUSPEND              6
 #define DWC3_DEVICE_EVENT_SOF                  7
 #define DWC3_DEVICE_EVENT_ERRATIC_ERROR                9
 #define DWC3_DEVICE_EVENT_CMD_CMPL             10
 #define DWC3_DEVTEN_CMDCMPLTEN         BIT(10)
 #define DWC3_DEVTEN_ERRTICERREN                BIT(9)
 #define DWC3_DEVTEN_SOFEN              BIT(7)
-#define DWC3_DEVTEN_EOPFEN             BIT(6)
+#define DWC3_DEVTEN_U3L2L1SUSPEN       BIT(6)
 #define DWC3_DEVTEN_HIBERNATIONREQEVTEN        BIT(5)
 #define DWC3_DEVTEN_WKUPEVTEN          BIT(4)
 #define DWC3_DEVTEN_ULSTCNGEN          BIT(3)
@@ -850,6 +850,7 @@ struct dwc3_trb {
  * @hwparams6: GHWPARAMS6
  * @hwparams7: GHWPARAMS7
  * @hwparams8: GHWPARAMS8
+ * @hwparams9: GHWPARAMS9
  */
 struct dwc3_hwparams {
        u32     hwparams0;
@@ -1374,7 +1375,7 @@ struct dwc3_event_depevt {
  *     3       - ULStChng
  *     4       - WkUpEvt
  *     5       - Reserved
- *     6       - EOPF
+ *     6       - Suspend (EOPF on revisions 2.10a and prior)
  *     7       - SOF
  *     8       - Reserved
  *     9       - ErrticErr
index db231de46bb3592410f6adc83bad13a4487ebf79..d223c54115f4a16abcb8a72a5642497da33c8dea 100644 (file)
@@ -221,8 +221,8 @@ static inline const char *dwc3_gadget_event_string(char *str, size_t size,
                snprintf(str, size, "WakeUp [%s]",
                                dwc3_gadget_link_string(state));
                break;
-       case DWC3_DEVICE_EVENT_EOPF:
-               snprintf(str, size, "End-Of-Frame [%s]",
+       case DWC3_DEVICE_EVENT_SUSPEND:
+               snprintf(str, size, "Suspend [%s]",
                                dwc3_gadget_link_string(state));
                break;
        case DWC3_DEVICE_EVENT_SOF:
@@ -353,8 +353,8 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
                return "Wake-Up";
        case DWC3_DEVICE_EVENT_HIBER_REQ:
                return "Hibernation";
-       case DWC3_DEVICE_EVENT_EOPF:
-               return "End of Periodic Frame";
+       case DWC3_DEVICE_EVENT_SUSPEND:
+               return "Suspend";
        case DWC3_DEVICE_EVENT_SOF:
                return "Start of Frame";
        case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
 
 
 #ifdef CONFIG_DEBUG_FS
+extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
 extern void dwc3_debugfs_init(struct dwc3 *d);
 extern void dwc3_debugfs_exit(struct dwc3 *d);
 #else
+static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{  }
 static inline void dwc3_debugfs_init(struct dwc3 *d)
 {  }
 static inline void dwc3_debugfs_exit(struct dwc3 *d)
index 7146ee2ac05765fd052d0cb5d6525abc52a51760..5dbbe53269d391c58eaed4f09dd6f41cd4b6394f 100644 (file)
@@ -886,30 +886,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
        }
 }
 
-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
-               struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
 {
        struct dentry           *dir;
 
-       dir = debugfs_create_dir(dep->name, parent);
+       dir = debugfs_create_dir(dep->name, dep->dwc->root);
        dwc3_debugfs_create_endpoint_files(dep, dir);
 }
 
-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
-               struct dentry *parent)
-{
-       int                     i;
-
-       for (i = 0; i < dwc->num_eps; i++) {
-               struct dwc3_ep  *dep = dwc->eps[i];
-
-               if (!dep)
-                       continue;
-
-               dwc3_debugfs_create_endpoint_dir(dep, parent);
-       }
-}
-
 void dwc3_debugfs_init(struct dwc3 *dwc)
 {
        struct dentry           *root;
@@ -940,7 +924,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
                                &dwc3_testmode_fops);
                debugfs_create_file("link_state", 0644, root, dwc,
                                    &dwc3_link_state_fops);
-               dwc3_debugfs_create_endpoint_dirs(dwc, root);
        }
 }
 
index b13cfab89d532da8fba2aa17d8f2747b1ea01f69..756faa46d33a70ddfc2cd67a3cc04a386dc35d72 100644 (file)
@@ -165,8 +165,9 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
        if (err < 0)
                goto disable_rpm;
 
-       dwc3_np = of_get_child_by_name(node, "dwc3");
+       dwc3_np = of_get_compatible_child(node, "snps,dwc3");
        if (!dwc3_np) {
+               err = -ENODEV;
                dev_err(dev, "failed to find dwc3 core child\n");
                goto disable_rpm;
        }
index bdf1f98dfad8c861aeb8ad7b3ebf81bb1d34676a..ffe301d6ea3590eaff824dd96d6f72e97eed37a5 100644 (file)
@@ -651,7 +651,7 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
                return PTR_ERR(priv->usb_glue_regmap);
 
        /* Create a regmap for each USB2 PHY control register set */
-       for (i = 0; i < priv->usb2_ports; i++) {
+       for (i = 0; i < priv->drvdata->num_phys; i++) {
                struct regmap_config u2p_regmap_config = {
                        .reg_bits = 8,
                        .val_bits = 32,
@@ -659,6 +659,9 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
                        .max_register = U2P_R1,
                };
 
+               if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+                       continue;
+
                u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
                                                        "u2p-%d", i);
                if (!u2p_regmap_config.name)
@@ -772,13 +775,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
 
        ret = priv->drvdata->usb_init(priv);
        if (ret)
-               goto err_disable_clks;
+               goto err_disable_regulator;
 
        /* Init PHYs */
        for (i = 0 ; i < PHY_COUNT ; ++i) {
                ret = phy_init(priv->phys[i]);
                if (ret)
-                       goto err_disable_clks;
+                       goto err_disable_regulator;
        }
 
        /* Set PHY Power */
@@ -816,6 +819,10 @@ err_phys_exit:
        for (i = 0 ; i < PHY_COUNT ; ++i)
                phy_exit(priv->phys[i]);
 
+err_disable_regulator:
+       if (priv->vbus)
+               regulator_disable(priv->vbus);
+
 err_disable_clks:
        clk_bulk_disable_unprepare(priv->drvdata->num_clks,
                                   priv->drvdata->clks);
index 3db17806e92e7df6c132bb0c71027eb8346748dd..e196673f5c647cb03150c6007aa10db8ad0408bb 100644 (file)
@@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
 
                if (extcon_get_state(edev, EXTCON_USB) == true)
                        dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+               else
+                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
                if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
                        dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+               else
+                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
 
                omap->edev = edev;
        }
index e7b932dcbf820d7c6b7307cf615ccccc54f6d8cf..1e51460938b83c655dbbeae252356c1ac1bb472d 100644 (file)
@@ -123,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
        PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
        PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
        PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+       PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
        PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
        {}
 };
index 8b668ef46f7f1fbead3f3ef091229669e9006026..3cd294264372555ac10318e558dbdbf68c15438f 100644 (file)
@@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
                epnum |= 1;
 
        dep = dwc->eps[epnum];
+       if (dep == NULL)
+               return NULL;
+
        if (dep->flags & DWC3_EP_ENABLED)
                return dep;
 
index dd80e5ca8c78b6e402864a00b8e314a7a09e175f..f14c2aa83759825fcc6b81e887b030b53a32b479 100644 (file)
@@ -1244,6 +1244,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
                        req->start_sg = sg_next(s);
 
                req->num_queued_sgs++;
+               req->num_pending_sgs--;
 
                /*
                 * The number of pending SG entries may not correspond to the
@@ -1251,7 +1252,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
                 * don't include unused SG entries.
                 */
                if (length == 0) {
-                       req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+                       req->num_pending_sgs = 0;
                        break;
                }
 
@@ -1684,7 +1685,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
                }
        }
 
-       return __dwc3_gadget_kick_transfer(dep);
+       __dwc3_gadget_kick_transfer(dep);
+
+       return 0;
 }
 
 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
@@ -2258,13 +2261,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
        }
 
        /*
-        * Synchronize any pending event handling before executing the controller
-        * halt routine.
+        * Synchronize and disable any further event handling while controller
+        * is being enabled/disabled.
         */
-       if (!is_on) {
-               dwc3_gadget_disable_irq(dwc);
-               synchronize_irq(dwc->irq_gadget);
-       }
+       disable_irq(dwc->irq_gadget);
 
        spin_lock_irqsave(&dwc->lock, flags);
 
@@ -2302,6 +2302,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
        ret = dwc3_gadget_run_stop(dwc, is_on, false);
        spin_unlock_irqrestore(&dwc->lock, flags);
+       enable_irq(dwc->irq_gadget);
+
        pm_runtime_put(dwc->dev);
 
        return ret;
@@ -2323,6 +2325,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
        if (DWC3_VER_IS_PRIOR(DWC3, 250A))
                reg |= DWC3_DEVTEN_ULSTCNGEN;
 
+       /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
+       if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
+               reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
+
        dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
 }
 
@@ -2747,6 +2753,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
        INIT_LIST_HEAD(&dep->started_list);
        INIT_LIST_HEAD(&dep->cancelled_list);
 
+       dwc3_debugfs_create_endpoint_dir(dep);
+
        return 0;
 }
 
@@ -2790,6 +2798,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
                        list_del(&dep->endpoint.ep_list);
                }
 
+               debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
                kfree(dep);
        }
 }
@@ -2867,15 +2876,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
        struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
        struct scatterlist *sg = req->sg;
        struct scatterlist *s;
-       unsigned int pending = req->num_pending_sgs;
+       unsigned int num_queued = req->num_queued_sgs;
        unsigned int i;
        int ret = 0;
 
-       for_each_sg(sg, s, pending, i) {
+       for_each_sg(sg, s, num_queued, i) {
                trb = &dep->trb_pool[dep->trb_dequeue];
 
                req->sg = sg_next(s);
-               req->num_pending_sgs--;
+               req->num_queued_sgs--;
 
                ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
                                trb, event, status, true);
@@ -2898,7 +2907,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
 
 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
 {
-       return req->num_pending_sgs == 0;
+       return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
 }
 
 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2907,7 +2916,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
 {
        int ret;
 
-       if (req->num_pending_sgs)
+       if (req->request.num_mapped_sgs)
                ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
                                status);
        else
@@ -3740,7 +3749,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
        case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
                dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
                break;
-       case DWC3_DEVICE_EVENT_EOPF:
+       case DWC3_DEVICE_EVENT_SUSPEND:
                /* It changed to be suspend event for version 2.30a and above */
                if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
                        /*
@@ -4039,6 +4048,7 @@ err5:
        dwc3_gadget_free_endpoints(dwc);
 err4:
        usb_put_gadget(dwc->gadget);
+       dwc->gadget = NULL;
 err3:
        dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
                        dwc->bounce_addr);
@@ -4058,8 +4068,12 @@ err0:
 
 void dwc3_gadget_exit(struct dwc3 *dwc)
 {
-       usb_del_gadget_udc(dwc->gadget);
+       if (!dwc->gadget)
+               return;
+
+       usb_del_gadget(dwc->gadget);
        dwc3_gadget_free_endpoints(dwc);
+       usb_put_gadget(dwc->gadget);
        dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
                          dwc->bounce_addr);
        kfree(dwc->setup_buf);
index 8bb25773b61e91e83ee5a48fefe25a1566418640..05507606b2b42ae5e65a992e3421821861d4e641 100644 (file)
@@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f,
 {
        struct usb_gadget *g = f->config->cdev->gadget;
 
+       /* super-speed-plus descriptor falls back to super-speed one,
+        * if such a descriptor was provided, thus avoiding a NULL
+        * pointer dereference if a 5gbps capable gadget is used with
+        * a 10gbps capable config (device port + cable + host port)
+        */
+       if (!ssp)
+               ssp = ss;
+
        if (fs) {
                f->fs_descriptors = usb_copy_descriptors(fs);
                if (!f->fs_descriptors)
index 7f5cf488b2b1eda3ae69de6e5b85dd32e7992c8a..ffe2486fce71c95956a21b2c921cf84a247ada02 100644 (file)
@@ -791,7 +791,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
                fs_ecm_notify_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
-                       ecm_ss_function, NULL);
+                       ecm_ss_function, ecm_ss_function);
        if (status)
                goto fail;
 
index cfcc4e81fb77656664077ba584dddf83f1f2f845..2cd9942707b46a132663cd0f2570ed077a7081d1 100644 (file)
@@ -302,7 +302,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
        eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
-                       eem_ss_function, NULL);
+                       eem_ss_function, eem_ss_function);
        if (status)
                goto fail;
 
@@ -495,7 +495,7 @@ static int eem_unwrap(struct gether *port,
                        skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (unlikely(!skb2)) {
                                DBG(cdev, "unable to unframe EEM packet\n");
-                               continue;
+                               goto next;
                        }
                        skb_trim(skb2, len - ETH_FCS_LEN);
 
@@ -505,7 +505,7 @@ static int eem_unwrap(struct gether *port,
                                                GFP_ATOMIC);
                        if (unlikely(!skb3)) {
                                dev_kfree_skb_any(skb2);
-                               continue;
+                               goto next;
                        }
                        dev_kfree_skb_any(skb2);
                        skb_queue_tail(list, skb3);
index bf109191659a5362c5edae0480d75736b10f28fe..d4844afeaffc272fae24b726ca44344eda15fb5e 100644 (file)
@@ -3567,6 +3567,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
                ffs->func = NULL;
        }
 
+       /* Drain any pending AIO completions */
+       drain_workqueue(ffs->io_completion_wq);
+
        if (!--opts->refcnt)
                functionfs_unbind(ffs);
 
index 1125f4715830de1e5e201001e042f758a7ac395e..e556993081170ffb4294024473b6e2a76770eaa9 100644 (file)
@@ -802,7 +802,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
                hidg_fs_out_ep_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, hidg_fs_descriptors,
-                       hidg_hs_descriptors, hidg_ss_descriptors, NULL);
+                       hidg_hs_descriptors, hidg_ss_descriptors,
+                       hidg_ss_descriptors);
        if (status)
                goto fail;
 
index b56ad7c3838b86d191842a75408c8da060a00f96..ae41f556eb752b8d4c1628a54cf4f96b1815bb8e 100644 (file)
@@ -207,7 +207,7 @@ autoconf_fail:
        ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
-                       ss_loopback_descs, NULL);
+                       ss_loopback_descs, ss_loopback_descs);
        if (ret)
                return ret;
 
index 019bea8e09cceacd069f8b12dc25efb625cc5e5c..855127249f2424c321ea72fc59b4c45dd1debd5e 100644 (file)
@@ -583,7 +583,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
                data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
                data[1] = data[0];
 
-               DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+               DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
                ncm->notify_state = NCM_NOTIFY_CONNECT;
                break;
        }
@@ -1101,11 +1101,11 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
                        ncm->ndp_dgram_count = 1;
 
                        /* Note: we skip opts->next_ndp_index */
-               }
 
-               /* Delay the timer. */
-               hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
-                             HRTIMER_MODE_REL_SOFT);
+                       /* Start the timer. */
+                       hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+                                     HRTIMER_MODE_REL_SOFT);
+               }
 
                /* Add the datagram position entries */
                ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
index f47fdc1fa7f138b9cd297697bb310a992d3c8e50..59d382fe1bbfc6e2b32e23b4c90e55c7cae9a7d8 100644 (file)
@@ -1101,7 +1101,8 @@ autoconf_fail:
        ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_printer_function,
-                       hs_printer_function, ss_printer_function, NULL);
+                       hs_printer_function, ss_printer_function,
+                       ss_printer_function);
        if (ret)
                return ret;
 
index 0739b05a0ef7b35eb5f18ff5a81790207467de07..ee95e8f5f9d489a58735d2394eca222509da8a17 100644 (file)
@@ -789,7 +789,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
        ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
-                       eth_ss_function, NULL);
+                       eth_ss_function, eth_ss_function);
        if (status)
                goto fail;
 
index e62713846350457234fd9e3587491aec76406bc2..1ed8ff0ac2d310002e1a8c33eabcdab1dca756e8 100644 (file)
@@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
        gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
-                       gser_ss_function, NULL);
+                       gser_ss_function, gser_ss_function);
        if (status)
                goto fail;
        dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
index 5a201ba7b155bb2bd7dbc083ce662319d4641975..1abf08e5164af9e9cba03adbe52be6aa6b5713fd 100644 (file)
@@ -431,7 +431,8 @@ no_iso:
        ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_source_sink_descs,
-                       hs_source_sink_descs, ss_source_sink_descs, NULL);
+                       hs_source_sink_descs, ss_source_sink_descs,
+                       ss_source_sink_descs);
        if (ret)
                return ret;
 
index 4d945254905d91957da12c01366d60883919699d..51c1cae162d9b0a4dc2e80c80a36db6dcc500e69 100644 (file)
@@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
                fs_subset_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
-                       ss_eth_function, NULL);
+                       ss_eth_function, ss_eth_function);
        if (status)
                goto fail;
 
index 7acb507946e6795f4b5ae565c6d4748e693c8804..de161ee0b1f9b5f4a998a950ee7395264c75fa34 100644 (file)
@@ -2057,7 +2057,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
        uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, uasp_fs_function_desc,
-                       uasp_hs_function_desc, uasp_ss_function_desc, NULL);
+                       uasp_hs_function_desc, uasp_ss_function_desc,
+                       uasp_ss_function_desc);
        if (ret)
                goto ep_fail;
 
index 0c418ce50ba0f41a804d6b9000d0bcf9f3caaaea..f1b35a39d1ba8712daf42c7c754cd465e93179eb 100644 (file)
@@ -1488,7 +1488,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
                             struct renesas_usb3_request *usb3_req)
 {
        struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
-       struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep);
+       struct renesas_usb3_request *usb3_req_first;
        unsigned long flags;
        int ret = -EAGAIN;
        u32 enable_bits = 0;
@@ -1496,7 +1496,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
        spin_lock_irqsave(&usb3->lock, flags);
        if (usb3_ep->halt || usb3_ep->started)
                goto out;
-       if (usb3_req != usb3_req_first)
+       usb3_req_first = __usb3_get_request(usb3_ep);
+       if (!usb3_req_first || usb3_req != usb3_req_first)
                goto out;
 
        if (usb3_pn_change(usb3, usb3_ep->num) < 0)
index 6cac642520fc8464ed3abce097cd4fd53c7fd271..9c2eda0918e13fd79ed85637fdc51eb9a5e9b843 100644 (file)
@@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
        struct usb_hcd *hcd;
        struct resource *res;
        int irq;
-       int retval = -ENODEV;
+       int retval;
        struct fotg210_hcd *fotg210;
 
        if (usb_disabled())
@@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
        hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
                        dev_name(dev));
        if (!hcd) {
-               dev_err(dev, "failed to create hcd with err %d\n", retval);
+               dev_err(dev, "failed to create hcd\n");
                retval = -ENOMEM;
                goto fail_create_hcd;
        }
index fa59b242cd5154d075b156ee654c8311bbf04f68..e8af0a125f84b3bcce9ebd430df4559eaff1446c 100644 (file)
@@ -7,8 +7,9 @@
  * Author: Sarah Sharp
  * Some code borrowed from the Linux EHCI driver.
  */
-/* Up to 16 ms to halt an HC */
-#define XHCI_MAX_HALT_USEC     (16*1000)
+
+/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
+#define XHCI_MAX_HALT_USEC     (32 * 1000)
 /* HC not running - set to 1 when run/stop bit is cleared. */
 #define XHCI_STS_HALT          (1<<0)
 
index 5bbccc9a0179fff56b1799d35935db1f332adf08..18c2bbddf080b924f25706092fe885706b1e6d6b 100644 (file)
@@ -57,7 +57,9 @@
 #define PCI_DEVICE_ID_INTEL_CML_XHCI                   0xa3af
 #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI            0x9a13
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
 
+#define PCI_DEVICE_ID_AMD_RENOIR_XHCI                  0x1639
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2                        0x43bb
@@ -166,8 +168,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
            (pdev->device == 0x15e0 || pdev->device == 0x15e1))
                xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
 
-       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
+       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
                xhci->quirks |= XHCI_DISABLE_SPARSE;
+               xhci->quirks |= XHCI_RESET_ON_RESUME;
+       }
 
        if (pdev->vendor == PCI_VENDOR_ID_AMD)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -179,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
                xhci->quirks |= XHCI_U2_DISABLE_WAKE;
 
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+               pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
+               xhci->quirks |= XHCI_BROKEN_D3COLD;
+
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
@@ -243,7 +251,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
             pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
-            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
+            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -535,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
         * Systems with the TI redriver that loses port status change events
         * need to have the registers polled during D3, so avoid D3cold.
         */
-       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+       if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
                pci_d3cold_disable(pdev);
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
index 05c38dd3ee361a6e1fc5e002e8e1a3d2a68c8cae..6acd2329e08d49d6301fd62d7f82f7f9217fff11 100644 (file)
@@ -828,14 +828,10 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                                 cancelled_td_list) {
 
-               /*
-                * Doesn't matter what we pass for status, since the core will
-                * just overwrite it (because the URB has been unlinked).
-                */
                ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
 
                if (td->cancel_status == TD_CLEARED)
-                       xhci_td_cleanup(ep->xhci, td, ring, 0);
+                       xhci_td_cleanup(ep->xhci, td, ring, td->status);
 
                if (ep->xhci->xhc_state & XHCI_STATE_DYING)
                        return;
@@ -862,7 +858,7 @@ done:
        return ret;
 }
 
-static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
                                struct xhci_virt_ep *ep, unsigned int stream_id,
                                struct xhci_td *td,
                                enum xhci_ep_reset_type reset_type)
@@ -875,7 +871,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
         * Device will be reset soon to recover the link so don't do anything
         */
        if (ep->vdev->flags & VDEV_PORT_ERROR)
-               return;
+               return -ENODEV;
 
        /* add td to cancelled list and let reset ep handler take care of it */
        if (reset_type == EP_HARD_RESET) {
@@ -888,16 +884,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
 
        if (ep->ep_state & EP_HALTED) {
                xhci_dbg(xhci, "Reset ep command already pending\n");
-               return;
+               return 0;
        }
 
        err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
        if (err)
-               return;
+               return err;
 
        ep->ep_state |= EP_HALTED;
 
        xhci_ring_cmd_db(xhci);
+
+       return 0;
 }
 
 /*
@@ -935,14 +933,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
                        continue;
                }
                /*
-                * If ring stopped on the TD we need to cancel, then we have to
+                * If a ring stopped on the TD we need to cancel then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
+                * Rings halted due to STALL may show hw_deq is past the stalled
+                * TD, but still require a set TR Deq command to flush xHC cache.
                 */
                hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
                                         td->urb->stream_id);
                hw_deq &= ~0xf;
 
-               if (trb_in_td(xhci, td->start_seg, td->first_trb,
+               if (td->cancel_status == TD_HALTED) {
+                       cached_td = td;
+               } else if (trb_in_td(xhci, td->start_seg, td->first_trb,
                              td->last_trb, hw_deq, false)) {
                        switch (td->cancel_status) {
                        case TD_CLEARED: /* TD is already no-op */
@@ -1014,6 +1016,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
        struct xhci_td *td = NULL;
        enum xhci_ep_reset_type reset_type;
        struct xhci_command *command;
+       int err;
 
        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
                if (!xhci->devs[slot_id])
@@ -1058,7 +1061,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                                        td->status = -EPROTO;
                        }
                        /* reset ep, reset handler cleans up cancelled tds */
-                       xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
+                       err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
+                                                         reset_type);
+                       if (err)
+                               break;
                        xhci_stop_watchdog_timer_in_irq(xhci, ep);
                        return;
                case EP_STATE_RUNNING:
index ca9385d22f68d424f7b31f1176e9683b740fe3c9..27283654ca08047a9fb0f50441101a4467485218 100644 (file)
@@ -1514,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
  * we need to issue an evaluate context command and wait on it.
  */
 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
-               unsigned int ep_index, struct urb *urb)
+               unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
 {
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
@@ -1545,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                 * changes max packet sizes.
                 */
 
-               command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+               command = xhci_alloc_command(xhci, true, mem_flags);
                if (!command)
                        return -ENOMEM;
 
@@ -1639,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
-                                       ep_index, urb);
+                                       ep_index, urb, mem_flags);
                        if (ret < 0) {
                                xhci_urb_free_priv(urb_priv);
                                urb->hcpriv = NULL;
index 2595a8f057c43e8d5683ba8d130ff2ed6bcf73fb..e417f5ce13d1815425180f04aad0e67de2b2d607 100644 (file)
@@ -1892,6 +1892,7 @@ struct xhci_hcd {
 #define XHCI_DISABLE_SPARSE    BIT_ULL(38)
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK   BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY     BIT_ULL(40)
+#define XHCI_BROKEN_D3COLD     BIT_ULL(41)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
index b3cfe8666ea7dbc4cc2a030b78b70208355cac71..336653091e3b36bb27b4b7633ccc47ccbc78f6a6 100644 (file)
@@ -263,6 +263,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
                return -EINVAL;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!r)
+               return -EINVAL;
 
        pdata = devm_kzalloc(&pdev->dev,
                             sizeof(*pdata) +
index a3dfc77578ea1200129d69e693740900b8721109..26baba3ab7d733d35aa1d46443da0a187a519685 100644 (file)
@@ -61,9 +61,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
        /* Set speed */
        retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
                                 0x01, /* vendor request: set speed */
-                                USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+                                USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                                 tv->speed, /* speed value */
-                                0, NULL, 0, USB_CTRL_GET_TIMEOUT);
+                                0, NULL, 0, USB_CTRL_SET_TIMEOUT);
        if (retval) {
                tv->speed = old;
                dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
index b5d6616442635b8bf4c814bfd629a9ec18e927a3..748139d262633cb1a8bec5c3a5843a2346c6614b 100644 (file)
@@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
        parport_announce_port(pp);
 
        usb_set_intfdata(intf, pp);
+       usb_put_dev(usbdev);
        return 0;
 
 probe_abort:
index eebeadd269461421b7e6915adc1a106c249112d2..6b92d037d8fc839f98621779c9b90c9354fa1a3e 100644 (file)
@@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
 
        glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
        if (IS_ERR(glue->xceiv)) {
-               dev_err(dev, "fail to getting usb-phy %d\n", ret);
                ret = PTR_ERR(glue->xceiv);
+               dev_err(dev, "fail to getting usb-phy %d\n", ret);
                goto err_unregister_usb_phy;
        }
 
index 8f09a387b77385a7c3606f8bbcc8940e72a2cbdd..4c8f0112481f379cce95ffdc4d88bb7d03e2deec 100644 (file)
@@ -2009,9 +2009,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                        schedule_delayed_work(&musb->irq_work,
                                              msecs_to_jiffies(1000));
                        musb->quirk_retries--;
-                       break;
                }
-               fallthrough;
+               break;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
index ee595d1bea0a9f61bb5914be8834294d9a82b4ea..fcb812bc832cc42a4e2571903cdf5bba809c6b13 100644 (file)
@@ -252,9 +252,11 @@ struct cp210x_serial_private {
        u8                      gpio_input;
 #endif
        u8                      partnum;
+       u32                     fw_version;
        speed_t                 min_speed;
        speed_t                 max_speed;
        bool                    use_actual_rate;
+       bool                    no_flow_control;
 };
 
 enum cp210x_event_state {
@@ -398,6 +400,7 @@ struct cp210x_special_chars {
 
 /* CP210X_VENDOR_SPECIFIC values */
 #define CP210X_READ_2NCONFIG   0x000E
+#define CP210X_GET_FW_VER_2N   0x0010
 #define CP210X_READ_LATCH      0x00C2
 #define CP210X_GET_PARTNUM     0x370B
 #define CP210X_GET_PORTCONFIG  0x370C
@@ -537,6 +540,12 @@ struct cp210x_single_port_config {
 #define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX      587
 #define CP210X_2NCONFIG_GPIO_CONTROL_IDX       600
 
+/* CP2102N QFN20 port configuration values */
+#define CP2102N_QFN20_GPIO2_TXLED_MODE         BIT(2)
+#define CP2102N_QFN20_GPIO3_RXLED_MODE         BIT(3)
+#define CP2102N_QFN20_GPIO1_RS485_MODE         BIT(4)
+#define CP2102N_QFN20_GPIO0_CLK_MODE           BIT(6)
+
 /* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
 struct cp210x_gpio_write {
        u8      mask;
@@ -1122,6 +1131,7 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
 static void cp210x_set_flow_control(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
+       struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
        struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
        struct cp210x_special_chars chars;
        struct cp210x_flow_ctl flow_ctl;
@@ -1129,6 +1139,15 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
        u32 ctl_hs;
        int ret;
 
+       /*
+        * Some CP2102N interpret ulXonLimit as ulFlowReplace (erratum
+        * CP2102N_E104). Report back that flow control is not supported.
+        */
+       if (priv->no_flow_control) {
+               tty->termios.c_cflag &= ~CRTSCTS;
+               tty->termios.c_iflag &= ~(IXON | IXOFF);
+       }
+
        if (old_termios &&
                        C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
                        I_IXON(tty) == (old_termios->c_iflag & IXON) &&
@@ -1185,19 +1204,20 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
                port_priv->crtscts = false;
        }
 
-       if (I_IXOFF(tty))
+       if (I_IXOFF(tty)) {
                flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
-       else
+
+               flow_ctl.ulXonLimit = cpu_to_le32(128);
+               flow_ctl.ulXoffLimit = cpu_to_le32(128);
+       } else {
                flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
+       }
 
        if (I_IXON(tty))
                flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
        else
                flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
 
-       flow_ctl.ulXonLimit = cpu_to_le32(128);
-       flow_ctl.ulXoffLimit = cpu_to_le32(128);
-
        dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
                        ctl_hs, flow_repl);
 
@@ -1733,7 +1753,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
        priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
 
        /* 0 indicates GPIO mode, 1 is alternate function */
-       priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+       if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
+               /* QFN20 is special... */
+               if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE)   /* GPIO 0 */
+                       priv->gpio_altfunc |= BIT(0);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
+                       priv->gpio_altfunc |= BIT(1);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
+                       priv->gpio_altfunc |= BIT(2);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
+                       priv->gpio_altfunc |= BIT(3);
+       } else {
+               priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+       }
 
        if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
                /*
@@ -1908,6 +1940,45 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
        priv->use_actual_rate = use_actual_rate;
 }
 
+static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       u8 ver[3];
+       int ret;
+
+       ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
+                       ver, sizeof(ver));
+       if (ret)
+               return ret;
+
+       dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
+                       ver[0], ver[1], ver[2]);
+
+       priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
+
+       return 0;
+}
+
+static void cp210x_determine_quirks(struct usb_serial *serial)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       int ret;
+
+       switch (priv->partnum) {
+       case CP210X_PARTNUM_CP2102N_QFN28:
+       case CP210X_PARTNUM_CP2102N_QFN24:
+       case CP210X_PARTNUM_CP2102N_QFN20:
+               ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
+               if (ret)
+                       break;
+               if (priv->fw_version <= 0x10004)
+                       priv->no_flow_control = true;
+               break;
+       default:
+               break;
+       }
+}
+
 static int cp210x_attach(struct usb_serial *serial)
 {
        int result;
@@ -1928,6 +1999,7 @@ static int cp210x_attach(struct usb_serial *serial)
 
        usb_set_serial_data(serial, priv);
 
+       cp210x_determine_quirks(serial);
        cp210x_init_max_speed(serial);
 
        result = cp210x_gpio_init(serial);
index 6f2659e59b2ee7bdfc7c16da88d3d20e25acd696..4a1f3a95d0177980ca90b8fa56d8b12c1fa3d097 100644 (file)
@@ -611,6 +611,7 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
@@ -1034,6 +1035,9 @@ static const struct usb_device_id id_table_combined[] = {
        /* Sienna devices */
        { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
        { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+       /* IDS GmbH devices */
+       { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
+       { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
        /* U-Blox devices */
        { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
        { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
index 3d47c6d72256e383afad8d42cd2e5b5d1ccb01e5..add602bebd82018ff675808a0bc0f3c7203a9768 100644 (file)
 #define FTDI_NT_ORIONLXM_PID           0x7c90  /* OrionLXm Substation Automation Platform */
 #define FTDI_NT_ORIONLX_PLUS_PID       0x7c91  /* OrionLX+ Substation Automation Platform */
 #define FTDI_NT_ORION_IO_PID           0x7c92  /* Orion I/O */
+#define FTDI_NT_ORIONMX_PID            0x7c93  /* OrionMX */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
 #define UNJO_VID                       0x22B7
 #define UNJO_ISODEBUG_V1_PID           0x150D
 
+/*
+ * IDS GmbH
+ */
+#define IDS_VID                                0x2CAF
+#define IDS_SI31A_PID                  0x13A2
+#define IDS_CM31A_PID                  0x13A3
+
 /*
  * U-Blox products (http://www.u-blox.com).
  */
index 83c62f920c5019331094794a4088dab771216cce..41f1b872d277b43a73041c54288f2c45b882379f 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * USB ZyXEL omni.net LCD PLUS driver
+ * USB ZyXEL omni.net driver
  *
  * Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
  *
 #include <linux/usb/serial.h>
 
 #define DRIVER_AUTHOR "Alessandro Zummo"
-#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
+#define DRIVER_DESC "USB ZyXEL omni.net Driver"
 
 #define ZYXEL_VENDOR_ID                0x0586
 #define ZYXEL_OMNINET_ID       0x1000
+#define ZYXEL_OMNI_56K_PLUS_ID 0x1500
 /* This one seems to be a re-branded ZyXEL device */
 #define BT_IGNITIONPRO_ID      0x2000
 
@@ -40,6 +41,7 @@ static void omninet_port_remove(struct usb_serial_port *port);
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
+       { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
        { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
        { }                                             /* Terminating entry */
 };
@@ -50,7 +52,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
                .owner =        THIS_MODULE,
                .name =         "omninet",
        },
-       .description =          "ZyXEL - omni.net lcd plus usb",
+       .description =          "ZyXEL - omni.net usb",
        .id_table =             id_table,
        .num_bulk_out =         2,
        .calc_num_ports =       omninet_calc_num_ports,
index 3e79a543d3e771d515bdd502c7df5035becbe617..7608584ef4fe78f1394fc74dcce169efc2dc38ec 100644 (file)
@@ -1240,6 +1240,10 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
          .driver_info = NCTRL(0) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff),    /* Telit LE910-S1 (RNDIS) */
+         .driver_info = NCTRL(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),    /* Telit LE910-S1 (ECM) */
+         .driver_info = NCTRL(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),                          /* Telit SBL FN980 flashing device */
          .driver_info = NCTRL(0) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
index fd773d252691b86a42d4b17da31a2e1a6bc68217..940050c314822539260a99361dcc723e6490d1a6 100644 (file)
@@ -113,6 +113,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
+       { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
        { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
        { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
        { }                                     /* Terminating entry */
index 0f681ddbfd288de75af32f8e6453fb4f27844ee0..6097ee8fccb25cdbf6feaa28d49f784c913ca492 100644 (file)
 /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
 #define ADLINK_VENDOR_ID               0x0b63
 #define ADLINK_ND6530_PRODUCT_ID       0x6530
+#define ADLINK_ND6530GC_PRODUCT_ID     0x653a
 
 /* SMART USB Serial Adapter */
 #define SMART_VENDOR_ID        0x0b8c
index 5f2e7f668e687953305918df4bfcc8a99dc7572a..067690dac24cab56af8add647318f48776b4c97a 100644 (file)
@@ -416,7 +416,7 @@ static void qt2_close(struct usb_serial_port *port)
 
        /* flush the port transmit buffer */
        i = usb_control_msg(serial->dev,
-                           usb_rcvctrlpipe(serial->dev, 0),
+                           usb_sndctrlpipe(serial->dev, 0),
                            QT2_FLUSH_DEVICE, 0x40, 1,
                            port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
 
@@ -426,7 +426,7 @@ static void qt2_close(struct usb_serial_port *port)
 
        /* flush the port receive buffer */
        i = usb_control_msg(serial->dev,
-                           usb_rcvctrlpipe(serial->dev, 0),
+                           usb_sndctrlpipe(serial->dev, 0),
                            QT2_FLUSH_DEVICE, 0x40, 0,
                            port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
 
@@ -639,7 +639,7 @@ static int qt2_attach(struct usb_serial *serial)
        int status;
 
        /* power on unit */
-       status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+       status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                                 0xc2, 0x40, 0x8000, 0, NULL, 0,
                                 QT2_USB_TIMEOUT);
        if (status < 0) {
index caa46ac23db90e8ab953c773aa16453604d9050a..310db5abea9d81267a1a66ab0c44749bf231e76d 100644 (file)
@@ -37,6 +37,7 @@
 /* Vendor and product ids */
 #define TI_VENDOR_ID                   0x0451
 #define IBM_VENDOR_ID                  0x04b3
+#define STARTECH_VENDOR_ID             0x14b0
 #define TI_3410_PRODUCT_ID             0x3410
 #define IBM_4543_PRODUCT_ID            0x4543
 #define IBM_454B_PRODUCT_ID            0x454b
@@ -370,6 +371,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+       { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
        { }     /* terminator */
 };
 
@@ -408,6 +410,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+       { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
        { }     /* terminator */
 };
 
index 9da22ae3006c970011624262084f4a97a954fc41..77dabd306ba8d84e11250a2fb6fe24aa2d4a9d64 100644 (file)
@@ -191,6 +191,7 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
        bool match;
        int nval;
        u16 *val;
+       int ret;
        int i;
 
        /*
@@ -218,10 +219,10 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
        if (!val)
                return ERR_PTR(-ENOMEM);
 
-       nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
-       if (nval < 0) {
+       ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
+       if (ret < 0) {
                kfree(val);
-               return ERR_PTR(nval);
+               return ERR_PTR(ret);
        }
 
        for (i = 0; i < nval; i++) {
@@ -238,7 +239,7 @@ find_mux:
        dev = class_find_device(&typec_mux_class, NULL, fwnode,
                                mux_fwnode_match);
 
-       return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+       return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
 }
 
 /**
index 46a25b8db72e56424f6f163ab9763b80845dfa95..ffa8aa12d5f116980f476b309ac6c8d40c566cc9 100644 (file)
@@ -582,10 +582,15 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
        acpi_dev_free_resource_list(&resource_list);
 
        if (!pmc->iom_base) {
-               put_device(&adev->dev);
+               acpi_dev_put(adev);
                return -ENOMEM;
        }
 
+       if (IS_ERR(pmc->iom_base)) {
+               acpi_dev_put(adev);
+               return PTR_ERR(pmc->iom_base);
+       }
+
        pmc->iom_adev = adev;
 
        return 0;
@@ -636,8 +641,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
                        break;
 
                ret = pmc_usb_register_port(pmc, i, fwnode);
-               if (ret)
+               if (ret) {
+                       fwnode_handle_put(fwnode);
                        goto err_remove_ports;
+               }
        }
 
        platform_set_drvdata(pdev, pmc);
@@ -651,7 +658,7 @@ err_remove_ports:
                usb_role_switch_unregister(pmc->port[i].usb_sw);
        }
 
-       put_device(&pmc->iom_adev->dev);
+       acpi_dev_put(pmc->iom_adev);
 
        return ret;
 }
@@ -667,7 +674,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
                usb_role_switch_unregister(pmc->port[i].usb_sw);
        }
 
-       put_device(&pmc->iom_adev->dev);
+       acpi_dev_put(pmc->iom_adev);
 
        return 0;
 }
index c4fdc00a3bc8f543a85c06d3874895419fb1a9f6..63470cf7f4cd9796e3c550382bb8d469364552ea 100644 (file)
@@ -259,6 +259,7 @@ enum frs_typec_current {
 #define ALTMODE_DISCOVERY_MAX  (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
 
 #define GET_SINK_CAP_RETRY_MS  100
+#define SEND_DISCOVER_RETRY_MS 100
 
 struct pd_mode_data {
        int svid_index;         /* current SVID index           */
@@ -366,6 +367,8 @@ struct tcpm_port {
        struct kthread_work vdm_state_machine;
        struct hrtimer enable_frs_timer;
        struct kthread_work enable_frs;
+       struct hrtimer send_discover_timer;
+       struct kthread_work send_discover_work;
        bool state_machine_running;
        bool vdm_sm_running;
 
@@ -398,6 +401,8 @@ struct tcpm_port {
        unsigned int nr_src_pdo;
        u32 snk_pdo[PDO_MAX_OBJECTS];
        unsigned int nr_snk_pdo;
+       u32 snk_vdo_v1[VDO_MAX_OBJECTS];
+       unsigned int nr_snk_vdo_v1;
        u32 snk_vdo[VDO_MAX_OBJECTS];
        unsigned int nr_snk_vdo;
 
@@ -1178,6 +1183,16 @@ static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int del
        }
 }
 
+static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+       if (delay_ms) {
+               hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+       } else {
+               hrtimer_cancel(&port->send_discover_timer);
+               kthread_queue_work(port->wq, &port->send_discover_work);
+       }
+}
+
 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
                           unsigned int delay_ms)
 {
@@ -1534,33 +1549,43 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                        if (PD_VDO_VID(p[0]) != USB_SID_PD)
                                break;
 
-                       if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+                       if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
                                typec_partner_set_svdm_version(port->partner,
                                                               PD_VDO_SVDM_VER(p[0]));
-                       /* 6.4.4.3.1: Only respond as UFP (device) */
-                       if (port->data_role == TYPEC_DEVICE &&
+                               svdm_version = PD_VDO_SVDM_VER(p[0]);
+                       }
+
+                       port->ams = DISCOVER_IDENTITY;
+                       /*
+                        * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
+                        * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
+                        * "wrong configuation" or "Unrecognized"
+                        */
+                       if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
                            port->nr_snk_vdo) {
-                               /*
-                                * Product Type DFP and Connector Type are not defined in SVDM
-                                * version 1.0 and shall be set to zero.
-                                */
-                               if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
-                                       response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
-                                                     & ~IDH_CONN_MASK;
-                               else
-                                       response[1] = port->snk_vdo[0];
-                               for (i = 1; i <  port->nr_snk_vdo; i++)
-                                       response[i + 1] = port->snk_vdo[i];
-                               rlen = port->nr_snk_vdo + 1;
+                               if (svdm_version < SVDM_VER_2_0) {
+                                       for (i = 0; i < port->nr_snk_vdo_v1; i++)
+                                               response[i + 1] = port->snk_vdo_v1[i];
+                                       rlen = port->nr_snk_vdo_v1 + 1;
+
+                               } else {
+                                       for (i = 0; i < port->nr_snk_vdo; i++)
+                                               response[i + 1] = port->snk_vdo[i];
+                                       rlen = port->nr_snk_vdo + 1;
+                               }
                        }
                        break;
                case CMD_DISCOVER_SVID:
+                       port->ams = DISCOVER_SVIDS;
                        break;
                case CMD_DISCOVER_MODES:
+                       port->ams = DISCOVER_MODES;
                        break;
                case CMD_ENTER_MODE:
+                       port->ams = DFP_TO_UFP_ENTER_MODE;
                        break;
                case CMD_EXIT_MODE:
+                       port->ams = DFP_TO_UFP_EXIT_MODE;
                        break;
                case CMD_ATTENTION:
                        /* Attention command does not have response */
@@ -1855,6 +1880,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                                res = tcpm_ams_start(port, DISCOVER_IDENTITY);
                                if (res == 0)
                                        port->send_discover = false;
+                               else if (res == -EAGAIN)
+                                       mod_send_discover_delayed_work(port,
+                                                                      SEND_DISCOVER_RETRY_MS);
                                break;
                        case CMD_DISCOVER_SVID:
                                res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -1880,7 +1908,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                        }
 
                        if (res < 0) {
-                               port->vdm_sm_running = false;
+                               port->vdm_state = VDM_STATE_ERR_BUSY;
                                return;
                        }
                }
@@ -1896,6 +1924,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                port->vdo_data[0] = port->vdo_retry;
                port->vdo_count = 1;
                port->vdm_state = VDM_STATE_READY;
+               tcpm_ams_finish(port);
                break;
        case VDM_STATE_BUSY:
                port->vdm_state = VDM_STATE_ERR_TMOUT;
@@ -1913,6 +1942,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                        tcpm_log(port, "VDM Tx error, retry");
                        port->vdm_retries++;
                        port->vdm_state = VDM_STATE_READY;
+                       if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
+                               tcpm_ams_finish(port);
+               } else {
                        tcpm_ams_finish(port);
                }
                break;
@@ -1961,7 +1993,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
                 port->vdm_state != VDM_STATE_BUSY &&
                 port->vdm_state != VDM_STATE_SEND_MESSAGE);
 
-       if (port->vdm_state == VDM_STATE_ERR_TMOUT)
+       if (port->vdm_state < VDM_STATE_READY)
                port->vdm_sm_running = false;
 
        mutex_unlock(&port->lock);
@@ -2159,20 +2191,25 @@ static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
 
        if (!type) {
                tcpm_log(port, "Alert message received with no type");
+               tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
                return;
        }
 
        /* Just handling non-battery alerts for now */
        if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
-               switch (port->state) {
-               case SRC_READY:
-               case SNK_READY:
+               if (port->pwr_role == TYPEC_SOURCE) {
+                       port->upcoming_state = GET_STATUS_SEND;
+                       tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
+               } else {
+                       /*
+                        * Do not check SinkTxOk here in case the Source doesn't set its Rp to
+                        * SinkTxOk in time.
+                        */
+                       port->ams = GETTING_SOURCE_SINK_STATUS;
                        tcpm_set_state(port, GET_STATUS_SEND, 0);
-                       break;
-               default:
-                       tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
-                       break;
                }
+       } else {
+               tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
        }
 }
 
@@ -2270,6 +2307,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
        bool frs_enable;
        int ret;
 
+       if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
+               port->vdm_state = VDM_STATE_ERR_BUSY;
+               tcpm_ams_finish(port);
+               mod_vdm_delayed_work(port, 0);
+       }
+
        switch (type) {
        case PD_DATA_SOURCE_CAP:
                for (i = 0; i < cnt; i++)
@@ -2390,7 +2433,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                port->nr_sink_caps = cnt;
                port->sink_cap_done = true;
                if (port->ams == GET_SINK_CAPABILITIES)
-                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+                       tcpm_set_state(port, ready_state(port), 0);
                /* Unexpected Sink Capabilities */
                else
                        tcpm_pd_handle_msg(port,
@@ -2400,14 +2443,22 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                                           NONE_AMS);
                break;
        case PD_DATA_VENDOR_DEF:
-               tcpm_handle_vdm_request(port, msg->payload, cnt);
+               if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
+                       tcpm_handle_vdm_request(port, msg->payload, cnt);
+               else if (port->negotiated_rev > PD_REV20)
+                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
                break;
        case PD_DATA_BIST:
                port->bist_request = le32_to_cpu(msg->payload[0]);
                tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
                break;
        case PD_DATA_ALERT:
-               tcpm_handle_alert(port, msg->payload, cnt);
+               if (port->state != SRC_READY && port->state != SNK_READY)
+                       tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+                                            SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+                                            NONE_AMS, 0);
+               else
+                       tcpm_handle_alert(port, msg->payload, cnt);
                break;
        case PD_DATA_BATT_STATUS:
        case PD_DATA_GET_COUNTRY_INFO:
@@ -2442,6 +2493,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
        enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
        enum tcpm_state next_state;
 
+       /*
+        * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
+        * VDM AMS if waiting for VDM responses and will be handled later.
+        */
+       if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
+               port->vdm_state = VDM_STATE_ERR_BUSY;
+               tcpm_ams_finish(port);
+               mod_vdm_delayed_work(port, 0);
+       }
+
        switch (type) {
        case PD_CTRL_GOOD_CRC:
        case PD_CTRL_PING:
@@ -2552,6 +2613,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                        port->sink_cap_done = true;
                        tcpm_set_state(port, ready_state(port), 0);
                        break;
+               case SRC_READY:
+               case SNK_READY:
+                       if (port->vdm_state > VDM_STATE_READY) {
+                               port->vdm_state = VDM_STATE_DONE;
+                               if (tcpm_vdm_ams(port))
+                                       tcpm_ams_finish(port);
+                               mod_vdm_delayed_work(port, 0);
+                               break;
+                       }
+                       fallthrough;
                default:
                        tcpm_pd_handle_state(port,
                                             port->pwr_role == TYPEC_SOURCE ?
@@ -2690,7 +2761,14 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
        enum pd_ext_msg_type type = pd_header_type_le(msg->header);
        unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
 
-       if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
+       /* stopping VDM state machine if interrupted by other Messages */
+       if (tcpm_vdm_ams(port)) {
+               port->vdm_state = VDM_STATE_ERR_BUSY;
+               tcpm_ams_finish(port);
+               mod_vdm_delayed_work(port, 0);
+       }
+
+       if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
                tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
                tcpm_log(port, "Unchunked extended messages unsupported");
                return;
@@ -2704,24 +2782,16 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
 
        switch (type) {
        case PD_EXT_STATUS:
-               /*
-                * If PPS related events raised then get PPS status to clear
-                * (see USB PD 3.0 Spec, 6.5.2.4)
-                */
-               if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
-                   USB_PD_EXT_SDB_PPS_EVENTS)
-                       tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
-                                            GETTING_SOURCE_SINK_STATUS, 0);
-
-               else
-                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
-               break;
        case PD_EXT_PPS_STATUS:
-               /*
-                * For now the PPS status message is used to clear events
-                * and nothing more.
-                */
-               tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+               if (port->ams == GETTING_SOURCE_SINK_STATUS) {
+                       tcpm_ams_finish(port);
+                       tcpm_set_state(port, ready_state(port), 0);
+               } else {
+                       /* unexpected Status or PPS_Status Message */
+                       tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+                                            SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+                                            NONE_AMS, 0);
+               }
                break;
        case PD_EXT_SOURCE_CAP_EXT:
        case PD_EXT_GET_BATT_CAP:
@@ -2784,7 +2854,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
                                 "Data role mismatch, initiating error recovery");
                        tcpm_set_state(port, ERROR_RECOVERY, 0);
                } else {
-                       if (msg->header & PD_HEADER_EXT_HDR)
+                       if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
                                tcpm_pd_ext_msg_request(port, msg);
                        else if (cnt)
                                tcpm_pd_data_request(port, msg);
@@ -3682,14 +3752,6 @@ static inline enum tcpm_state unattached_state(struct tcpm_port *port)
        return SNK_UNATTACHED;
 }
 
-static void tcpm_check_send_discover(struct tcpm_port *port)
-{
-       if ((port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20) &&
-           port->send_discover && port->pd_capable)
-               tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
-       port->send_discover = false;
-}
-
 static void tcpm_swap_complete(struct tcpm_port *port, int result)
 {
        if (port->swap_pending) {
@@ -3926,7 +3988,18 @@ static void run_state_machine(struct tcpm_port *port)
                        break;
                }
 
-               tcpm_check_send_discover(port);
+               /*
+                * 6.4.4.3.1 Discover Identity
+                * "The Discover Identity Command Shall only be sent to SOP when there is an
+                * Explicit Contract."
+                * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+                * port->explicit_contract to decide whether to send the command.
+                */
+               if (port->explicit_contract)
+                       mod_send_discover_delayed_work(port, 0);
+               else
+                       port->send_discover = false;
+
                /*
                 * 6.3.5
                 * Sending ping messages is not necessary if
@@ -4055,7 +4128,7 @@ static void run_state_machine(struct tcpm_port *port)
                if (port->vbus_present) {
                        u32 current_lim = tcpm_get_current_limit(port);
 
-                       if (port->slow_charger_loop || (current_lim > PD_P_SNK_STDBY_MW / 5))
+                       if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
                                current_lim = PD_P_SNK_STDBY_MW / 5;
                        tcpm_set_current_limit(port, current_lim, 5000);
                        tcpm_set_charge(port, true);
@@ -4194,7 +4267,18 @@ static void run_state_machine(struct tcpm_port *port)
                        break;
                }
 
-               tcpm_check_send_discover(port);
+               /*
+                * 6.4.4.3.1 Discover Identity
+                * "The Discover Identity Command Shall only be sent to SOP when there is an
+                * Explicit Contract."
+                * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+                * port->explicit_contract.
+                */
+               if (port->explicit_contract)
+                       mod_send_discover_delayed_work(port, 0);
+               else
+                       port->send_discover = false;
+
                power_supply_changed(port->psy);
                break;
 
@@ -5288,6 +5372,29 @@ unlock:
        mutex_unlock(&port->lock);
 }
 
+static void tcpm_send_discover_work(struct kthread_work *work)
+{
+       struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
+
+       mutex_lock(&port->lock);
+       /* No need to send DISCOVER_IDENTITY anymore */
+       if (!port->send_discover)
+               goto unlock;
+
+       /* Retry if the port is not idle */
+       if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+               mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+               goto unlock;
+       }
+
+       /* Only send the Message if the port is host for PD rev2.0 */
+       if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
+               tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+
+unlock:
+       mutex_unlock(&port->lock);
+}
+
 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
 {
        struct tcpm_port *port = typec_get_drvdata(p);
@@ -5754,6 +5861,15 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
        if (!fwnode)
                return -EINVAL;
 
+       /*
+        * This fwnode has a "compatible" property, but is never populated as a
+        * struct device. Instead we simply parse it to read the properties.
+        * This it breaks fw_devlink=on. To maintain backward compatibility
+        * with existing DT files, we work around this by deleting any
+        * fwnode_links to/from this fwnode.
+        */
+       fw_devlink_purge_absent_suppliers(fwnode);
+
        /* USB data support is optional */
        ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
        if (ret == 0) {
@@ -5841,6 +5957,22 @@ sink:
                        return ret;
        }
 
+       /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
+       if (port->nr_snk_vdo) {
+               ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
+               if (ret < 0)
+                       return ret;
+               else if (ret == 0)
+                       return -ENODATA;
+
+               port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
+               ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
+                                                    port->snk_vdo_v1,
+                                                    port->nr_snk_vdo_v1);
+               if (ret < 0)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -6093,6 +6225,14 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
+static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
+{
+       struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
+
+       kthread_queue_work(port->wq, &port->send_discover_work);
+       return HRTIMER_NORESTART;
+}
+
 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 {
        struct tcpm_port *port;
@@ -6123,12 +6263,15 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
        kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
        kthread_init_work(&port->event_work, tcpm_pd_event_handler);
        kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
+       kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
        hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        port->state_machine_timer.function = state_machine_timer_handler;
        hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
        hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        port->enable_frs_timer.function = enable_frs_timer_handler;
+       hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       port->send_discover_timer.function = send_discover_timer_handler;
 
        spin_lock_init(&port->pd_event_lock);
 
@@ -6195,6 +6338,11 @@ void tcpm_unregister_port(struct tcpm_port *port)
 {
        int i;
 
+       hrtimer_cancel(&port->send_discover_timer);
+       hrtimer_cancel(&port->enable_frs_timer);
+       hrtimer_cancel(&port->vdm_state_machine_timer);
+       hrtimer_cancel(&port->state_machine_timer);
+
        tcpm_reset_port(port);
        for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
                typec_unregister_altmode(port->port_altmode[i]);
index 79ae63950050c8074425ba91a2bc61cc7a830039..5d125339687a34a61b5017b67a756d8ead90ffbc 100644 (file)
@@ -378,7 +378,7 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
                const u8 *data = (void *)msg;
                int i;
 
-               for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+               for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
                        ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
                                           data[i]);
                        if (ret)
index 282c3c825c136d984487afdba97c339515830335..b7d104c80d8570fa12113a746c80fab98f42a855 100644 (file)
@@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
        }
 }
 
-static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
+static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
+                        u32 *pdos, int offset, int num_pdos)
 {
        struct ucsi *ucsi = con->ucsi;
        u64 command;
@@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
 
        command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
        command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
-       command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
+       command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
+       command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
        command |= UCSI_GET_PDOS_SRC_PDOS;
-       ret = ucsi_send_command(ucsi, command, con->src_pdos,
-                              sizeof(con->src_pdos));
-       if (ret < 0) {
+       ret = ucsi_send_command(ucsi, command, pdos + offset,
+                               num_pdos * sizeof(u32));
+       if (ret < 0)
                dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
+       if (ret == 0 && offset == 0)
+               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+
+       return ret;
+}
+
+static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
+{
+       int ret;
+
+       /* UCSI max payload means only getting at most 4 PDOs at a time */
+       ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
+       if (ret < 0)
                return;
-       }
+
        con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
-       if (ret == 0)
-               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+       if (con->num_pdos < UCSI_MAX_PDOS)
+               return;
+
+       /* get the remaining PDOs, if any */
+       ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
+                           PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
+       if (ret < 0)
+               return;
+
+       con->num_pdos += ret / sizeof(u32);
 }
 
 static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
@@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
        case UCSI_CONSTAT_PWR_OPMODE_PD:
                con->rdo = con->status.request_data_obj;
                typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
-               ucsi_get_pdos(con, 1);
+               ucsi_get_src_pdos(con, 1);
                break;
        case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
                con->rdo = 0;
@@ -694,8 +717,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
        ucsi_send_command(con->ucsi, command, NULL, 0);
 
        /* 3. ACK connector change */
-       clear_bit(EVENT_PENDING, &ucsi->flags);
        ret = ucsi_acknowledge_connector_change(ucsi);
+       clear_bit(EVENT_PENDING, &ucsi->flags);
        if (ret) {
                dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
                goto out_unlock;
@@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
        .pr_set = ucsi_pr_swap
 };
 
+/* Caller must call fwnode_handle_put() after use */
 static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
 {
        struct fwnode_handle *fwnode;
@@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
        command |= UCSI_CONNECTOR_NUMBER(con->num);
        ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
        if (ret < 0)
-               goto out;
+               goto out_unlock;
 
        if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
                cap->data = TYPEC_PORT_DRD;
@@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
        trace_ucsi_register_port(con->num, &con->status);
 
 out:
+       fwnode_handle_put(cap->fwnode);
+out_unlock:
        mutex_unlock(&con->lock);
        return ret;
 }
@@ -1227,6 +1253,7 @@ err_unregister:
        }
 
 err_reset:
+       memset(&ucsi->cap, 0, sizeof(ucsi->cap));
        ucsi_reset_ppm(ucsi);
 err:
        return ret;
index 3920e20a9e9ef96d477f434d87ce4c30fe0957d0..cee666790907efeff19af8744f34452440068f7c 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/power_supply.h>
 #include <linux/types.h>
 #include <linux/usb/typec.h>
+#include <linux/usb/pd.h>
 #include <linux/usb/role.h>
 
 /* -------------------------------------------------------------------------- */
@@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
 
 /* GET_PDOS command bits */
 #define UCSI_GET_PDOS_PARTNER_PDO(_r_)         ((u64)(_r_) << 23)
+#define UCSI_GET_PDOS_PDO_OFFSET(_r_)          ((u64)(_r_) << 24)
 #define UCSI_GET_PDOS_NUM_PDOS(_r_)            ((u64)(_r_) << 32)
+#define UCSI_MAX_PDOS                          (4)
 #define UCSI_GET_PDOS_SRC_PDOS                 ((u64)1 << 34)
 
 /* -------------------------------------------------------------------------- */
@@ -302,7 +305,6 @@ struct ucsi {
 
 #define UCSI_MAX_SVID          5
 #define UCSI_MAX_ALTMODES      (UCSI_MAX_SVID * 6)
-#define UCSI_MAX_PDOS          (4)
 
 #define UCSI_TYPEC_VSAFE5V     5000
 #define UCSI_TYPEC_1_5_CURRENT 1500
@@ -330,7 +332,7 @@ struct ucsi_connector {
        struct power_supply *psy;
        struct power_supply_desc psy_desc;
        u32 rdo;
-       u32 src_pdos[UCSI_MAX_PDOS];
+       u32 src_pdos[PDO_MAX_OBJECTS];
        int num_pdos;
 
        struct usb_role_switch *usb_role_sw;
index 189e4385df403337f626e3081364cf056ae54563..dda5dc6f7737897c580a388c33ac5ade3b8de73c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
 #include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/mpfs.h>
 #include "mlx5_vdpa.h"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -1859,11 +1860,16 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
 static void mlx5_vdpa_free(struct vdpa_device *vdev)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_core_dev *pfmdev;
        struct mlx5_vdpa_net *ndev;
 
        ndev = to_mlx5_vdpa_ndev(mvdev);
 
        free_resources(ndev);
+       if (!is_zero_ether_addr(ndev->config.mac)) {
+               pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+               mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
+       }
        mlx5_vdpa_free_resources(&ndev->mvdev);
        mutex_destroy(&ndev->reslock);
 }
@@ -1990,6 +1996,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
 {
        struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
        struct virtio_net_config *config;
+       struct mlx5_core_dev *pfmdev;
        struct mlx5_vdpa_dev *mvdev;
        struct mlx5_vdpa_net *ndev;
        struct mlx5_core_dev *mdev;
@@ -2023,10 +2030,17 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
        if (err)
                goto err_mtu;
 
+       if (!is_zero_ether_addr(config->mac)) {
+               pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+               err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+               if (err)
+                       goto err_mtu;
+       }
+
        mvdev->vdev.dma_dev = mdev->device;
        err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
        if (err)
-               goto err_mtu;
+               goto err_mpfs;
 
        err = alloc_resources(ndev);
        if (err)
@@ -2044,6 +2058,9 @@ err_reg:
        free_resources(ndev);
 err_res:
        mlx5_vdpa_free_resources(&ndev->mvdev);
+err_mpfs:
+       if (!is_zero_ether_addr(config->mac))
+               mlx5_mpfs_del_mac(pfmdev, config->mac);
 err_mtu:
        mutex_destroy(&ndev->reslock);
        put_device(&mvdev->vdev.dev);
index 53ce78d7d07be0f68834046f2c4d10cb6c63fed9..5e2e1b9a9fd3af111dfee7385f315cbfed1f792d 100644 (file)
@@ -2,6 +2,7 @@
 config VFIO_PCI
        tristate "VFIO support for PCI devices"
        depends on VFIO && PCI && EVENTFD
+       depends on MMU
        select VFIO_VIRQFD
        select IRQ_BYPASS_MANAGER
        help
index d57f037f65b85d48942e17a659e9bb7bc0fd137d..70e28efbc51f80e04b87edd010d8d27799795cca 100644 (file)
@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
                        if (len == 0xFF) {
                                len = vfio_ext_cap_len(vdev, ecap, epos);
                                if (len < 0)
-                                       return ret;
+                                       return len;
                        }
                }
 
index 361e5b57e369327e4143717025d7aaad75765d7a..470fcf7dac564cece9a6fdc867b9e646649d6df6 100644 (file)
@@ -291,7 +291,7 @@ err_irq:
        vfio_platform_regions_cleanup(vdev);
 err_reg:
        mutex_unlock(&driver_lock);
-       module_put(THIS_MODULE);
+       module_put(vdev->parent_module);
        return ret;
 }
 
index a0747c35a7781ec3b71b1d06583abbcb05a9e6e2..a3e925a41b0de881d2a2e358ce4cb5fb7c93be33 100644 (file)
@@ -2795,7 +2795,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
                return 0;
        }
 
-       size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
+       size = struct_size(cap_iovas, iova_ranges, iovas);
 
        cap_iovas = kzalloc(size, GFP_KERNEL);
        if (!cap_iovas)
index 39258f9d36a0e44ee01dfbe8ac71e871df923ea4..ef9c57ce090663d5f35c0f80dd425edcaa9a4c44 100644 (file)
@@ -380,7 +380,7 @@ static void vgacon_init(struct vc_data *c, int init)
                vc_resize(c, vga_video_num_columns, vga_video_num_lines);
 
        c->vc_scan_lines = vga_scan_lines;
-       c->vc_font.height = vga_video_font_height;
+       c->vc_font.height = c->vc_cell_height = vga_video_font_height;
        c->vc_complement_mask = 0x7700;
        if (vga_512_chars)
                c->vc_hi_font_mask = 0x0800;
@@ -515,32 +515,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
                switch (CUR_SIZE(c->vc_cursor_type)) {
                case CUR_UNDERLINE:
                        vgacon_set_cursor_size(c->state.x,
-                                              c->vc_font.height -
-                                              (c->vc_font.height <
+                                              c->vc_cell_height -
+                                              (c->vc_cell_height <
                                                10 ? 2 : 3),
-                                              c->vc_font.height -
-                                              (c->vc_font.height <
+                                              c->vc_cell_height -
+                                              (c->vc_cell_height <
                                                10 ? 1 : 2));
                        break;
                case CUR_TWO_THIRDS:
                        vgacon_set_cursor_size(c->state.x,
-                                              c->vc_font.height / 3,
-                                              c->vc_font.height -
-                                              (c->vc_font.height <
+                                              c->vc_cell_height / 3,
+                                              c->vc_cell_height -
+                                              (c->vc_cell_height <
                                                10 ? 1 : 2));
                        break;
                case CUR_LOWER_THIRD:
                        vgacon_set_cursor_size(c->state.x,
-                                              (c->vc_font.height * 2) / 3,
-                                              c->vc_font.height -
-                                              (c->vc_font.height <
+                                              (c->vc_cell_height * 2) / 3,
+                                              c->vc_cell_height -
+                                              (c->vc_cell_height <
                                                10 ? 1 : 2));
                        break;
                case CUR_LOWER_HALF:
                        vgacon_set_cursor_size(c->state.x,
-                                              c->vc_font.height / 2,
-                                              c->vc_font.height -
-                                              (c->vc_font.height <
+                                              c->vc_cell_height / 2,
+                                              c->vc_cell_height -
+                                              (c->vc_cell_height <
                                                10 ? 1 : 2));
                        break;
                case CUR_NONE:
@@ -551,7 +551,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
                        break;
                default:
                        vgacon_set_cursor_size(c->state.x, 1,
-                                              c->vc_font.height);
+                                              c->vc_cell_height);
                        break;
                }
                break;
@@ -562,13 +562,13 @@ static int vgacon_doresize(struct vc_data *c,
                unsigned int width, unsigned int height)
 {
        unsigned long flags;
-       unsigned int scanlines = height * c->vc_font.height;
+       unsigned int scanlines = height * c->vc_cell_height;
        u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
 
        raw_spin_lock_irqsave(&vga_lock, flags);
 
        vgacon_xres = width * VGA_FONTWIDTH;
-       vgacon_yres = height * c->vc_font.height;
+       vgacon_yres = height * c->vc_cell_height;
        if (vga_video_type >= VIDEO_TYPE_VGAC) {
                outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
                max_scan = inb_p(vga_video_port_val);
@@ -623,9 +623,9 @@ static int vgacon_doresize(struct vc_data *c,
 static int vgacon_switch(struct vc_data *c)
 {
        int x = c->vc_cols * VGA_FONTWIDTH;
-       int y = c->vc_rows * c->vc_font.height;
+       int y = c->vc_rows * c->vc_cell_height;
        int rows = screen_info.orig_video_lines * vga_default_font_height/
-               c->vc_font.height;
+               c->vc_cell_height;
        /*
         * We need to save screen size here as it's the only way
         * we can spot the screen has been resized and we need to
@@ -1038,7 +1038,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
                                cursor_size_lastto = 0;
                                c->vc_sw->con_cursor(c, CM_DRAW);
                        }
-                       c->vc_font.height = fontheight;
+                       c->vc_font.height = c->vc_cell_height = fontheight;
                        vc_resize(c, 0, rows);  /* Adjust console size */
                }
        }
@@ -1086,12 +1086,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
        if ((width << 1) * height > vga_vram_size)
                return -EINVAL;
 
+       if (user) {
+               /*
+                * Ho ho!  Someone (svgatextmode, eh?) may have reprogrammed
+                * the video mode!  Set the new defaults then and go away.
+                */
+               screen_info.orig_video_cols = width;
+               screen_info.orig_video_lines = height;
+               vga_default_font_height = c->vc_cell_height;
+               return 0;
+       }
        if (width % 2 || width > screen_info.orig_video_cols ||
            height > (screen_info.orig_video_lines * vga_default_font_height)/
-           c->vc_font.height)
-               /* let svgatextmode tinker with video timings and
-                  return success */
-               return (user) ? 0 : -EINVAL;
+           c->vc_cell_height)
+               return -EINVAL;
 
        if (con_is_visible(c) && !vga_is_gfx) /* who knows */
                vgacon_doresize(c, width, height);
index b292887a248150d1669cfe85d857f71a5be3795e..a591d291b231af8531174bd7dd8057d767ab13f1 100644 (file)
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        get_page(page);
+
+       if (vmf->vma->vm_file)
+               page->mapping = vmf->vma->vm_file->f_mapping;
+       else
+               printk(KERN_ERR "no mapping available\n");
+
+       BUG_ON(!page->mapping);
        page->index = vmf->pgoff;
 
        vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .page_mkwrite   = fb_deferred_io_mkwrite,
 };
 
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+       if (!PageDirty(page))
+               SetPageDirty(page);
+       return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+       .set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
+void fb_deferred_io_open(struct fb_info *info,
+                        struct inode *inode,
+                        struct file *file)
+{
+       file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct page *page;
+       int i;
 
        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);
+
+       /* clear out the mapping that we setup */
+       for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+               page = fb_deferred_io_page(info, i);
+               page->mapping = NULL;
+       }
+
        mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
index 3406067985b1f3fbe3c379f86a2e2233d6757d94..22bb3892f6bd1d5601b2ac7c74ac4a319dd3cd7f 100644 (file)
@@ -2019,7 +2019,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
                        return -EINVAL;
 
                pr_debug("resize now %ix%i\n", var.xres, var.yres);
-               if (con_is_visible(vc)) {
+               if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
                        var.activate = FB_ACTIVATE_NOW |
                                FB_ACTIVATE_FORCE;
                        fb_set_var(info, &var);
index 072780b0e5702e9c39cc911b2deb04c1015577d5..98f193078c05aee4aabfb2d1d79cb8b6d2c31570 100644 (file)
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
                if (res)
                        module_put(info->fbops->owner);
        }
+#ifdef CONFIG_FB_DEFERRED_IO
+       if (info->fbdefio)
+               fb_deferred_io_open(info, inode, file);
+#endif
 out:
        unlock_fb_info(info);
        if (res)
index 8bbac7182ad32a7e32f62142ba48612e83586834..bd3d07aa4f0ec48340a1373266f2231a34e07313 100644 (file)
@@ -286,7 +286,7 @@ static int hga_card_detect(void)
 
        hga_vram = ioremap(0xb0000, hga_vram_len);
        if (!hga_vram)
-               goto error;
+               return -ENOMEM;
 
        if (request_region(0x3b0, 12, "hgafb"))
                release_io_ports = 1;
@@ -346,13 +346,18 @@ static int hga_card_detect(void)
                        hga_type_name = "Hercules";
                        break;
        }
-       return 1;
+       return 0;
 error:
        if (release_io_ports)
                release_region(0x3b0, 12);
        if (release_io_port)
                release_region(0x3bf, 1);
-       return 0;
+
+       iounmap(hga_vram);
+
+       pr_err("hgafb: HGA card not detected.\n");
+
+       return -EINVAL;
 }
 
 /**
@@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
 static int hgafb_probe(struct platform_device *pdev)
 {
        struct fb_info *info;
+       int ret;
 
-       if (! hga_card_detect()) {
-               printk(KERN_INFO "hgafb: HGA card not detected.\n");
-               if (hga_vram)
-                       iounmap(hga_vram);
-               return -EINVAL;
-       }
+       ret = hga_card_detect();
+       if (ret)
+               return ret;
 
        printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
                hga_type_name, hga_vram_len/1024);
index 3ac053b884958e040f8b77587518e993b941c585..16f272a508112c91a6381f2d08081ced787de643 100644 (file)
@@ -1469,6 +1469,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct imstt_par *par;
        struct fb_info *info;
        struct device_node *dp;
+       int ret = -ENOMEM;
        
        dp = pci_device_to_OF_node(pdev);
        if(dp)
@@ -1504,28 +1505,37 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                default:
                        printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
                                         "contact maintainer.\n", pdev->device);
-                       release_mem_region(addr, size);
-                       framebuffer_release(info);
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto error;
        }
 
        info->fix.smem_start = addr;
        info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
                                            0x400000 : 0x800000);
-       if (!info->screen_base) {
-               release_mem_region(addr, size);
-               framebuffer_release(info);
-               return -ENOMEM;
-       }
+       if (!info->screen_base)
+               goto error;
        info->fix.mmio_start = addr + 0x800000;
        par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+       if (!par->dc_regs)
+               goto error;
        par->cmap_regs_phys = addr + 0x840000;
        par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+       if (!par->cmap_regs)
+               goto error;
        info->pseudo_palette = par->palette;
        init_imstt(info);
 
        pci_set_drvdata(pdev, info);
        return 0;
+
+error:
+       if (par->dc_regs)
+               iounmap(par->dc_regs);
+       if (info->screen_base)
+               iounmap(info->screen_base);
+       release_mem_region(addr, size);
+       framebuffer_release(info);
+       return ret;
 }
 
 static void imsttfb_remove(struct pci_dev *pdev)
index f01d58c7a042ed747ce2c9227af0c5429a78546e..a3e7be96527d7e3dc1d1c207b5b3abc14578c60e 100644 (file)
@@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                err = mmu_interval_notifier_insert_locked(
                        &map->notifier, vma->vm_mm, vma->vm_start,
                        vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
-               if (err)
+               if (err) {
+                       map->vma = NULL;
                        goto out_unlock_put;
+               }
        }
        mutex_unlock(&priv->lock);
 
index 4c89afc0df62892dabc5a402d21c9618767b25a4..24d11861ac7d890e86c298cfb6aa277e5ab8a60d 100644 (file)
@@ -164,6 +164,11 @@ int __ref xen_swiotlb_init(void)
        int rc = -ENOMEM;
        char *start;
 
+       if (io_tlb_default_mem != NULL) {
+               pr_warn("swiotlb buffer already initialized\n");
+               return -EEXIST;
+       }
+
 retry:
        m_ret = XEN_SWIOTLB_ENOMEM;
        order = get_order(bytes);
index e64e6befc63b745d1f97f7fdb0a428e1a931e96b..87e6b7db892f54645aea32030687236919f9ea1c 100644 (file)
@@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
        }
 
        pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
-       if (!pgmap)
+       if (!pgmap) {
+               ret = -ENOMEM;
                goto err_pgmap;
+       }
 
        pgmap->type = MEMORY_DEVICE_GENERIC;
        pgmap->range = (struct range) {
index 4162d0e7e00d7ed5334d20c2ad2ac226fe27837f..cc7450f2b2a93eda90da1602725b18f4e44b8028 100644 (file)
@@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
                                   struct pci_dev *dev, int devid,
                                   publish_pci_dev_cb publish_cb)
 {
-       int err = 0, slot, func = -1;
+       int err = 0, slot, func = PCI_FUNC(dev->devfn);
        struct pci_dev_entry *t, *dev_entry;
        struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
 
@@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 
        /*
         * Keep multi-function devices together on the virtual PCI bus, except
-        * virtual functions.
+        * that we want to keep virtual functions at func 0 on their own. They
+        * aren't multi-function devices and hence their presence at func 0
+        * may cause guests to not scan the other functions.
         */
-       if (!dev->is_virtfn) {
+       if (!dev->is_virtfn || func) {
                for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
                        if (list_empty(&vpci_dev->dev_list[slot]))
                                continue;
 
                        t = list_entry(list_first(&vpci_dev->dev_list[slot]),
                                       struct pci_dev_entry, list);
+                       if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
+                               continue;
 
                        if (match_slot(dev, t->dev)) {
                                dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
-                                        slot, PCI_FUNC(dev->devfn));
+                                        slot, func);
                                list_add_tail(&dev_entry->list,
                                              &vpci_dev->dev_list[slot]);
-                               func = PCI_FUNC(dev->devfn);
                                goto unlock;
                        }
                }
@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
                                 slot);
                        list_add_tail(&dev_entry->list,
                                      &vpci_dev->dev_list[slot]);
-                       func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
                        goto unlock;
                }
        }
index 5188f02e75fb3202d27f9f57dd42cb5aa0232627..c09c7ebd6968d03cf115ccf7af9a4a8d477cd8a9 100644 (file)
@@ -359,7 +359,8 @@ out:
        return err;
 }
 
-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
+                                enum xenbus_state state)
 {
        int err = 0;
        int num_devs;
@@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
        dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
 
        mutex_lock(&pdev->dev_lock);
-       /* Make sure we only reconfigure once */
-       if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-           XenbusStateReconfiguring)
+       if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
                goto out;
 
        err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
@@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
                }
        }
 
+       if (state != XenbusStateReconfiguring)
+               /* Make sure we only reconfigure once. */
+               goto out;
+
        err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
        if (err) {
                xenbus_dev_fatal(pdev->xdev, err,
@@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
                break;
 
        case XenbusStateReconfiguring:
-               xen_pcibk_reconfigure(pdev);
+               xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
                break;
 
        case XenbusStateConnected:
@@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
                xen_pcibk_setup_backend(pdev);
                break;
 
+       case XenbusStateInitialised:
+               /*
+                * We typically move to Initialised when the first device was
+                * added. Hence subsequent devices getting added may need
+                * reconfiguring.
+                */
+               xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
+               break;
+
        default:
                break;
        }
index a4e9e6e07e9397321f61567a047f3fafb574fa62..d3c6bb22c5f4890081c2e5bf06af1d34fc63cc86 100644 (file)
@@ -322,6 +322,8 @@ static int afs_deliver_cb_callback(struct afs_call *call)
                        return ret;
 
                call->unmarshall++;
+               fallthrough;
+
        case 5:
                break;
        }
@@ -418,6 +420,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
                        r->node[loop] = ntohl(b[loop + 5]);
 
                call->unmarshall++;
+               fallthrough;
 
        case 2:
                break;
@@ -530,6 +533,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
                        r->node[loop] = ntohl(b[loop + 5]);
 
                call->unmarshall++;
+               fallthrough;
 
        case 2:
                break;
@@ -663,6 +667,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
 
                afs_extract_to_tmp(call);
                call->unmarshall++;
+               fallthrough;
 
        case 3:
                break;
index 9fbe5a5ec9bd493e35394c84e4c4ce6f8fd1b541..78719f2f567e9fbce41a927f012f0dffa45329a4 100644 (file)
@@ -1919,7 +1919,9 @@ static void afs_rename_edit_dir(struct afs_operation *op)
        new_inode = d_inode(new_dentry);
        if (new_inode) {
                spin_lock(&new_inode->i_lock);
-               if (new_inode->i_nlink > 0)
+               if (S_ISDIR(new_inode->i_mode))
+                       clear_nlink(new_inode);
+               else if (new_inode->i_nlink > 0)
                        drop_nlink(new_inode);
                spin_unlock(&new_inode->i_lock);
        }
index 2f695a260442553dffd102e4cef79a25c91b0c90..dd3f45d906d23cf9ea8926b876a51099fd525cae 100644 (file)
@@ -388,6 +388,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                req->file_size = vp->scb.status.size;
 
                call->unmarshall++;
+               fallthrough;
 
        case 5:
                break;
@@ -1408,6 +1409,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
                _debug("motd '%s'", p);
 
                call->unmarshall++;
+               fallthrough;
 
        case 8:
                break;
@@ -1845,6 +1847,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
                xdr_decode_AFSVolSync(&bp, &op->volsync);
 
                call->unmarshall++;
+               fallthrough;
 
        case 6:
                break;
@@ -1979,6 +1982,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
                xdr_decode_AFSVolSync(&bp, &op->volsync);
 
                call->unmarshall++;
+               fallthrough;
 
        case 4:
                break;
index dc9327332f069067fe1204cab325916054e060bf..00fca3c66ba616331d748f3280ae4564206efa98 100644 (file)
@@ -593,6 +593,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
                if (ret < 0)
                        return ret;
                call->unmarshall = 6;
+               fallthrough;
 
        case 6:
                break;
index 3edb6204b937046db4bcf314beee115282c2a87e..a523bb86915d0d8dccc363561a276eeff7393e7c 100644 (file)
@@ -730,7 +730,7 @@ static int afs_writepages_region(struct address_space *mapping,
                        return ret;
                }
 
-               start += ret * PAGE_SIZE;
+               start += ret;
 
                cond_resched();
        } while (wbc->nr_to_write > 0);
index b8abccd03e5d55efd2abf8392420fdf1856c3479..6cc4d4cfe0c286dd3268969e834b59e2e9a284d7 100644 (file)
@@ -1244,6 +1244,9 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
 
        lockdep_assert_held(&bdev->bd_mutex);
 
+       if (!(disk->flags & GENHD_FL_UP))
+               return -ENXIO;
+
 rescan:
        if (bdev->bd_part_count)
                return -EBUSY;
@@ -1298,6 +1301,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode)
        struct gendisk *disk = bdev->bd_disk;
        int ret = 0;
 
+       if (!(disk->flags & GENHD_FL_UP))
+               return -ENXIO;
+
        if (!bdev->bd_openers) {
                if (!bdev_is_partition(bdev)) {
                        ret = 0;
@@ -1332,8 +1338,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode)
                        whole->bd_part_count++;
                        mutex_unlock(&whole->bd_mutex);
 
-                       if (!(disk->flags & GENHD_FL_UP) ||
-                           !bdev_nr_sectors(bdev)) {
+                       if (!bdev_nr_sectors(bdev)) {
                                __blkdev_put(whole, mode, 1);
                                bdput(whole);
                                return -ENXIO;
@@ -1364,16 +1369,12 @@ struct block_device *blkdev_get_no_open(dev_t dev)
        struct block_device *bdev;
        struct gendisk *disk;
 
-       down_read(&bdev_lookup_sem);
        bdev = bdget(dev);
        if (!bdev) {
-               up_read(&bdev_lookup_sem);
                blk_request_module(dev);
-               down_read(&bdev_lookup_sem);
-
                bdev = bdget(dev);
                if (!bdev)
-                       goto unlock;
+                       return NULL;
        }
 
        disk = bdev->bd_disk;
@@ -1383,14 +1384,11 @@ struct block_device *blkdev_get_no_open(dev_t dev)
                goto put_disk;
        if (!try_module_get(bdev->bd_disk->fops->owner))
                goto put_disk;
-       up_read(&bdev_lookup_sem);
        return bdev;
 put_disk:
        put_disk(disk);
 bdput:
        bdput(bdev);
-unlock:
-       up_read(&bdev_lookup_sem);
        return NULL;
 }
 
index 2bea01d23a5b57352cb659bbd5a20aa06017fc89..1346d698463a6dcd5021a94bb93ce005ecaae555 100644 (file)
@@ -28,6 +28,7 @@
 #include "compression.h"
 #include "extent_io.h"
 #include "extent_map.h"
+#include "zoned.h"
 
 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
 
@@ -349,6 +350,7 @@ static void end_compressed_bio_write(struct bio *bio)
         */
        inode = cb->inode;
        cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
+       btrfs_record_physical_zoned(inode, cb->start, bio);
        btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
                        cb->start, cb->start + cb->len - 1,
                        bio->bi_status == BLK_STS_OK);
@@ -401,6 +403,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        u64 first_byte = disk_start;
        blk_status_t ret;
        int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
+       const bool use_append = btrfs_use_zone_append(inode, disk_start);
+       const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
 
        WARN_ON(!PAGE_ALIGNED(start));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
@@ -418,10 +422,31 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        cb->nr_pages = nr_pages;
 
        bio = btrfs_bio_alloc(first_byte);
-       bio->bi_opf = REQ_OP_WRITE | write_flags;
+       bio->bi_opf = bio_op | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
 
+       if (use_append) {
+               struct extent_map *em;
+               struct map_lookup *map;
+               struct block_device *bdev;
+
+               em = btrfs_get_chunk_map(fs_info, disk_start, PAGE_SIZE);
+               if (IS_ERR(em)) {
+                       kfree(cb);
+                       bio_put(bio);
+                       return BLK_STS_NOTSUPP;
+               }
+
+               map = em->map_lookup;
+               /* We only support single profile for now */
+               ASSERT(map->num_stripes == 1);
+               bdev = map->stripes[0].dev->bdev;
+
+               bio_set_dev(bio, bdev);
+               free_extent_map(em);
+       }
+
        if (blkcg_css) {
                bio->bi_opf |= REQ_CGROUP_PUNT;
                kthread_associate_blkcg(blkcg_css);
@@ -432,6 +457,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;
+               int len = 0;
 
                page = compressed_pages[pg_index];
                page->mapping = inode->vfs_inode.i_mapping;
@@ -439,9 +465,20 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);
 
+               /*
+                * Page can only be added to bio if the current bio fits in
+                * stripe.
+                */
+               if (!submit) {
+                       if (pg_index == 0 && use_append)
+                               len = bio_add_zone_append_page(bio, page,
+                                                              PAGE_SIZE, 0);
+                       else
+                               len = bio_add_page(bio, page, PAGE_SIZE, 0);
+               }
+
                page->mapping = NULL;
-               if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
-                   PAGE_SIZE) {
+               if (submit || len < PAGE_SIZE) {
                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
@@ -465,11 +502,15 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                        }
 
                        bio = btrfs_bio_alloc(first_byte);
-                       bio->bi_opf = REQ_OP_WRITE | write_flags;
+                       bio->bi_opf = bio_op | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        if (blkcg_css)
                                bio->bi_opf |= REQ_CGROUP_PUNT;
+                       /*
+                        * Use bio_add_page() to ensure the bio has at least one
+                        * page.
+                        */
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
index f83fd3cbf2439b9856285725bcf7d9b069dfca37..9fb76829a281c8d0d38936dcd0b225af54d5dfc4 100644 (file)
@@ -3127,7 +3127,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               struct btrfs_inode *inode, u64 new_size,
                               u32 min_type);
 
-int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
                               bool in_reclaim_context);
 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
index c9a3036c23bfcd10e040fb12e806e1f8fde4575f..8d386a5587ee901eaf41899acdb84b5e427ab8c4 100644 (file)
@@ -2648,6 +2648,24 @@ static int validate_super(struct btrfs_fs_info *fs_info,
                ret = -EINVAL;
        }
 
+       if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+                  BTRFS_FSID_SIZE)) {
+               btrfs_err(fs_info,
+               "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+                       fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+               ret = -EINVAL;
+       }
+
+       if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+           memcmp(fs_info->fs_devices->metadata_uuid,
+                  fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+               btrfs_err(fs_info,
+"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+                       fs_info->super_copy->metadata_uuid,
+                       fs_info->fs_devices->metadata_uuid);
+               ret = -EINVAL;
+       }
+
        if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
                   BTRFS_FSID_SIZE) != 0) {
                btrfs_err(fs_info,
@@ -3279,14 +3297,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 
        disk_super = fs_info->super_copy;
 
-       ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
-                      BTRFS_FSID_SIZE));
-
-       if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
-               ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
-                               fs_info->super_copy->metadata_uuid,
-                               BTRFS_FSID_SIZE));
-       }
 
        features = btrfs_super_flags(disk_super);
        if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
index 7a28314189b4a3c0e05aeb14990adcbc33c3d686..3d5c35e4cb76e095439904c37363025dbbe2d584 100644 (file)
@@ -1340,12 +1340,16 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
                stripe = bbio->stripes;
                for (i = 0; i < bbio->num_stripes; i++, stripe++) {
                        u64 bytes;
+                       struct btrfs_device *device = stripe->dev;
 
-                       if (!stripe->dev->bdev) {
+                       if (!device->bdev) {
                                ASSERT(btrfs_test_opt(fs_info, DEGRADED));
                                continue;
                        }
 
+                       if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
+                               continue;
+
                        ret = do_discard_extent(stripe, &bytes);
                        if (!ret) {
                                discarded_bytes += bytes;
@@ -1864,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
        trace_run_delayed_ref_head(fs_info, head, 0);
        btrfs_delayed_ref_unlock(head);
        btrfs_put_delayed_ref_head(head);
-       return 0;
+       return ret;
 }
 
 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
index 074a78a202b8363b5aee1a74d33cf200b0b164e4..dee2dafbc872f379aa2fc861a822104aea7768f0 100644 (file)
@@ -3753,7 +3753,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                /* Note that em_end from extent_map_end() is exclusive */
                iosize = min(em_end, end + 1) - cur;
 
-               if (btrfs_use_zone_append(inode, em))
+               if (btrfs_use_zone_append(inode, em->block_start))
                        opf = REQ_OP_ZONE_APPEND;
 
                free_extent_map(em);
@@ -5196,7 +5196,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                  u64 start, u64 len)
 {
        int ret = 0;
-       u64 off = start;
+       u64 off;
        u64 max = start + len;
        u32 flags = 0;
        u32 found_type;
@@ -5231,6 +5231,11 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                goto out_free_ulist;
        }
 
+       /*
+        * We can't initialize that to 'start' as this could miss extents due
+        * to extent item merging
+        */
+       off = 0;
        start = round_down(start, btrfs_inode_sectorsize(inode));
        len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
 
index 294602f139efa624dec02be6d06448ce1865ece9..441cee7fbb6290a15b2bbd39efc5dd4cb53633ab 100644 (file)
@@ -788,7 +788,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
-       int ret;
+       int ret = 0;
        const u32 csum_size = fs_info->csum_size;
        u32 blocksize_bits = fs_info->sectorsize_bits;
 
@@ -806,6 +806,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -862,7 +863,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_del_items(trans, root, path,
                                              path->slots[0], del_nr);
                        if (ret)
-                               goto out;
+                               break;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
@@ -906,8 +907,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, ret);
-                               goto out;
+                               break;
                        }
+                       ret = 0;
 
                        key.offset = end_byte - 1;
                } else {
@@ -917,12 +919,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                }
                btrfs_release_path(path);
        }
-       ret = 0;
-out:
        btrfs_free_path(path);
        return ret;
 }
 
+static int find_next_csum_offset(struct btrfs_root *root,
+                                struct btrfs_path *path,
+                                u64 *next_offset)
+{
+       const u32 nritems = btrfs_header_nritems(path->nodes[0]);
+       struct btrfs_key found_key;
+       int slot = path->slots[0] + 1;
+       int ret;
+
+       if (nritems == 0 || slot >= nritems) {
+               ret = btrfs_next_leaf(root, path);
+               if (ret < 0) {
+                       return ret;
+               } else if (ret > 0) {
+                       *next_offset = (u64)-1;
+                       return 0;
+               }
+               slot = path->slots[0];
+       }
+
+       btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+
+       if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+           found_key.type != BTRFS_EXTENT_CSUM_KEY)
+               *next_offset = (u64)-1;
+       else
+               *next_offset = found_key.offset;
+
+       return 0;
+}
+
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
@@ -938,7 +969,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
-       u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
@@ -981,26 +1011,10 @@ again:
                        goto insert;
                }
        } else {
-               int slot = path->slots[0] + 1;
-               /* we didn't find a csum item, insert one */
-               nritems = btrfs_header_nritems(path->nodes[0]);
-               if (!nritems || (path->slots[0] >= nritems - 1)) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret < 0) {
-                               goto out;
-                       } else if (ret > 0) {
-                               found_next = 1;
-                               goto insert;
-                       }
-                       slot = path->slots[0];
-               }
-               btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
-               if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
-                   found_key.type != BTRFS_EXTENT_CSUM_KEY) {
-                       found_next = 1;
-                       goto insert;
-               }
-               next_offset = found_key.offset;
+               /* We didn't find a csum item, insert one. */
+               ret = find_next_csum_offset(root, path, &next_offset);
+               if (ret < 0)
+                       goto out;
                found_next = 1;
                goto insert;
        }
@@ -1056,8 +1070,48 @@ extend_csum:
                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sectorsize_bits;
                WARN_ON(tmp < 1);
+               extend_nr = max_t(int, 1, tmp);
+
+               /*
+                * A log tree can already have checksum items with a subset of
+                * the checksums we are trying to log. This can happen after
+                * doing a sequence of partial writes into prealloc extents and
+                * fsyncs in between, with a full fsync logging a larger subrange
+                * of an extent for which a previous fast fsync logged a smaller
+                * subrange. And this happens in particular due to merging file
+                * extent items when we complete an ordered extent for a range
+                * covered by a prealloc extent - this is done at
+                * btrfs_mark_extent_written().
+                *
+                * So if we try to extend the previous checksum item, which has
+                * a range that ends at the start of the range we want to insert,
+                * make sure we don't extend beyond the start offset of the next
+                * checksum item. If we are at the last item in the leaf, then
+                * forget the optimization of extending and add a new checksum
+                * item - it is not worth the complexity of releasing the path,
+                * getting the first key for the next leaf, repeat the btree
+                * search, etc, because log trees are temporary anyway and it
+                * would only save a few bytes of leaf space.
+                */
+               if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+                       if (path->slots[0] + 1 >=
+                           btrfs_header_nritems(path->nodes[0])) {
+                               ret = find_next_csum_offset(root, path, &next_offset);
+                               if (ret < 0)
+                                       goto out;
+                               found_next = 1;
+                               goto insert;
+                       }
+
+                       ret = find_next_csum_offset(root, path, &next_offset);
+                       if (ret < 0)
+                               goto out;
+
+                       tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
+                       if (tmp <= INT_MAX)
+                               extend_nr = min_t(int, extend_nr, tmp);
+               }
 
-               extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff,
                           MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
index 864c08d08a353452edc957e75932cb91b2c19538..55f68422061d1aac01d38a5c459f69c5d13d1fc6 100644 (file)
@@ -1094,7 +1094,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        int del_nr = 0;
        int del_slot = 0;
        int recow;
-       int ret;
+       int ret = 0;
        u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
@@ -1315,7 +1315,7 @@ again:
        }
 out:
        btrfs_free_path(path);
-       return 0;
+       return ret;
 }
 
 /*
@@ -2067,6 +2067,30 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
        return ret;
 }
 
+static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
+{
+       struct btrfs_inode *inode = BTRFS_I(ctx->inode);
+       struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+       if (btrfs_inode_in_log(inode, fs_info->generation) &&
+           list_empty(&ctx->ordered_extents))
+               return true;
+
+       /*
+        * If we are doing a fast fsync we can not bail out if the inode's
+        * last_trans is <= then the last committed transaction, because we only
+        * update the last_trans of the inode during ordered extent completion,
+        * and for a fast fsync we don't wait for that, we only wait for the
+        * writeback to complete.
+        */
+       if (inode->last_trans <= fs_info->last_trans_committed &&
+           (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
+            list_empty(&ctx->ordered_extents)))
+               return true;
+
+       return false;
+}
+
 /*
  * fsync call for both files and directories.  This logs the inode into
  * the tree log instead of forcing full commits whenever possible.
@@ -2185,17 +2209,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        atomic_inc(&root->log_batch);
 
-       /*
-        * If we are doing a fast fsync we can not bail out if the inode's
-        * last_trans is <= then the last committed transaction, because we only
-        * update the last_trans of the inode during ordered extent completion,
-        * and for a fast fsync we don't wait for that, we only wait for the
-        * writeback to complete.
-        */
        smp_mb();
-       if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
-           (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
-            (full_sync || list_empty(&ctx.ordered_extents)))) {
+       if (skip_inode_logging(&ctx)) {
                /*
                 * We've had everything committed since the last time we were
                 * modified so clear this flag in case it was set for whatever
index e54466fc101f775a46c04dca21c559ee00986b1c..4806295116d884c510b479569c53a865c326b556 100644 (file)
@@ -3949,7 +3949,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
 {
        struct btrfs_block_group *block_group;
        struct rb_node *node;
-       int ret;
+       int ret = 0;
 
        btrfs_info(fs_info, "cleaning free space cache v1");
 
index 4af336008b12fed5d6b4224c8b1f9ea98e563f88..46f392943f4d0a051c80272b444d068e800f7816 100644 (file)
@@ -3000,6 +3000,18 @@ out:
        if (ret || truncated) {
                u64 unwritten_start = start;
 
+               /*
+                * If we failed to finish this ordered extent for any reason we
+                * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
+                * extent, and mark the inode with the error if it wasn't
+                * already set.  Any error during writeback would have already
+                * set the mapping error, so we need to set it if we're the ones
+                * marking this ordered extent as failed.
+                */
+               if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
+                                            &ordered_extent->flags))
+                       mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+
                if (truncated)
                        unwritten_start += logical_len;
                clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
@@ -3241,6 +3253,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
                inode = list_first_entry(&fs_info->delayed_iputs,
                                struct btrfs_inode, delayed_iput);
                run_delayed_iput_locked(fs_info, inode);
+               cond_resched_lock(&fs_info->delayed_iput_lock);
        }
        spin_unlock(&fs_info->delayed_iput_lock);
 }
@@ -7785,7 +7798,7 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
        iomap->bdev = fs_info->fs_devices->latest_bdev;
        iomap->length = len;
 
-       if (write && btrfs_use_zone_append(BTRFS_I(inode), em))
+       if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
                iomap->flags |= IOMAP_F_ZONE_APPEND;
 
        free_extent_map(em);
@@ -9075,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       bool need_abort = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
@@ -9134,6 +9148,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             old_idx);
                if (ret)
                        goto out_fail;
+               need_abort = true;
        }
 
        /* And now for the dest. */
@@ -9149,8 +9164,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             new_ino,
                                             btrfs_ino(BTRFS_I(old_dir)),
                                             new_idx);
-               if (ret)
+               if (ret) {
+                       if (need_abort)
+                               btrfs_abort_transaction(trans, ret);
                        goto out_fail;
+               }
        }
 
        /* Update inode version and ctime/mtime. */
@@ -9678,7 +9696,7 @@ out:
        return ret;
 }
 
-int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
 {
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
@@ -9691,7 +9709,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return -EROFS;
 
-       return start_delalloc_inodes(root, &wbc, true, false);
+       return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
 }
 
 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
index ee1dbabb5d3c42d35e4be395120729de5383a3d5..5dc2fd843ae37727609a9e4aa5950478ea5280c9 100644 (file)
@@ -259,6 +259,8 @@ int btrfs_fileattr_set(struct user_namespace *mnt_userns,
        if (!fa->flags_valid) {
                /* 1 item for the inode */
                trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
                goto update_flags;
        }
 
@@ -907,7 +909,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
         */
        btrfs_drew_read_lock(&root->snapshot_lock);
 
-       ret = btrfs_start_delalloc_snapshot(root);
+       ret = btrfs_start_delalloc_snapshot(root, false);
        if (ret)
                goto out;
 
index 07b0b42187913ce4ae2263d33c6d43cbb1b461ae..6c413bb451a3dc2f59296b274100d83deeb7a38e 100644 (file)
@@ -984,7 +984,7 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
 
        if (pre)
                ret = clone_ordered_extent(ordered, 0, pre);
-       if (post)
+       if (ret == 0 && post)
                ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
                                           post);
 
index 2319c923c9e6993019f88ad9fff8686690373232..3ded812f522ccf511ecb8a84d6f23479acde8f49 100644 (file)
@@ -3545,11 +3545,15 @@ static int try_flush_qgroup(struct btrfs_root *root)
        struct btrfs_trans_handle *trans;
        int ret;
 
-       /* Can't hold an open transaction or we run the risk of deadlocking */
-       ASSERT(current->journal_info == NULL ||
-              current->journal_info == BTRFS_SEND_TRANS_STUB);
-       if (WARN_ON(current->journal_info &&
-                   current->journal_info != BTRFS_SEND_TRANS_STUB))
+       /*
+        * Can't hold an open transaction or we run the risk of deadlocking,
+        * and can't either be under the context of a send operation (where
+        * current->journal_info is set to BTRFS_SEND_TRANS_STUB), as that
+        * would result in a crash when starting a transaction and does not
+        * make sense either (send is a read-only operation).
+        */
+       ASSERT(current->journal_info == NULL);
+       if (WARN_ON(current->journal_info))
                return 0;
 
        /*
@@ -3562,7 +3566,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
                return 0;
        }
 
-       ret = btrfs_start_delalloc_snapshot(root);
+       ret = btrfs_start_delalloc_snapshot(root, true);
        if (ret < 0)
                goto out;
        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
index 3928ecc40d7b07d831e342ce61f51165c20d2f1d..9178da07cc9c91a7f5f1a294afb1e53ed539686d 100644 (file)
@@ -203,10 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst,
                         * inline extent's data to the page.
                         */
                        ASSERT(key.offset > 0);
-                       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                                 inline_data, size, datal,
-                                                 comp_type);
-                       goto out;
+                       goto copy_to_page;
                }
        } else if (i_size_read(dst) <= datal) {
                struct btrfs_file_extent_item *ei;
@@ -222,13 +219,10 @@ static int clone_copy_inline_extent(struct inode *dst,
                    BTRFS_FILE_EXTENT_INLINE)
                        goto copy_inline_extent;
 
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
 copy_inline_extent:
-       ret = 0;
        /*
         * We have no extent items, or we have an extent at offset 0 which may
         * or may not be inlined. All these cases are dealt the same way.
@@ -240,11 +234,13 @@ copy_inline_extent:
                 * clone. Deal with all these cases by copying the inline extent
                 * data into the respective page at the destination inode.
                 */
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
+       /*
+        * Release path before starting a new transaction so we don't hold locks
+        * that would confuse lockdep.
+        */
        btrfs_release_path(path);
        /*
         * If we end up here it means were copy the inline extent into a leaf
@@ -301,6 +297,21 @@ out:
                *trans_out = trans;
 
        return ret;
+
+copy_to_page:
+       /*
+        * Release our path because we don't need it anymore and also because
+        * copy_inline_to_page() needs to reserve data and metadata, which may
+        * need to flush delalloc when we are low on available space and
+        * therefore cause a deadlock if writeback of an inline extent needs to
+        * write to the same leaf or an ordered extent completion needs to write
+        * to the same leaf.
+        */
+       btrfs_release_path(path);
+
+       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+                                 inline_data, size, datal, comp_type);
+       goto out;
 }
 
 /**
index 55741adf90712c4127c9e82ee6f8fe2756dafd6a..bd69db72acc5e4b73480d772579dea3d507dfc9d 100644 (file)
@@ -7170,7 +7170,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
        int i;
 
        if (root) {
-               ret = btrfs_start_delalloc_snapshot(root);
+               ret = btrfs_start_delalloc_snapshot(root, false);
                if (ret)
                        return ret;
                btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
@@ -7178,7 +7178,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
 
        for (i = 0; i < sctx->clone_roots_cnt; i++) {
                root = sctx->clone_roots[i].root;
-               ret = btrfs_start_delalloc_snapshot(root);
+               ret = btrfs_start_delalloc_snapshot(root, false);
                if (ret)
                        return ret;
                btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
index f67721d82e5d220e8f5e322378cd60c977e610ad..dbcf8bb2f3b9ae335989232e65b6620b2fa1d04a 100644 (file)
@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                        if (ret)
                                goto out;
 
-                       btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       if (ret)
+                               goto out;
                }
 
                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 
        if (nlink != inode->i_nlink) {
                set_nlink(inode, nlink);
-               btrfs_update_inode(trans, root, BTRFS_I(inode));
+               ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+               if (ret)
+                       goto out;
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;
 
@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                        break;
 
                if (ret == 1) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 
                ret = btrfs_del_item(trans, root, path);
                if (ret)
-                       goto out;
+                       break;
 
                btrfs_release_path(path);
                inode = read_one_inode(root, key.offset);
-               if (!inode)
-                       return -EIO;
+               if (!inode) {
+                       ret = -EIO;
+                       break;
+               }
 
                ret = fixup_inode_link_count(trans, root, inode);
                iput(inode);
                if (ret)
-                       goto out;
+                       break;
 
                /*
                 * fixup on a directory may create new entries,
@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                 */
                key.offset = (u64)-1;
        }
-       ret = 0;
-out:
        btrfs_release_path(path);
        return ret;
 }
@@ -1858,8 +1863,6 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
        } else if (ret == -EEXIST) {
                ret = 0;
-       } else {
-               BUG(); /* Logic Error */
        }
        iput(inode);
 
@@ -3299,6 +3302,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         *    begins and releases it only after writing its superblock.
         */
        mutex_lock(&fs_info->tree_log_mutex);
+
+       /*
+        * The previous transaction writeout phase could have failed, and thus
+        * marked the fs in an error state.  We must not commit here, as we
+        * could have updated our generation in the super_for_commit and
+        * writing the super here would result in transid mismatches.  If there
+        * is an error here just bail.
+        */
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+               ret = -EIO;
+               btrfs_set_log_full_commit(trans);
+               btrfs_abort_transaction(trans, ret);
+               mutex_unlock(&fs_info->tree_log_mutex);
+               goto out_wake_log_root;
+       }
+
        btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
        btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
        ret = write_all_supers(fs_info, 1);
@@ -6061,7 +6080,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
         * (since logging them is pointless, a link count of 0 means they
         * will never be accessible).
         */
-       if (btrfs_inode_in_log(inode, trans->transid) ||
+       if ((btrfs_inode_in_log(inode, trans->transid) &&
+            list_empty(&ctx->ordered_extents)) ||
            inode->vfs_inode.i_nlink == 0) {
                ret = BTRFS_NO_LOG_SYNC;
                goto end_no_trans;
@@ -6462,6 +6482,24 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
            (!old_dir || old_dir->logged_trans < trans->transid))
                return;
 
+       /*
+        * If we are doing a rename (old_dir is not NULL) from a directory that
+        * was previously logged, make sure the next log attempt on the directory
+        * is not skipped and logs the inode again. This is because the log may
+        * not currently be authoritative for a range including the old
+        * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
+        * sure after a log replay we do not end up with both the new and old
+        * dentries around (in case the inode is a directory we would have a
+        * directory with two hard links and 2 inode references for different
+        * parents). The next log attempt of old_dir will happen at
+        * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
+        * below, because we have previously set inode->last_unlink_trans to the
+        * current transaction ID, either here or at btrfs_record_unlink_dir() in
+        * case inode is a directory.
+        */
+       if (old_dir)
+               old_dir->logged_trans = 0;
+
        btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
        ctx.logging_new_name = true;
        /*
index 9a1ead0c4a3113b24b0a7a658cf20cd84778e7af..47d27059d06411a78d8cc1d2fff1675ee7656a0d 100644 (file)
@@ -1459,7 +1459,7 @@ static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
                /* Given hole range was invalid (outside of device) */
                if (ret == -ERANGE) {
                        *hole_start += *hole_size;
-                       *hole_size = false;
+                       *hole_size = 0;
                        return true;
                }
 
index 70b23a0d03b1043df7b81c7fcc0a0d1343ef4ec7..f1f3b10d1dbbe0bf91b6cb31fc8b99937b017d51 100644 (file)
@@ -150,6 +150,18 @@ static inline u32 sb_zone_number(int shift, int mirror)
        return (u32)zone;
 }
 
+static inline sector_t zone_start_sector(u32 zone_number,
+                                        struct block_device *bdev)
+{
+       return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
+}
+
+static inline u64 zone_start_physical(u32 zone_number,
+                                     struct btrfs_zoned_device_info *zone_info)
+{
+       return (u64)zone_number << zone_info->zone_size_shift;
+}
+
 /*
  * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
  * device into static sized chunks and fake a conventional zone on each of
@@ -405,8 +417,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
                if (sb_zone + 1 >= zone_info->nr_zones)
                        continue;
 
-               sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
-               ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
+               ret = btrfs_get_dev_zones(device,
+                                         zone_start_physical(sb_zone, zone_info),
                                          &zone_info->sb_zones[sb_pos],
                                          &nr_zones);
                if (ret)
@@ -721,7 +733,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
        if (sb_zone + 1 >= nr_zones)
                return -ENOENT;
 
-       ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
+       ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
                                  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
                                  zones);
        if (ret < 0)
@@ -826,7 +838,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
                return -ENOENT;
 
        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
-                               sb_zone << zone_sectors_shift,
+                               zone_start_sector(sb_zone, bdev),
                                zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
 }
 
@@ -878,7 +890,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
                        if (!(end <= sb_zone ||
                              sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
                                have_sb = true;
-                               pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
+                               pos = zone_start_physical(
+                                       sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
                                break;
                        }
 
@@ -1126,6 +1139,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
                        goto out;
                }
 
+               if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
+                       ret = -EIO;
+                       goto out;
+               }
+
                switch (zone.cond) {
                case BLK_ZONE_COND_OFFLINE:
                case BLK_ZONE_COND_READONLY:
@@ -1273,7 +1291,7 @@ void btrfs_free_redirty_list(struct btrfs_transaction *trans)
        spin_unlock(&trans->releasing_ebs_lock);
 }
 
-bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
+bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_block_group *cache;
@@ -1288,7 +1306,7 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
        if (!is_data_inode(&inode->vfs_inode))
                return false;
 
-       cache = btrfs_lookup_block_group(fs_info, em->block_start);
+       cache = btrfs_lookup_block_group(fs_info, start);
        ASSERT(cache);
        if (!cache)
                return false;
index 5e41a74a9cb24d22c30f92f2b21e1d87467dceee..e55d32595c2c03705441404acc324f03052e8dfe 100644 (file)
@@ -53,7 +53,7 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
 void btrfs_redirty_list_add(struct btrfs_transaction *trans,
                            struct extent_buffer *eb);
 void btrfs_free_redirty_list(struct btrfs_transaction *trans);
-bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
+bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start);
 void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
                                 struct bio *bio);
 void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
@@ -152,8 +152,7 @@ static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
                                          struct extent_buffer *eb) { }
 static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }
 
-static inline bool btrfs_use_zone_append(struct btrfs_inode *inode,
-                                        struct extent_map *em)
+static inline bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
 {
        return false;
 }
index 4a97fe12006b7f9e78aff30c38cdbd77cae809da..37fc7d6ac457adf3c356c8a1061a49f500081635 100644 (file)
@@ -72,15 +72,28 @@ struct smb3_key_debug_info {
 } __packed;
 
 /*
- * Dump full key (32 byte encrypt/decrypt keys instead of 16 bytes)
- * is needed if GCM256 (stronger encryption) negotiated
+ * Dump variable-sized keys
  */
 struct smb3_full_key_debug_info {
-       __u64   Suid;
+       /* INPUT: size of userspace buffer */
+       __u32   in_size;
+
+       /*
+        * INPUT: 0 for current user, otherwise session to dump
+        * OUTPUT: session id that was dumped
+        */
+       __u64   session_id;
        __u16   cipher_type;
-       __u8    auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
-       __u8    smb3encryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
-       __u8    smb3decryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
+       __u8    session_key_length;
+       __u8    server_in_key_length;
+       __u8    server_out_key_length;
+       __u8    data[];
+       /*
+        * return this struct with the keys appended at the end:
+        * __u8 session_key[session_key_length];
+        * __u8 server_in_key[server_in_key_length];
+        * __u8 server_out_key[server_out_key_length];
+        */
 } __packed;
 
 struct smb3_notify {
index d7ea9c5fe0f84948ce6fcdcf1fbb6ec0806fb614..2ffcb29d5c8f40440fe3a40dbd95fb0ecad5f028 100644 (file)
@@ -133,7 +133,7 @@ struct workqueue_struct     *cifsiod_wq;
 struct workqueue_struct        *decrypt_wq;
 struct workqueue_struct        *fileinfo_put_wq;
 struct workqueue_struct        *cifsoplockd_wq;
-struct workqueue_struct *deferredclose_wq;
+struct workqueue_struct        *deferredclose_wq;
 __u32 cifs_lock_secret;
 
 /*
index d88b4b523dcc4c74d5f9a49ecb8a504f2399ae3e..8488d70244620b40e5c37eb73ab60de99f061a24 100644 (file)
@@ -1257,8 +1257,7 @@ struct cifsFileInfo {
        struct work_struct oplock_break; /* work for oplock breaks */
        struct work_struct put; /* work for the final part of _put */
        struct delayed_work deferred;
-       bool oplock_break_received; /* Flag to indicate oplock break */
-       bool deferred_scheduled;
+       bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
 };
 
 struct cifs_io_parms {
@@ -1418,6 +1417,7 @@ struct cifsInodeInfo {
        struct inode vfs_inode;
        struct list_head deferred_closes; /* list of deferred closes */
        spinlock_t deferred_lock; /* protection on deferred list */
+       bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
 };
 
 static inline struct cifsInodeInfo *
index b53a87db282f928f28757b4aee53808573af11c4..554d64fe171e039e7b0a5a21cbec65f5b14300ec 100644 (file)
 #define SMB3_SIGN_KEY_SIZE (16)
 
 /*
- * Size of the smb3 encryption/decryption keys
+ * Size of the smb3 encryption/decryption key storage.
+ * This size is big enough to store any cipher key types.
  */
 #define SMB3_ENC_DEC_KEY_SIZE (32)
 
index 6caad100c3f36b78e94c387c5f3c6b6feeab9dd8..379a427f3c2f1ccedc9515b61cce91e8d81e5587 100644 (file)
@@ -323,8 +323,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
-       cfile->oplock_break_received = false;
-       cfile->deferred_scheduled = false;
+       cfile->deferred_close_scheduled = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        INIT_WORK(&cfile->put, cifsFileInfo_put_work);
@@ -574,21 +573,18 @@ int cifs_open(struct inode *inode, struct file *file)
                        file->f_op = &cifs_file_direct_ops;
        }
 
-       spin_lock(&CIFS_I(inode)->deferred_lock);
        /* Get the cached handle as SMB2 close is deferred */
        rc = cifs_get_readable_path(tcon, full_path, &cfile);
        if (rc == 0) {
                if (file->f_flags == cfile->f_flags) {
                        file->private_data = cfile;
+                       spin_lock(&CIFS_I(inode)->deferred_lock);
                        cifs_del_deferred_close(cfile);
                        spin_unlock(&CIFS_I(inode)->deferred_lock);
                        goto out;
                } else {
-                       spin_unlock(&CIFS_I(inode)->deferred_lock);
                        _cifsFileInfo_put(cfile, true, false);
                }
-       } else {
-               spin_unlock(&CIFS_I(inode)->deferred_lock);
        }
 
        if (server->oplocks)
@@ -878,12 +874,8 @@ void smb2_deferred_work_close(struct work_struct *work)
                        struct cifsFileInfo, deferred.work);
 
        spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
-       if (!cfile->deferred_scheduled) {
-               spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
-               return;
-       }
        cifs_del_deferred_close(cfile);
-       cfile->deferred_scheduled = false;
+       cfile->deferred_close_scheduled = false;
        spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
        _cifsFileInfo_put(cfile, true, false);
 }
@@ -900,19 +892,26 @@ int cifs_close(struct inode *inode, struct file *file)
                file->private_data = NULL;
                dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
                if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+                   cinode->lease_granted &&
                    dclose) {
                        if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
                                inode->i_ctime = inode->i_mtime = current_time(inode);
                        spin_lock(&cinode->deferred_lock);
                        cifs_add_deferred_close(cfile, dclose);
-                       if (cfile->deferred_scheduled) {
-                               mod_delayed_work(deferredclose_wq,
-                                               &cfile->deferred, cifs_sb->ctx->acregmax);
+                       if (cfile->deferred_close_scheduled &&
+                           delayed_work_pending(&cfile->deferred)) {
+                               /*
+                                * If there is no pending work, mod_delayed_work queues new work.
+                                * So, Increase the ref count to avoid use-after-free.
+                                */
+                               if (!mod_delayed_work(deferredclose_wq,
+                                               &cfile->deferred, cifs_sb->ctx->acregmax))
+                                       cifsFileInfo_get(cfile);
                        } else {
                                /* Deferred close for files */
                                queue_delayed_work(deferredclose_wq,
                                                &cfile->deferred, cifs_sb->ctx->acregmax);
-                               cfile->deferred_scheduled = true;
+                               cfile->deferred_close_scheduled = true;
                                spin_unlock(&cinode->deferred_lock);
                                return 0;
                        }
@@ -2020,8 +2019,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
-                       if ((!open_file->invalidHandle) &&
-                               (!open_file->oplock_break_received)) {
+                       if ((!open_file->invalidHandle)) {
                                /* found a good file */
                                /* lock it so it will not be closed on us */
                                cifsFileInfo_get(open_file);
@@ -4874,14 +4872,20 @@ oplock_break_ack:
        }
        /*
         * When oplock break is received and there are no active
-        * file handles but cached, then set the flag oplock_break_received.
+        * file handles but cached, then schedule deferred close immediately.
         * So, new open will not use cached handle.
         */
        spin_lock(&CIFS_I(inode)->deferred_lock);
        is_deferred = cifs_is_deferred_close(cfile, &dclose);
-       if (is_deferred && cfile->deferred_scheduled) {
-               cfile->oplock_break_received = true;
-               mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+       if (is_deferred &&
+           cfile->deferred_close_scheduled &&
+           delayed_work_pending(&cfile->deferred)) {
+               /*
+                * If there is no pending work, mod_delayed_work queues new work.
+                * So, Increase the ref count to avoid use-after-free.
+                */
+               if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+                       cifsFileInfo_get(cfile);
        }
        spin_unlock(&CIFS_I(inode)->deferred_lock);
        _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
index 5d21cd905315d4dca90ca5001d064f2bfae05e5c..92d4ab029c917dde84e575706559545f409c97ec 100644 (file)
@@ -1145,7 +1145,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                /* if iocharset not set then load_nls_default
                 * is used by caller
                 */
-                cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
+               cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
                break;
        case Opt_netbiosname:
                memset(ctx->source_rfc1001_name, 0x20,
index 28ec8d7c521a978c744a228ff59c676740093077..d67d281ab86320750081114ff8bebd935d39d289 100644 (file)
@@ -33,6 +33,7 @@
 #include "cifsfs.h"
 #include "cifs_ioctl.h"
 #include "smb2proto.h"
+#include "smb2glob.h"
 #include <linux/btrfs.h>
 
 static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
@@ -214,48 +215,112 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
        return 0;
 }
 
-static int cifs_dump_full_key(struct cifs_tcon *tcon, unsigned long arg)
+static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
 {
-       struct smb3_full_key_debug_info pfull_key_inf;
-       __u64 suid;
-       struct list_head *tmp;
+       struct smb3_full_key_debug_info out;
        struct cifs_ses *ses;
+       int rc = 0;
        bool found = false;
+       u8 __user *end;
 
-       if (!smb3_encryption_required(tcon))
-               return -EOPNOTSUPP;
+       if (!smb3_encryption_required(tcon)) {
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       /* copy user input into our output buffer */
+       if (copy_from_user(&out, in, sizeof(out))) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (!out.session_id) {
+               /* if ses id is 0, use current user session */
+               ses = tcon->ses;
+       } else {
+               /* otherwise if a session id is given, look for it in all our sessions */
+               struct cifs_ses *ses_it = NULL;
+               struct TCP_Server_Info *server_it = NULL;
 
-       ses = tcon->ses; /* default to user id for current user */
-       if (get_user(suid, (__u64 __user *)arg))
-               suid = 0;
-       if (suid) {
-               /* search to see if there is a session with a matching SMB UID */
                spin_lock(&cifs_tcp_ses_lock);
-               list_for_each(tmp, &tcon->ses->server->smb_ses_list) {
-                       ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
-                       if (ses->Suid == suid) {
-                               found = true;
-                               break;
+               list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
+                       list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
+                               if (ses_it->Suid == out.session_id) {
+                                       ses = ses_it;
+                                       /*
+                                        * since we are using the session outside the crit
+                                        * section, we need to make sure it won't be released
+                                        * so increment its refcount
+                                        */
+                                       ses->ses_count++;
+                                       found = true;
+                                       goto search_end;
+                               }
                        }
                }
+search_end:
                spin_unlock(&cifs_tcp_ses_lock);
-               if (found == false)
-                       return -EINVAL;
-       } /* else uses default user's SMB UID (ie current user) */
-
-       pfull_key_inf.cipher_type = le16_to_cpu(ses->server->cipher_type);
-       pfull_key_inf.Suid = ses->Suid;
-       memcpy(pfull_key_inf.auth_key, ses->auth_key.response,
-              16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
-       memcpy(pfull_key_inf.smb3decryptionkey, ses->smb3decryptionkey,
-              32 /* SMB3_ENC_DEC_KEY_SIZE */);
-       memcpy(pfull_key_inf.smb3encryptionkey,
-              ses->smb3encryptionkey, 32 /* SMB3_ENC_DEC_KEY_SIZE */);
-       if (copy_to_user((void __user *)arg, &pfull_key_inf,
-                        sizeof(struct smb3_full_key_debug_info)))
-               return -EFAULT;
+               if (!found) {
+                       rc = -ENOENT;
+                       goto out;
+               }
+       }
 
-       return 0;
+       switch (ses->server->cipher_type) {
+       case SMB2_ENCRYPTION_AES128_CCM:
+       case SMB2_ENCRYPTION_AES128_GCM:
+               out.session_key_length = CIFS_SESS_KEY_SIZE;
+               out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE;
+               break;
+       case SMB2_ENCRYPTION_AES256_CCM:
+       case SMB2_ENCRYPTION_AES256_GCM:
+               out.session_key_length = CIFS_SESS_KEY_SIZE;
+               out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE;
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       /* check if user buffer is big enough to store all the keys */
+       if (out.in_size < sizeof(out) + out.session_key_length + out.server_in_key_length
+           + out.server_out_key_length) {
+               rc = -ENOBUFS;
+               goto out;
+       }
+
+       out.session_id = ses->Suid;
+       out.cipher_type = le16_to_cpu(ses->server->cipher_type);
+
+       /* overwrite user input with our output */
+       if (copy_to_user(in, &out, sizeof(out))) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* append all the keys at the end of the user buffer */
+       end = in->data;
+       if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) {
+               rc = -EINVAL;
+               goto out;
+       }
+       end += out.session_key_length;
+
+       if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) {
+               rc = -EINVAL;
+               goto out;
+       }
+       end += out.server_in_key_length;
+
+       if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       if (found)
+               cifs_put_smb_ses(ses);
+       return rc;
 }
 
 long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
@@ -371,6 +436,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                rc = -EOPNOTSUPP;
                        break;
                case CIFS_DUMP_KEY:
+                       /*
+                        * Dump encryption keys. This is an old ioctl that only
+                        * handles AES-128-{CCM,GCM}.
+                        */
                        if (pSMBFile == NULL)
                                break;
                        if (!capable(CAP_SYS_ADMIN)) {
@@ -398,11 +467,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        else
                                rc = 0;
                        break;
-               /*
-                * Dump full key (32 bytes instead of 16 bytes) is
-                * needed if GCM256 (stronger encryption) negotiated
-                */
                case CIFS_DUMP_FULL_KEY:
+                       /*
+                        * Dump encryption keys (handles any key sizes)
+                        */
                        if (pSMBFile == NULL)
                                break;
                        if (!capable(CAP_SYS_ADMIN)) {
@@ -410,8 +478,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                break;
                        }
                        tcon = tlink_tcon(pSMBFile->tlink);
-                       rc = cifs_dump_full_key(tcon, arg);
-
+                       rc = cifs_dump_full_key(tcon, (void __user *)arg);
                        break;
                case CIFS_IOC_NOTIFY:
                        if (!S_ISDIR(inode->i_mode)) {
index 524dbdfb7184d964b6e6fe7d0462d3cef0f1056c..7207a63819cbf271e1c53c0a4e8c9323c13f40d1 100644 (file)
@@ -672,6 +672,11 @@ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
        spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
 }
 
+/*
+ * Critical section which runs after acquiring deferred_lock.
+ * As there is no reference count on cifs_deferred_close, pdclose
+ * should not be used outside deferred_lock.
+ */
 bool
 cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
 {
@@ -688,6 +693,9 @@ cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **
        return false;
 }
 
+/*
+ * Critical section which runs after acquiring deferred_lock.
+ */
 void
 cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
 {
@@ -707,6 +715,9 @@ cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *
        list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
 }
 
+/*
+ * Critical section which runs after acquiring deferred_lock.
+ */
 void
 cifs_del_deferred_close(struct cifsFileInfo *cfile)
 {
@@ -738,15 +749,19 @@ void
 cifs_close_all_deferred_files(struct cifs_tcon *tcon)
 {
        struct cifsFileInfo *cfile;
-       struct cifsInodeInfo *cinode;
        struct list_head *tmp;
 
        spin_lock(&tcon->open_file_lock);
        list_for_each(tmp, &tcon->openFileList) {
                cfile = list_entry(tmp, struct cifsFileInfo, tlist);
-               cinode = CIFS_I(d_inode(cfile->dentry));
-               if (delayed_work_pending(&cfile->deferred))
-                       mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+               if (delayed_work_pending(&cfile->deferred)) {
+                       /*
+                        * If there is no pending work, mod_delayed_work queues new work.
+                        * So, increase the ref count to avoid use-after-free.
+                        */
+                       if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+                               cifsFileInfo_get(cfile);
+               }
        }
        spin_unlock(&tcon->open_file_lock);
 }
index dd0eb665b680c86549418e9044b130ea25ec3261..21ef51d338e0ca333e21a81610ab0cec1d6567b6 100644 (file)
@@ -1861,6 +1861,8 @@ smb2_copychunk_range(const unsigned int xid,
                        cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
 
                /* Request server copy to target from src identified by key */
+               kfree(retbuf);
+               retbuf = NULL;
                rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
                        true /* is_fsctl */, (char *)pcchunk,
@@ -3981,6 +3983,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
                      unsigned int epoch, bool *purge_cache)
 {
        oplock &= 0xFF;
+       cinode->lease_granted = false;
        if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
                return;
        if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
@@ -4007,6 +4010,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
        unsigned int new_oplock = 0;
 
        oplock &= 0xFF;
+       cinode->lease_granted = true;
        if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
                return;
 
index a8bf431847730298c4da9f3a311d612337d1ee28..c205f93e0a10ff595fa04afdc9005dda6214309c 100644 (file)
@@ -958,6 +958,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        /* Internal types */
        server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
 
+       /*
+        * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context
+        * Set the cipher type manually.
+        */
+       if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+               server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
+
        security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
                                               (struct smb2_sync_hdr *)rsp);
        /*
@@ -3900,10 +3907,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
                         * Related requests use info from previous read request
                         * in chain.
                         */
-                       shdr->SessionId = 0xFFFFFFFF;
+                       shdr->SessionId = 0xFFFFFFFFFFFFFFFF;
                        shdr->TreeId = 0xFFFFFFFF;
-                       req->PersistentFileId = 0xFFFFFFFF;
-                       req->VolatileFileId = 0xFFFFFFFF;
+                       req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
+                       req->VolatileFileId = 0xFFFFFFFFFFFFFFFF;
                }
        }
        if (remaining_bytes > io_parms->length)
index d6df908dccade5b68eb877e0f45e6aab5e0ed5ee..dafcb6ab050dd9218d824b4808af3267b5c0e316 100644 (file)
 
 #include <linux/tracepoint.h>
 
+/*
+ * Please use this 3-part article as a reference for writing new tracepoints:
+ * https://lwn.net/Articles/379903/
+ */
+
 /* For logging errors in read or write */
 DECLARE_EVENT_CLASS(smb3_rw_err_class,
        TP_PROTO(unsigned int xid,
@@ -529,16 +534,16 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
        TP_ARGS(xid, func_name, rc),
        TP_STRUCT__entry(
                __field(unsigned int, xid)
-               __field(const char *, func_name)
+               __string(func_name, func_name)
                __field(int, rc)
        ),
        TP_fast_assign(
                __entry->xid = xid;
-               __entry->func_name = func_name;
+               __assign_str(func_name, func_name);
                __entry->rc = rc;
        ),
        TP_printk("\t%s: xid=%u rc=%d",
-               __entry->func_name, __entry->xid, __entry->rc)
+               __get_str(func_name), __entry->xid, __entry->rc)
 )
 
 #define DEFINE_SMB3_EXIT_ERR_EVENT(name)          \
@@ -583,14 +588,14 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
        TP_ARGS(xid, func_name),
        TP_STRUCT__entry(
                __field(unsigned int, xid)
-               __field(const char *, func_name)
+               __string(func_name, func_name)
        ),
        TP_fast_assign(
                __entry->xid = xid;
-               __entry->func_name = func_name;
+               __assign_str(func_name, func_name);
        ),
        TP_printk("\t%s: xid=%u",
-               __entry->func_name, __entry->xid)
+               __get_str(func_name), __entry->xid)
 )
 
 #define DEFINE_SMB3_ENTER_EXIT_EVENT(name)        \
@@ -857,16 +862,16 @@ DECLARE_EVENT_CLASS(smb3_reconnect_class,
        TP_STRUCT__entry(
                __field(__u64, currmid)
                __field(__u64, conn_id)
-               __field(char *, hostname)
+               __string(hostname, hostname)
        ),
        TP_fast_assign(
                __entry->currmid = currmid;
                __entry->conn_id = conn_id;
-               __entry->hostname = hostname;
+               __assign_str(hostname, hostname);
        ),
        TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
                __entry->conn_id,
-               __entry->hostname,
+               __get_str(hostname),
                __entry->currmid)
 )
 
@@ -891,7 +896,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
        TP_STRUCT__entry(
                __field(__u64, currmid)
                __field(__u64, conn_id)
-               __field(char *, hostname)
+               __string(hostname, hostname)
                __field(int, credits)
                __field(int, credits_to_add)
                __field(int, in_flight)
@@ -899,7 +904,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
        TP_fast_assign(
                __entry->currmid = currmid;
                __entry->conn_id = conn_id;
-               __entry->hostname = hostname;
+               __assign_str(hostname, hostname);
                __entry->credits = credits;
                __entry->credits_to_add = credits_to_add;
                __entry->in_flight = in_flight;
@@ -907,7 +912,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
        TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
                        "credits=%d credit_change=%d in_flight=%d",
                __entry->conn_id,
-               __entry->hostname,
+               __get_str(hostname),
                __entry->currmid,
                __entry->credits,
                __entry->credits_to_add,
index 2868e3e171ae615f9c065ec242e150c6eb5e30aa..c3d8fc14b993047bfb9a628e893537eb926e0e85 100644 (file)
@@ -519,7 +519,7 @@ static bool dump_interrupted(void)
         * but then we need to teach dump_write() to restart and clear
         * TIF_SIGPENDING.
         */
-       return signal_pending(current);
+       return fatal_signal_pending(current) || freezing(current);
 }
 
 static void wait_for_dump_helpers(struct file *file)
index 69216241392f23106e823f8d9d583a9710f4d17f..62352cbcf0f40db11ea9f04a4b4e69bba22eba54 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
        struct exceptional_entry_key key;
 };
 
+/**
+ * enum dax_wake_mode: waitqueue wakeup behaviour
+ * @WAKE_ALL: wake all waiters in the waitqueue
+ * @WAKE_NEXT: wake only the first waiter in the waitqueue
+ */
+enum dax_wake_mode {
+       WAKE_ALL,
+       WAKE_NEXT,
+};
+
 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
                void *entry, struct exceptional_entry_key *key)
 {
@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
  * The important information it's conveying is whether the entry at
  * this index used to be a PMD entry.
  */
-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry,
+                          enum dax_wake_mode mode)
 {
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;
@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
-               __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+               __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
 }
 
 /*
@@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
        finish_wait(wq, &ewait.wait);
 }
 
-static void put_unlocked_entry(struct xa_state *xas, void *entry)
+static void put_unlocked_entry(struct xa_state *xas, void *entry,
+                              enum dax_wake_mode mode)
 {
-       /* If we were the only waiter woken, wake the next one */
        if (entry && !dax_is_conflict(entry))
-               dax_wake_entry(xas, entry, false);
+               dax_wake_entry(xas, entry, mode);
 }
 
 /*
@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
        old = xas_store(xas, entry);
        xas_unlock_irq(xas);
        BUG_ON(!dax_is_locked(old));
-       dax_wake_entry(xas, entry, false);
+       dax_wake_entry(xas, entry, WAKE_NEXT);
 }
 
 /*
@@ -524,7 +535,7 @@ retry:
 
                dax_disassociate_entry(entry, mapping, false);
                xas_store(xas, NULL);   /* undo the PMD join */
-               dax_wake_entry(xas, entry, true);
+               dax_wake_entry(xas, entry, WAKE_ALL);
                mapping->nrpages -= PG_PMD_NR;
                entry = NULL;
                xas_set(xas, index);
@@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
                        entry = get_unlocked_entry(&xas, 0);
                if (entry)
                        page = dax_busy_page(entry);
-               put_unlocked_entry(&xas, entry);
+               put_unlocked_entry(&xas, entry, WAKE_NEXT);
                if (page)
                        break;
                if (++scanned % XA_CHECK_SCHED)
@@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
        mapping->nrpages -= 1UL << dax_entry_order(entry);
        ret = 1;
 out:
-       put_unlocked_entry(&xas, entry);
+       put_unlocked_entry(&xas, entry, WAKE_ALL);
        xas_unlock_irq(&xas);
        return ret;
 }
@@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
        xas_lock_irq(xas);
        xas_store(xas, entry);
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
-       dax_wake_entry(xas, entry, false);
+       dax_wake_entry(xas, entry, WAKE_NEXT);
 
        trace_dax_writeback_one(mapping->host, index, count);
        return ret;
 
  put_unlocked:
-       put_unlocked_entry(xas, entry);
+       put_unlocked_entry(xas, entry, WAKE_NEXT);
        return ret;
 }
 
@@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
        /* Did we race with someone splitting entry or so? */
        if (!entry || dax_is_conflict(entry) ||
            (order == 0 && !dax_is_pte_entry(entry))) {
-               put_unlocked_entry(&xas, entry);
+               put_unlocked_entry(&xas, entry, WAKE_NEXT);
                xas_unlock_irq(&xas);
                trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
                                                      VM_FAULT_NOPAGE);
index e813acfaa6e8a1af2319eea3d865c4384eca2188..ba7c01cd9a5d2e762fba11fa185e356e2ee37e6f 100644 (file)
@@ -893,7 +893,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
 
        copy[copy_len] = '\n';
 
-       ret = simple_read_from_buffer(user_buf, count, ppos, copy, copy_len);
+       ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
        kfree(copy);
 
        return ret;
index 1d252164d97b6f989f1836dc27e69e3dc698abdf..8129a430d789d92865a5ad71c0d5d9af43a87509 100644 (file)
@@ -45,10 +45,13 @@ static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
 static int debugfs_setattr(struct user_namespace *mnt_userns,
                           struct dentry *dentry, struct iattr *ia)
 {
-       int ret = security_locked_down(LOCKDOWN_DEBUGFS);
+       int ret;
 
-       if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
-               return ret;
+       if (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) {
+               ret = security_locked_down(LOCKDOWN_DEBUGFS);
+               if (ret)
+                       return ret;
+       }
        return simple_setattr(&init_user_ns, dentry, ia);
 }
 
index 345f8061e3b4abf424ea4079e2d01ac620ac13d3..e3f5d7f3c8a0ad12eb04be0c89b1b63910b31d6d 100644 (file)
@@ -296,10 +296,6 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
        struct extent_crypt_result ecr;
        int rc = 0;
 
-       if (!crypt_stat || !crypt_stat->tfm
-              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
-               return -EINVAL;
-
        if (unlikely(ecryptfs_verbosity > 0)) {
                ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
                                crypt_stat->key_size);
index e62d813756f28b07926423ed039b5202ae17f87c..efaf32596b97f0ad5100c8c7c18c990b9078a5d3 100644 (file)
@@ -450,14 +450,31 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
        lcn = m->lcn + 1;
        if (m->compressedlcs)
                goto out;
-       if (lcn == initial_lcn)
-               goto err_bonus_cblkcnt;
 
        err = z_erofs_load_cluster_from_disk(m, lcn);
        if (err)
                return err;
 
+       /*
+        * If the 1st NONHEAD lcluster has already been handled initially w/o
+        * valid compressedlcs, which means at least it mustn't be CBLKCNT, or
+        * an internal implementation error is detected.
+        *
+        * The following code can also handle it properly anyway, but let's
+        * BUG_ON in the debugging mode only for developers to notice that.
+        */
+       DBG_BUGON(lcn == initial_lcn &&
+                 m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);
+
        switch (m->type) {
+       case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+       case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+               /*
+                * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
+                * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
+                */
+               m->compressedlcs = 1;
+               break;
        case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
                if (m->delta[0] != 1)
                        goto err_bonus_cblkcnt;
index 77c84d6f1af6b2489aa196e845f3b0b1a6060e83..cbf37b2cf871e59f8bac4a372d55db7ed223b41a 100644 (file)
@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
                ext4_ext_mark_unwritten(ex2);
 
        err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
-       if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+       if (err != -ENOSPC && err != -EDQUOT)
+               goto out;
+
+       if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
                if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
                        if (split_flag & EXT4_EXT_DATA_VALID1) {
                                err = ext4_ext_zeroout(inode, ex2);
@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
                                              ext4_ext_pblock(&orig_ex));
                }
 
-               if (err)
-                       goto fix_extent_len;
-               /* update the extent length and mark as initialized */
-               ex->ee_len = cpu_to_le16(ee_len);
-               ext4_ext_try_to_merge(handle, inode, path, ex);
-               err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-               if (err)
-                       goto fix_extent_len;
-
-               /* update extent status tree */
-               err = ext4_zeroout_es(inode, &zero_ex);
-
-               goto out;
-       } else if (err)
-               goto fix_extent_len;
-
-out:
-       ext4_ext_show_leaf(inode, path);
-       return err;
+               if (!err) {
+                       /* update the extent length and mark as initialized */
+                       ex->ee_len = cpu_to_le16(ee_len);
+                       ext4_ext_try_to_merge(handle, inode, path, ex);
+                       err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+                       if (!err)
+                               /* update extent status tree */
+                               err = ext4_zeroout_es(inode, &zero_ex);
+                       /* If we failed at this point, we don't know in which
+                        * state the extent tree exactly is so don't try to fix
+                        * length of the original extent as it may do even more
+                        * damage.
+                        */
+                       goto out;
+               }
+       }
 
 fix_extent_len:
        ex->ee_len = orig_ex.ee_len;
@@ -3260,6 +3260,9 @@ fix_extent_len:
         */
        ext4_ext_dirty(handle, inode, path + path->p_depth);
        return err;
+out:
+       ext4_ext_show_leaf(inode, path);
+       return err;
 }
 
 /*
index f98ca4f37ef61db30245447acfbca564dbc8e6e6..e8195229c2529b3d5f1b78865fa3d4a74b2274fb 100644 (file)
@@ -1288,28 +1288,29 @@ struct dentry_info_args {
 };
 
 static inline void tl_to_darg(struct dentry_info_args *darg,
-                               struct  ext4_fc_tl *tl)
+                             struct  ext4_fc_tl *tl, u8 *val)
 {
-       struct ext4_fc_dentry_info *fcd;
+       struct ext4_fc_dentry_info fcd;
 
-       fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
+       memcpy(&fcd, val, sizeof(fcd));
 
-       darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
-       darg->ino = le32_to_cpu(fcd->fc_ino);
-       darg->dname = fcd->fc_dname;
-       darg->dname_len = ext4_fc_tag_len(tl) -
-                       sizeof(struct ext4_fc_dentry_info);
+       darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
+       darg->ino = le32_to_cpu(fcd.fc_ino);
+       darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
+       darg->dname_len = le16_to_cpu(tl->fc_len) -
+               sizeof(struct ext4_fc_dentry_info);
 }
 
 /* Unlink replay function */
-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+                                u8 *val)
 {
        struct inode *inode, *old_parent;
        struct qstr entry;
        struct dentry_info_args darg;
        int ret = 0;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
                        darg.parent_ino, darg.dname_len);
@@ -1399,13 +1400,14 @@ out:
 }
 
 /* Link replay function */
-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
+                              u8 *val)
 {
        struct inode *inode;
        struct dentry_info_args darg;
        int ret = 0;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
                        darg.parent_ino, darg.dname_len);
 
@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
 /*
  * Inode replay function
  */
-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
+                               u8 *val)
 {
-       struct ext4_fc_inode *fc_inode;
+       struct ext4_fc_inode fc_inode;
        struct ext4_inode *raw_inode;
        struct ext4_inode *raw_fc_inode;
        struct inode *inode = NULL;
@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
        int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
        struct ext4_extent_header *eh;
 
-       fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
+       memcpy(&fc_inode, val, sizeof(fc_inode));
 
-       ino = le32_to_cpu(fc_inode->fc_ino);
+       ino = le32_to_cpu(fc_inode.fc_ino);
        trace_ext4_fc_replay(sb, tag, ino, 0, 0);
 
        inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
 
        ext4_fc_record_modified_inode(sb, ino);
 
-       raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
+       raw_fc_inode = (struct ext4_inode *)
+               (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
        ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
        if (ret)
                goto out;
 
-       inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
+       inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
        raw_inode = ext4_raw_inode(&iloc);
 
        memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
@@ -1547,14 +1551,15 @@ out:
  * inode for which we are trying to create a dentry here, should already have
  * been replayed before we start here.
  */
-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
+                                u8 *val)
 {
        int ret = 0;
        struct inode *inode = NULL;
        struct inode *dir = NULL;
        struct dentry_info_args darg;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
                        darg.parent_ino, darg.dname_len);
@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
 
 /* Replay add range tag */
 static int ext4_fc_replay_add_range(struct super_block *sb,
-                               struct ext4_fc_tl *tl)
+                                   struct ext4_fc_tl *tl, u8 *val)
 {
-       struct ext4_fc_add_range *fc_add_ex;
+       struct ext4_fc_add_range fc_add_ex;
        struct ext4_extent newex, *ex;
        struct inode *inode;
        ext4_lblk_t start, cur;
@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
        struct ext4_ext_path *path = NULL;
        int ret;
 
-       fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
-       ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
+       memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
+       ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
-               le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
+               le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
                ext4_ext_get_actual_len(ex));
 
-       inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
-                               EXT4_IGET_NORMAL);
+       inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
        if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return 0;
@@ -1762,32 +1766,33 @@ next:
 
 /* Replay DEL_RANGE tag */
 static int
-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
+                        u8 *val)
 {
        struct inode *inode;
-       struct ext4_fc_del_range *lrange;
+       struct ext4_fc_del_range lrange;
        struct ext4_map_blocks map;
        ext4_lblk_t cur, remaining;
        int ret;
 
-       lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
-       cur = le32_to_cpu(lrange->fc_lblk);
-       remaining = le32_to_cpu(lrange->fc_len);
+       memcpy(&lrange, val, sizeof(lrange));
+       cur = le32_to_cpu(lrange.fc_lblk);
+       remaining = le32_to_cpu(lrange.fc_len);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
-               le32_to_cpu(lrange->fc_ino), cur, remaining);
+               le32_to_cpu(lrange.fc_ino), cur, remaining);
 
-       inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
+       inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
        if (IS_ERR(inode)) {
-               jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
+               jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
                return 0;
        }
 
        ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
 
        jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
-                       inode->i_ino, le32_to_cpu(lrange->fc_lblk),
-                       le32_to_cpu(lrange->fc_len));
+                       inode->i_ino, le32_to_cpu(lrange.fc_lblk),
+                       le32_to_cpu(lrange.fc_len));
        while (remaining > 0) {
                map.m_lblk = cur;
                map.m_len = remaining;
@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
        }
 
        ret = ext4_punch_hole(inode,
-               le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
-               le32_to_cpu(lrange->fc_len) <<  sb->s_blocksize_bits);
+               le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
+               le32_to_cpu(lrange.fc_len) <<  sb->s_blocksize_bits);
        if (ret)
                jbd_debug(1, "ext4_punch_hole returned %d", ret);
        ext4_ext_replay_shrink_inode(inode,
@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_fc_replay_state *state;
        int ret = JBD2_FC_REPLAY_CONTINUE;
-       struct ext4_fc_add_range *ext;
-       struct ext4_fc_tl *tl;
-       struct ext4_fc_tail *tail;
-       __u8 *start, *end;
-       struct ext4_fc_head *head;
+       struct ext4_fc_add_range ext;
+       struct ext4_fc_tl tl;
+       struct ext4_fc_tail tail;
+       __u8 *start, *end, *cur, *val;
+       struct ext4_fc_head head;
        struct ext4_extent *ex;
 
        state = &sbi->s_fc_replay_state;
@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
        }
 
        state->fc_replay_expected_off++;
-       fc_for_each_tl(start, end, tl) {
+       for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+               memcpy(&tl, cur, sizeof(tl));
+               val = cur + sizeof(tl);
                jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
-                         tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
-               switch (le16_to_cpu(tl->fc_tag)) {
+                         tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
+               switch (le16_to_cpu(tl.fc_tag)) {
                case EXT4_FC_TAG_ADD_RANGE:
-                       ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
-                       ex = (struct ext4_extent *)&ext->fc_ex;
+                       memcpy(&ext, val, sizeof(ext));
+                       ex = (struct ext4_extent *)&ext.fc_ex;
                        ret = ext4_fc_record_regions(sb,
-                               le32_to_cpu(ext->fc_ino),
+                               le32_to_cpu(ext.fc_ino),
                                le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
                                ext4_ext_get_actual_len(ex));
                        if (ret < 0)
@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
                case EXT4_FC_TAG_INODE:
                case EXT4_FC_TAG_PAD:
                        state->fc_cur_tag++;
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                       sizeof(*tl) + ext4_fc_tag_len(tl));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                       sizeof(tl) + le16_to_cpu(tl.fc_len));
                        break;
                case EXT4_FC_TAG_TAIL:
                        state->fc_cur_tag++;
-                       tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                               sizeof(*tl) +
+                       memcpy(&tail, val, sizeof(tail));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                               sizeof(tl) +
                                                offsetof(struct ext4_fc_tail,
                                                fc_crc));
-                       if (le32_to_cpu(tail->fc_tid) == expected_tid &&
-                               le32_to_cpu(tail->fc_crc) == state->fc_crc) {
+                       if (le32_to_cpu(tail.fc_tid) == expected_tid &&
+                               le32_to_cpu(tail.fc_crc) == state->fc_crc) {
                                state->fc_replay_num_tags = state->fc_cur_tag;
                                state->fc_regions_valid =
                                        state->fc_regions_used;
@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
                        state->fc_crc = 0;
                        break;
                case EXT4_FC_TAG_HEAD:
-                       head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
-                       if (le32_to_cpu(head->fc_features) &
+                       memcpy(&head, val, sizeof(head));
+                       if (le32_to_cpu(head.fc_features) &
                                ~EXT4_FC_SUPPORTED_FEATURES) {
                                ret = -EOPNOTSUPP;
                                break;
                        }
-                       if (le32_to_cpu(head->fc_tid) != expected_tid) {
+                       if (le32_to_cpu(head.fc_tid) != expected_tid) {
                                ret = JBD2_FC_REPLAY_STOP;
                                break;
                        }
                        state->fc_cur_tag++;
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                       sizeof(*tl) + ext4_fc_tag_len(tl));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                           sizeof(tl) + le16_to_cpu(tl.fc_len));
                        break;
                default:
                        ret = state->fc_replay_num_tags ?
@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
 {
        struct super_block *sb = journal->j_private;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_fc_tl *tl;
-       __u8 *start, *end;
+       struct ext4_fc_tl tl;
+       __u8 *start, *end, *cur, *val;
        int ret = JBD2_FC_REPLAY_CONTINUE;
        struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
-       struct ext4_fc_tail *tail;
+       struct ext4_fc_tail tail;
 
        if (pass == PASS_SCAN) {
                state->fc_current_pass = PASS_SCAN;
@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
        start = (u8 *)bh->b_data;
        end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
 
-       fc_for_each_tl(start, end, tl) {
+       for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+               memcpy(&tl, cur, sizeof(tl));
+               val = cur + sizeof(tl);
+
                if (state->fc_replay_num_tags == 0) {
                        ret = JBD2_FC_REPLAY_STOP;
                        ext4_fc_set_bitmaps_and_counters(sb);
                        break;
                }
                jbd_debug(3, "Replay phase, tag:%s\n",
-                               tag2str(le16_to_cpu(tl->fc_tag)));
+                               tag2str(le16_to_cpu(tl.fc_tag)));
                state->fc_replay_num_tags--;
-               switch (le16_to_cpu(tl->fc_tag)) {
+               switch (le16_to_cpu(tl.fc_tag)) {
                case EXT4_FC_TAG_LINK:
-                       ret = ext4_fc_replay_link(sb, tl);
+                       ret = ext4_fc_replay_link(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_UNLINK:
-                       ret = ext4_fc_replay_unlink(sb, tl);
+                       ret = ext4_fc_replay_unlink(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_ADD_RANGE:
-                       ret = ext4_fc_replay_add_range(sb, tl);
+                       ret = ext4_fc_replay_add_range(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_CREAT:
-                       ret = ext4_fc_replay_create(sb, tl);
+                       ret = ext4_fc_replay_create(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_DEL_RANGE:
-                       ret = ext4_fc_replay_del_range(sb, tl);
+                       ret = ext4_fc_replay_del_range(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_INODE:
-                       ret = ext4_fc_replay_inode(sb, tl);
+                       ret = ext4_fc_replay_inode(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_PAD:
                        trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
-                               ext4_fc_tag_len(tl), 0);
+                                            le16_to_cpu(tl.fc_len), 0);
                        break;
                case EXT4_FC_TAG_TAIL:
                        trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
-                               ext4_fc_tag_len(tl), 0);
-                       tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
-                       WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
+                                            le16_to_cpu(tl.fc_len), 0);
+                       memcpy(&tail, val, sizeof(tail));
+                       WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
                        break;
                case EXT4_FC_TAG_HEAD:
                        break;
                default:
-                       trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
-                               ext4_fc_tag_len(tl), 0);
+                       trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
+                                            le16_to_cpu(tl.fc_len), 0);
                        ret = -ECANCELED;
                        break;
                }
index b77f70f55a622b4f58bacdb8c4346017cad5f7e7..937c381b4c85e3acaece25ef3115a7ac3733d2bf 100644 (file)
@@ -153,13 +153,6 @@ struct ext4_fc_replay_state {
 #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
 #endif
 
-#define fc_for_each_tl(__start, __end, __tl)                           \
-       for (tl = (struct ext4_fc_tl *)(__start);                       \
-            (__u8 *)tl < (__u8 *)(__end);                              \
-               tl = (struct ext4_fc_tl *)((__u8 *)tl +                 \
-                                       sizeof(struct ext4_fc_tl) +     \
-                                       + le16_to_cpu(tl->fc_len)))
-
 static inline const char *tag2str(__u16 tag)
 {
        switch (tag) {
@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag)
        }
 }
 
-/* Get length of a particular tlv */
-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
-{
-       return le16_to_cpu(tl->fc_len);
-}
-
-/* Get a pointer to "value" of a tlv */
-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
-{
-       return (__u8 *)tl + sizeof(*tl);
-}
-
 #endif /* __FAST_COMMIT_H__ */
index 81a17a3cd80ef9ff58237e4e22f6fb8f1d67607d..9bab7fd4ccd5726d65ed805e2f664d1a519f397b 100644 (file)
@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
        if (is_directory) {
                count = ext4_used_dirs_count(sb, gdp) - 1;
                ext4_used_dirs_set(sb, gdp, count);
-               percpu_counter_dec(&sbi->s_dirs_counter);
+               if (percpu_counter_initialized(&sbi->s_dirs_counter))
+                       percpu_counter_dec(&sbi->s_dirs_counter);
        }
        ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
                                   EXT4_INODES_PER_GROUP(sb) / 8);
        ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
 
-       percpu_counter_inc(&sbi->s_freeinodes_counter);
+       if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
+               percpu_counter_inc(&sbi->s_freeinodes_counter);
        if (sbi->s_log_groups_per_flex) {
                struct flex_groups *fg;
 
index 3239e6669e843be728b7ccf93e9852f2adac768c..c2c22c2baac0b7590c9617d0269b9acc74ddc86e 100644 (file)
@@ -3217,7 +3217,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
                 */
                if (sbi->s_es->s_log_groups_per_flex >= 32) {
                        ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
-                       goto err_freesgi;
+                       goto err_freebuddy;
                }
                sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
                        BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
index afb9d05a99bae5705a6403afbedc726e3c587cc2..a4af26d4459a3b947277d3c3f331f93b694840e1 100644 (file)
@@ -1376,7 +1376,8 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
        struct dx_hash_info *hinfo = &name->hinfo;
        int len;
 
-       if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding) {
+       if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding ||
+           (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) {
                cf_name->name = NULL;
                return 0;
        }
@@ -1427,7 +1428,8 @@ static bool ext4_match(struct inode *parent,
 #endif
 
 #ifdef CONFIG_UNICODE
-       if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent)) {
+       if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
+           (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
                if (fname->cf_name.name) {
                        struct qstr cf = {.name = fname->cf_name.name,
                                          .len = fname->cf_name.len};
index 7dc94f3e18e6c611ca4a742c11fe821458a819bf..d29f6aa7d96eede2f7888e8ce29a59f253393ffb 100644 (file)
@@ -4462,14 +4462,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        if (sb->s_blocksize != blocksize) {
+               /*
+                * bh must be released before kill_bdev(), otherwise
+                * it won't be freed and its page also. kill_bdev()
+                * is called by sb_set_blocksize().
+                */
+               brelse(bh);
                /* Validate the filesystem blocksize */
                if (!sb_set_blocksize(sb, blocksize)) {
                        ext4_msg(sb, KERN_ERR, "bad block size %d",
                                        blocksize);
+                       bh = NULL;
                        goto failed_mount;
                }
 
-               brelse(bh);
                logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
                offset = do_div(logical_sb_block, blocksize);
                bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
@@ -5202,8 +5208,9 @@ failed_mount:
                kfree(get_qf_name(sb, sbi, i));
 #endif
        fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
-       ext4_blkdev_remove(sbi);
+       /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
        brelse(bh);
+       ext4_blkdev_remove(sbi);
 out_fail:
        sb->s_fs_info = NULL;
        kfree(sbi->s_blockgroup_lock);
index 6f825dedc3d4257b389024167a434e9fe901be89..55fcab60a59a520929b93580be022f221b125622 100644 (file)
@@ -315,7 +315,9 @@ EXT4_ATTR_FEATURE(verity);
 #endif
 EXT4_ATTR_FEATURE(metadata_csum_seed);
 EXT4_ATTR_FEATURE(fast_commit);
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
 EXT4_ATTR_FEATURE(encrypted_casefold);
+#endif
 
 static struct attribute *ext4_feat_attrs[] = {
        ATTR_LIST(lazy_itable_init),
@@ -333,7 +335,9 @@ static struct attribute *ext4_feat_attrs[] = {
 #endif
        ATTR_LIST(metadata_csum_seed),
        ATTR_LIST(fast_commit),
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
        ATTR_LIST(encrypted_casefold),
+#endif
        NULL,
 };
 ATTRIBUTE_GROUPS(ext4_feat);
index 53b13787eb2c8f774a1cb3f3f5a14df8ceff5f0c..925a5ca3744a9ffa2e4270ef5a3b63423add43d7 100644 (file)
@@ -117,19 +117,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
        f2fs_drop_rpages(cc, len, true);
 }
 
-static void f2fs_put_rpages_mapping(struct address_space *mapping,
-                               pgoff_t start, int len)
-{
-       int i;
-
-       for (i = 0; i < len; i++) {
-               struct page *page = find_get_page(mapping, start + i);
-
-               put_page(page);
-               put_page(page);
-       }
-}
-
 static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
                struct writeback_control *wbc, bool redirty, int unlock)
 {
@@ -158,13 +145,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
        return cc->rpages ? 0 : -ENOMEM;
 }
 
-void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 {
        page_array_free(cc->inode, cc->rpages, cc->cluster_size);
        cc->rpages = NULL;
        cc->nr_rpages = 0;
        cc->nr_cpages = 0;
-       cc->cluster_idx = NULL_CLUSTER;
+       if (!reuse)
+               cc->cluster_idx = NULL_CLUSTER;
 }
 
 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
@@ -1036,7 +1024,7 @@ retry:
                }
 
                if (PageUptodate(page))
-                       unlock_page(page);
+                       f2fs_put_page(page, 1);
                else
                        f2fs_compress_ctx_add_page(cc, page);
        }
@@ -1046,33 +1034,35 @@ retry:
 
                ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
                                        &last_block_in_bio, false, true);
-               f2fs_destroy_compress_ctx(cc);
+               f2fs_put_rpages(cc);
+               f2fs_destroy_compress_ctx(cc, true);
                if (ret)
-                       goto release_pages;
+                       goto out;
                if (bio)
                        f2fs_submit_bio(sbi, bio, DATA);
 
                ret = f2fs_init_compress_ctx(cc);
                if (ret)
-                       goto release_pages;
+                       goto out;
        }
 
        for (i = 0; i < cc->cluster_size; i++) {
                f2fs_bug_on(sbi, cc->rpages[i]);
 
                page = find_lock_page(mapping, start_idx + i);
-               f2fs_bug_on(sbi, !page);
+               if (!page) {
+                       /* page can be truncated */
+                       goto release_and_retry;
+               }
 
                f2fs_wait_on_page_writeback(page, DATA, true, true);
-
                f2fs_compress_ctx_add_page(cc, page);
-               f2fs_put_page(page, 0);
 
                if (!PageUptodate(page)) {
+release_and_retry:
+                       f2fs_put_rpages(cc);
                        f2fs_unlock_rpages(cc, i + 1);
-                       f2fs_put_rpages_mapping(mapping, start_idx,
-                                       cc->cluster_size);
-                       f2fs_destroy_compress_ctx(cc);
+                       f2fs_destroy_compress_ctx(cc, true);
                        goto retry;
                }
        }
@@ -1103,10 +1093,10 @@ retry:
        }
 
 unlock_pages:
+       f2fs_put_rpages(cc);
        f2fs_unlock_rpages(cc, i);
-release_pages:
-       f2fs_put_rpages_mapping(mapping, start_idx, i);
-       f2fs_destroy_compress_ctx(cc);
+       f2fs_destroy_compress_ctx(cc, true);
+out:
        return ret;
 }
 
@@ -1141,7 +1131,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
                set_cluster_dirty(&cc);
 
        f2fs_put_rpages_wbc(&cc, NULL, false, 1);
-       f2fs_destroy_compress_ctx(&cc);
+       f2fs_destroy_compress_ctx(&cc, false);
 
        return first_index;
 }
@@ -1361,7 +1351,7 @@ unlock_continue:
        f2fs_put_rpages(cc);
        page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
        cc->cpages = NULL;
-       f2fs_destroy_compress_ctx(cc);
+       f2fs_destroy_compress_ctx(cc, false);
        return 0;
 
 out_destroy_crypt:
@@ -1372,7 +1362,8 @@ out_destroy_crypt:
        for (i = 0; i < cc->nr_cpages; i++) {
                if (!cc->cpages[i])
                        continue;
-               f2fs_put_page(cc->cpages[i], 1);
+               f2fs_compress_free_page(cc->cpages[i]);
+               cc->cpages[i] = NULL;
        }
 out_put_cic:
        kmem_cache_free(cic_entry_slab, cic);
@@ -1522,7 +1513,7 @@ write:
        err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
        f2fs_put_rpages_wbc(cc, wbc, false, 0);
 destroy_out:
-       f2fs_destroy_compress_ctx(cc);
+       f2fs_destroy_compress_ctx(cc, false);
        return err;
 }
 
index 96f1a354f89fd303b82822a3e9e09a0deb2fc4e7..009a09fb9d88cf6b2804fbd131526b205d517783 100644 (file)
@@ -2287,7 +2287,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
                                                        max_nr_pages,
                                                        &last_block_in_bio,
                                                        rac != NULL, false);
-                               f2fs_destroy_compress_ctx(&cc);
+                               f2fs_destroy_compress_ctx(&cc, false);
                                if (ret)
                                        goto set_error_page;
                        }
@@ -2332,7 +2332,7 @@ next_page:
                                                        max_nr_pages,
                                                        &last_block_in_bio,
                                                        rac != NULL, false);
-                               f2fs_destroy_compress_ctx(&cc);
+                               f2fs_destroy_compress_ctx(&cc, false);
                        }
                }
 #endif
@@ -3033,7 +3033,7 @@ next:
                }
        }
        if (f2fs_compressed_file(inode))
-               f2fs_destroy_compress_ctx(&cc);
+               f2fs_destroy_compress_ctx(&cc, false);
 #endif
        if (retry) {
                index = 0;
@@ -3801,6 +3801,7 @@ static int f2fs_is_file_aligned(struct inode *inode)
        block_t pblock;
        unsigned long nr_pblocks;
        unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
+       unsigned int not_aligned = 0;
        int ret = 0;
 
        cur_lblock = 0;
@@ -3833,13 +3834,20 @@ static int f2fs_is_file_aligned(struct inode *inode)
 
                if ((pblock - main_blkaddr) & (blocks_per_sec - 1) ||
                        nr_pblocks & (blocks_per_sec - 1)) {
-                       f2fs_err(sbi, "Swapfile does not align to section");
-                       ret = -EINVAL;
-                       goto out;
+                       if (f2fs_is_pinned_file(inode)) {
+                               f2fs_err(sbi, "Swapfile does not align to section");
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       not_aligned++;
                }
 
                cur_lblock += nr_pblocks;
        }
+       if (not_aligned)
+               f2fs_warn(sbi, "Swapfile (%u) is not align to section: \n"
+                       "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
+                       not_aligned);
 out:
        return ret;
 }
@@ -3858,6 +3866,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
        int nr_extents = 0;
        unsigned long nr_pblocks;
        unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
+       unsigned int not_aligned = 0;
        int ret = 0;
 
        /*
@@ -3887,7 +3896,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
                /* hole */
                if (!(map.m_flags & F2FS_MAP_FLAGS)) {
                        f2fs_err(sbi, "Swapfile has holes\n");
-                       ret = -ENOENT;
+                       ret = -EINVAL;
                        goto out;
                }
 
@@ -3896,9 +3905,12 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
 
                if ((pblock - SM_I(sbi)->main_blkaddr) & (blocks_per_sec - 1) ||
                                nr_pblocks & (blocks_per_sec - 1)) {
-                       f2fs_err(sbi, "Swapfile does not align to section");
-                       ret = -EINVAL;
-                       goto out;
+                       if (f2fs_is_pinned_file(inode)) {
+                               f2fs_err(sbi, "Swapfile does not align to section");
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       not_aligned++;
                }
 
                if (cur_lblock + nr_pblocks >= sis->max)
@@ -3927,6 +3939,11 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
        sis->max = cur_lblock;
        sis->pages = cur_lblock - 1;
        sis->highest_bit = cur_lblock - 1;
+
+       if (not_aligned)
+               f2fs_warn(sbi, "Swapfile (%u) is not align to section: \n"
+                       "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
+                       not_aligned);
 out:
        return ret;
 }
@@ -4035,7 +4052,7 @@ out:
        return ret;
 bad_bmap:
        f2fs_err(sbi, "Swapfile has holes\n");
-       return -ENOENT;
+       return -EINVAL;
 }
 
 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
index 044878866ca349b5725b1c5ce875ee5ab6a4419a..c83d90125ebd9b315d20c45c74af13ec663b4b8a 100644 (file)
@@ -3956,7 +3956,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
 void f2fs_put_page_dic(struct page *page);
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
-void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
index 44a4650aea7b7604a7c6c83abaf8492ab48c6d59..ceb575f99048c171873a7bcf3bb5490c48b75675 100644 (file)
@@ -1817,7 +1817,8 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
        struct f2fs_inode_info *fi = F2FS_I(inode);
        u32 masked_flags = fi->i_flags & mask;
 
-       f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
+       /* mask can be shrunk by flags_valid selector */
+       iflags &= mask;
 
        /* Is it quota file? Do not allow user to mess with it */
        if (IS_NOQUOTA(inode))
index c605415840b599b182a0f31a47baa89e70a6720b..51dc79fad4fe27218677afd425e16f430cb85c9a 100644 (file)
@@ -3574,12 +3574,12 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
 
        return err;
 drop_bio:
-       if (fio->bio) {
+       if (fio->bio && *(fio->bio)) {
                struct bio *bio = *(fio->bio);
 
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
-               fio->bio = NULL;
+               *(fio->bio) = NULL;
        }
        return err;
 }
index a0b542d84cd9e3404a6ca76435a885c72a07a6fc..493a83e3f59061d59f163abb637e28a614b77e79 100644 (file)
@@ -911,8 +911,11 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                current->backing_dev_info = inode_to_bdi(inode);
                buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
                current->backing_dev_info = NULL;
-               if (unlikely(buffered <= 0))
+               if (unlikely(buffered <= 0)) {
+                       if (!ret)
+                               ret = buffered;
                        goto out_unlock;
+               }
 
                /*
                 * We need to ensure that the page cache pages are written to
index ea7fc5c641c7e8e25d9e4a477075b25a4614a36e..d9cb261f55b06cf421de7f77c34ae71751d78ba1 100644 (file)
@@ -582,6 +582,16 @@ out_locked:
        spin_unlock(&gl->gl_lockref.lock);
 }
 
+static bool is_system_glock(struct gfs2_glock *gl)
+{
+       struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+       struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+       if (gl == m_ip->i_gl)
+               return true;
+       return false;
+}
+
 /**
  * do_xmote - Calls the DLM to change the state of a lock
  * @gl: The lock state
@@ -671,17 +681,25 @@ skip_inval:
         * to see sd_log_error and withdraw, and in the meantime, requeue the
         * work for later.
         *
+        * We make a special exception for some system glocks, such as the
+        * system statfs inode glock, which needs to be granted before the
+        * gfs2_quotad daemon can exit, and that exit needs to finish before
+        * we can unmount the withdrawn file system.
+        *
         * However, if we're just unlocking the lock (say, for unmount, when
         * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
         * then it's okay to tell dlm to unlock it.
         */
        if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
                gfs2_withdraw_delayed(sdp);
-       if (glock_blocked_by_withdraw(gl)) {
-               if (target != LM_ST_UNLOCKED ||
-                   test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) {
+       if (glock_blocked_by_withdraw(gl) &&
+           (target != LM_ST_UNLOCKED ||
+            test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
+               if (!is_system_glock(gl)) {
                        gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
                        goto out;
+               } else {
+                       clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                }
        }
 
@@ -1466,9 +1484,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
            glock_blocked_by_withdraw(gl) &&
            gh->gh_gl != sdp->sd_jinode_gl) {
                sdp->sd_glock_dqs_held++;
+               spin_unlock(&gl->gl_lockref.lock);
                might_sleep();
                wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
                            TASK_UNINTERRUPTIBLE);
+               spin_lock(&gl->gl_lockref.lock);
        }
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1775,6 +1795,7 @@ __acquires(&lru_lock)
        while(!list_empty(list)) {
                gl = list_first_entry(list, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
+               clear_bit(GLF_LRU, &gl->gl_flags);
                if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
@@ -1820,7 +1841,6 @@ static long gfs2_scan_glock_lru(int nr)
                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
-                       clear_bit(GLF_LRU, &gl->gl_flags);
                        freed++;
                        continue;
                }
index 454095e9fedfdedaeb1882308f8a89d030eda112..54d3fbeb3002f7dda23f141c9a93d3799346a22a 100644 (file)
@@ -396,7 +396,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
        struct timespec64 atime;
        u16 height, depth;
        umode_t mode = be32_to_cpu(str->di_mode);
-       bool is_new = ip->i_inode.i_flags & I_NEW;
+       bool is_new = ip->i_inode.i_state & I_NEW;
 
        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
index 97d54e581a7bd7e79231f895a0821c81540d413c..42c15cfc08219f0a2f17d45f00aa2ed362a58901 100644 (file)
@@ -926,10 +926,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 }
 
 /**
- * ail_drain - drain the ail lists after a withdraw
+ * gfs2_ail_drain - drain the ail lists after a withdraw
  * @sdp: Pointer to GFS2 superblock
  */
-static void ail_drain(struct gfs2_sbd *sdp)
+void gfs2_ail_drain(struct gfs2_sbd *sdp)
 {
        struct gfs2_trans *tr;
 
@@ -956,6 +956,7 @@ static void ail_drain(struct gfs2_sbd *sdp)
                list_del(&tr->tr_list);
                gfs2_trans_free(sdp, tr);
        }
+       gfs2_drain_revokes(sdp);
        spin_unlock(&sdp->sd_ail_lock);
 }
 
@@ -1162,7 +1163,6 @@ out_withdraw:
        if (tr && list_empty(&tr->tr_list))
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);
-       ail_drain(sdp); /* frees all transactions */
        tr = NULL;
        goto out_end;
 }
index eea58015710e7ff9709a823d85868d0bf2766831..fc905c2af53ce928fa1ea1ee8dc99e53271d9c1c 100644 (file)
@@ -93,5 +93,6 @@ extern int gfs2_logd(void *data);
 extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
 extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
index 221e7118cc3b965b090a5b2c7aacb497b42eb10e..8ee05d25dfa6ce95f8451edf32a63e7794fdb452 100644 (file)
@@ -885,7 +885,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        gfs2_log_write_page(sdp, page);
 }
 
-static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+void gfs2_drain_revokes(struct gfs2_sbd *sdp)
 {
        struct list_head *head = &sdp->sd_log_revokes;
        struct gfs2_bufdata *bd;
@@ -900,6 +900,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        }
 }
 
+static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+       gfs2_drain_revokes(sdp);
+}
+
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
 {
index 31b6dd0d2e5d80ef23b900a4b91d94353221562d..f707601597dccd2d277846768b1d06f0fbbc588e 100644 (file)
@@ -20,6 +20,7 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf);
 extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
                           struct gfs2_log_header_host *head, bool keep_cache);
+extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
        return sdp->sd_ldptrs;
index 3e08027a6c81bc7054b26441003470e0bf6fcb59..f4325b44956dc89b8165d7e5ceac3a3a750f836e 100644 (file)
@@ -131,6 +131,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
                return;
 
+       gfs2_ail_drain(sdp); /* frees all transactions */
        inode = sdp->sd_jdesc->jd_inode;
        ip = GFS2_I(inode);
        i_gl = ip->i_gl;
index a930ddd156819caf54635655382d27ab5998fab9..7054a542689f9cfca6f94aadb5002aa0b7716119 100644 (file)
@@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
                res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
                if (res)
                        break;
-               hfs_brec_remove(&fd);
 
-               mutex_unlock(&fd.tree->tree_lock);
                start = hip->cached_start;
+               if (blk_cnt <= start)
+                       hfs_brec_remove(&fd);
+               mutex_unlock(&fd.tree->tree_lock);
                hfsplus_free_extents(sb, hip->cached_extents,
                                     alloc_cnt - start, alloc_cnt - blk_cnt);
                hfsplus_dump_extent(hip->cached_extents);
+               mutex_lock(&fd.tree->tree_lock);
                if (blk_cnt > start) {
                        hip->extent_state |= HFSPLUS_EXT_DIRTY;
                        break;
@@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
                alloc_cnt = start;
                hip->cached_start = hip->cached_blocks = 0;
                hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
-               mutex_lock(&fd.tree->tree_lock);
        }
        hfs_find_exit(&fd);
 
index a2a42335e8fd25e363112d225069c3ef09ae6f9e..55efd3dd04f621643f928b9292f81b8ac6331284 100644 (file)
@@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct inode *inode = file_inode(file);
+       struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);
@@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;
 
+       ret = seal_check_future_write(info->seals, vma);
+       if (ret)
+               return ret;
+
        /*
         * page based offset in vm_pgoff could be sufficiently large to
         * overflow a loff_t when converted to byte offset.  This can
@@ -524,7 +529,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                         * the subpool and global reserve usage count can need
                         * to be adjusted.
                         */
-                       VM_BUG_ON(PagePrivate(page));
+                       VM_BUG_ON(HPageRestoreReserve(page));
                        remove_huge_page(page);
                        freed++;
                        if (!truncate_op) {
index 5361a9b4b47b5f25904f0dbaa8112b79ec6611a8..b3e8624a37d0904c73151fc00a4b3bb64b60dc70 100644 (file)
@@ -979,13 +979,16 @@ static bool io_task_work_match(struct callback_head *cb, void *data)
        return cwd->wqe->wq == data;
 }
 
+void io_wq_exit_start(struct io_wq *wq)
+{
+       set_bit(IO_WQ_BIT_EXIT, &wq->state);
+}
+
 static void io_wq_exit_workers(struct io_wq *wq)
 {
        struct callback_head *cb;
        int node;
 
-       set_bit(IO_WQ_BIT_EXIT, &wq->state);
-
        if (!wq->task)
                return;
 
@@ -1003,13 +1006,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
                struct io_wqe *wqe = wq->wqes[node];
 
                io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
-               spin_lock_irq(&wq->hash->wait.lock);
-               list_del_init(&wq->wqes[node]->wait.entry);
-               spin_unlock_irq(&wq->hash->wait.lock);
        }
        rcu_read_unlock();
        io_worker_ref_put(wq);
        wait_for_completion(&wq->worker_done);
+
+       for_each_node(node) {
+               spin_lock_irq(&wq->hash->wait.lock);
+               list_del_init(&wq->wqes[node]->wait.entry);
+               spin_unlock_irq(&wq->hash->wait.lock);
+       }
        put_task_struct(wq->task);
        wq->task = NULL;
 }
@@ -1020,8 +1026,6 @@ static void io_wq_destroy(struct io_wq *wq)
 
        cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
 
-       io_wq_exit_workers(wq);
-
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
                struct io_cb_cancel_data match = {
@@ -1036,16 +1040,13 @@ static void io_wq_destroy(struct io_wq *wq)
        kfree(wq);
 }
 
-void io_wq_put(struct io_wq *wq)
-{
-       if (refcount_dec_and_test(&wq->refs))
-               io_wq_destroy(wq);
-}
-
 void io_wq_put_and_exit(struct io_wq *wq)
 {
+       WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
+
        io_wq_exit_workers(wq);
-       io_wq_put(wq);
+       if (refcount_dec_and_test(&wq->refs))
+               io_wq_destroy(wq);
 }
 
 static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
index 0e6d310999e899a9d416459138be22a23f02bf56..af2df0680ee22b2d6363df48ee0016d439f96056 100644 (file)
@@ -122,7 +122,7 @@ struct io_wq_data {
 };
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
-void io_wq_put(struct io_wq *wq);
+void io_wq_exit_start(struct io_wq *wq);
 void io_wq_put_and_exit(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
index f46acbbeed57cad1a005b8c5b8a008ef70ac9b3c..fa8794c61af7be0f1145819d954ca0325c3c3b3f 100644 (file)
 #define IORING_MAX_RESTRICTIONS        (IORING_RESTRICTION_LAST + \
                                 IORING_REGISTER_LAST + IORING_OP_LAST)
 
+#define IORING_MAX_REG_BUFFERS (1U << 14)
+
 #define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
                                IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
                                IOSQE_BUFFER_SELECT)
@@ -781,6 +783,11 @@ struct io_task_work {
        task_work_func_t        func;
 };
 
+enum {
+       IORING_RSRC_FILE                = 0,
+       IORING_RSRC_BUFFER              = 1,
+};
+
 /*
  * NOTE! Each of the iocb union members has the file pointer
  * as the first entry in their struct definition. So you can
@@ -4035,7 +4042,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
        if (sqe->ioprio || sqe->buf_index)
                return -EINVAL;
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
        req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -4150,7 +4157,7 @@ static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->buf_index)
                return -EINVAL;
@@ -5017,10 +5024,10 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                 * Can't handle multishot for double wait for now, turn it
                 * into one-shot mode.
                 */
-               if (!(req->poll.events & EPOLLONESHOT))
-                       req->poll.events |= EPOLLONESHOT;
+               if (!(poll_one->events & EPOLLONESHOT))
+                       poll_one->events |= EPOLLONESHOT;
                /* double add on the same waitqueue head, ignore */
-               if (poll->head == head)
+               if (poll_one->head == head)
                        return;
                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
@@ -5827,8 +5834,6 @@ done:
 static int io_rsrc_update_prep(struct io_kiocb *req,
                                const struct io_uring_sqe *sqe)
 {
-       if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
-               return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
        if (sqe->ioprio || sqe->rw_flags)
@@ -6354,19 +6359,20 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
         * We don't expect the list to be empty, that will only happen if we
         * race with the completion of the linked work.
         */
-       if (prev && req_ref_inc_not_zero(prev))
+       if (prev) {
                io_remove_next_linked(prev);
-       else
-               prev = NULL;
+               if (!req_ref_inc_not_zero(prev))
+                       prev = NULL;
+       }
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        if (prev) {
                io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
                io_put_req_deferred(prev, 1);
+               io_put_req_deferred(req, 1);
        } else {
                io_req_complete_post(req, -ETIME, 0);
        }
-       io_put_req_deferred(req, 1);
        return HRTIMER_NORESTART;
 }
 
@@ -8227,6 +8233,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 {
        int i, ret;
 
+       imu->acct_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                if (!PageCompound(pages[i])) {
                        imu->acct_pages++;
@@ -8390,7 +8397,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 
        if (ctx->user_bufs)
                return -EBUSY;
-       if (!nr_args || nr_args > UIO_MAXIOV)
+       if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
                return -EINVAL;
        ret = io_rsrc_node_switch_start(ctx);
        if (ret)
@@ -9034,14 +9041,19 @@ static void io_uring_del_task_file(unsigned long index)
 
 static void io_uring_clean_tctx(struct io_uring_task *tctx)
 {
+       struct io_wq *wq = tctx->io_wq;
        struct io_tctx_node *node;
        unsigned long index;
 
        xa_for_each(&tctx->xa, index, node)
                io_uring_del_task_file(index);
-       if (tctx->io_wq) {
-               io_wq_put_and_exit(tctx->io_wq);
+       if (wq) {
+               /*
+                * Must be after io_uring_del_task_file() (removes nodes under
+                * uring_lock) to avoid race with io_uring_try_cancel_iowq().
+                */
                tctx->io_wq = NULL;
+               io_wq_put_and_exit(wq);
        }
 }
 
@@ -9077,6 +9089,9 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
 
        if (!current->io_uring)
                return;
+       if (tctx->io_wq)
+               io_wq_exit_start(tctx->io_wq);
+
        WARN_ON_ONCE(!sqd || sqd->thread != current);
 
        atomic_inc(&tctx->in_idle);
@@ -9111,6 +9126,9 @@ void __io_uring_cancel(struct files_struct *files)
        DEFINE_WAIT(wait);
        s64 inflight;
 
+       if (tctx->io_wq)
+               io_wq_exit_start(tctx->io_wq);
+
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
        do {
@@ -9658,7 +9676,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
-                       IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
+                       IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+                       IORING_FEAT_RSRC_TAGS;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
@@ -9898,7 +9917,7 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
 }
 
 static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
-                                  unsigned size)
+                                  unsigned size, unsigned type)
 {
        struct io_uring_rsrc_update2 up;
 
@@ -9906,13 +9925,13 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
-       if (!up.nr)
+       if (!up.nr || up.resv)
                return -EINVAL;
-       return __io_register_rsrc_update(ctx, up.type, &up, up.nr);
+       return __io_register_rsrc_update(ctx, type, &up, up.nr);
 }
 
 static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
-                           unsigned int size)
+                           unsigned int size, unsigned int type)
 {
        struct io_uring_rsrc_register rr;
 
@@ -9923,10 +9942,10 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
        memset(&rr, 0, sizeof(rr));
        if (copy_from_user(&rr, arg, size))
                return -EFAULT;
-       if (!rr.nr)
+       if (!rr.nr || rr.resv || rr.resv2)
                return -EINVAL;
 
-       switch (rr.type) {
+       switch (type) {
        case IORING_RSRC_FILE:
                return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
                                             rr.nr, u64_to_user_ptr(rr.tags));
@@ -9948,8 +9967,10 @@ static bool io_register_op_must_quiesce(int op)
        case IORING_REGISTER_PROBE:
        case IORING_REGISTER_PERSONALITY:
        case IORING_UNREGISTER_PERSONALITY:
-       case IORING_REGISTER_RSRC:
-       case IORING_REGISTER_RSRC_UPDATE:
+       case IORING_REGISTER_FILES2:
+       case IORING_REGISTER_FILES_UPDATE2:
+       case IORING_REGISTER_BUFFERS2:
+       case IORING_REGISTER_BUFFERS_UPDATE:
                return false;
        default:
                return true;
@@ -10075,11 +10096,19 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
        case IORING_REGISTER_RESTRICTIONS:
                ret = io_register_restrictions(ctx, arg, nr_args);
                break;
-       case IORING_REGISTER_RSRC:
-               ret = io_register_rsrc(ctx, arg, nr_args);
+       case IORING_REGISTER_FILES2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_FILES_UPDATE2:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_BUFFERS2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
                break;
-       case IORING_REGISTER_RSRC_UPDATE:
-               ret = io_register_rsrc_update(ctx, arg, nr_args);
+       case IORING_REGISTER_BUFFERS_UPDATE:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_BUFFER);
                break;
        default:
                ret = -EINVAL;
index f2cd2034a87bb21fb338f67e608e15f2f66952e1..9023717c5188b11ee78c811e2f7f2f7bc351fee3 100644 (file)
@@ -394,7 +394,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 {
        struct inode *inode = rac->mapping->host;
        loff_t pos = readahead_pos(rac);
-       loff_t length = readahead_length(rac);
+       size_t length = readahead_length(rac);
        struct iomap_readpage_ctx ctx = {
                .rac    = rac,
        };
@@ -402,7 +402,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
        trace_iomap_readahead(inode, readahead_count(rac));
 
        while (length > 0) {
-               loff_t ret = iomap_apply(inode, pos, length, 0, ops,
+               ssize_t ret = iomap_apply(inode, pos, length, 0, ops,
                                &ctx, iomap_readahead_actor);
                if (ret <= 0) {
                        WARN_ON_ONCE(ret == 0);
index f63337828e1c474daa4ac8f13ebb99fc180e5eae..c3f1a78ba36977054f1acaf79889af664d664588 100644 (file)
@@ -3855,8 +3855,12 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
        if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
                return -EINVAL;
 
+       /* Don't yet support filesystem mountable in user namespaces. */
+       if (m->mnt_sb->s_user_ns != &init_user_ns)
+               return -EINVAL;
+
        /* We're not controlling the superblock. */
-       if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
+       if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
        /* Mount has already been visible in the filesystem hierarchy. */
index 578112713703b92b7cbfe578bdac9c4a374ec4ed..b4db21022cb43f3b371e31687a89ec309f5b3726 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 config NETFS_SUPPORT
-       tristate "Support for network filesystem high-level I/O"
+       tristate
        help
          This option enables support for network filesystems, including
          helpers for high-level buffered I/O, abstracting out read
index 193841d03de09b515b821ab67b50fcfc1b0b89a6..725614625ed4864cf6695aad85e2611b7946b5ed 100644 (file)
@@ -1068,7 +1068,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
        DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
 
 retry:
-       page = grab_cache_page_write_begin(mapping, index, 0);
+       page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
 
index cfeaadf56bf0622b54559a228659b16ebd8e1e18..330f65727c4543e68abec9adb2953c2005e88c50 100644 (file)
@@ -406,7 +406,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
 
        if (cl_init->hostname == NULL) {
                WARN_ON(1);
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        /* see if the client already exists */
index d158a500c25c65d048f7fd45809dabb0f3012467..d2103852475fa0f82cbf09fbefea870e46a8474d 100644 (file)
@@ -718,7 +718,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                if (unlikely(!p))
                        goto out_err;
                fl->fh_array[i]->size = be32_to_cpup(p++);
-               if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+               if (fl->fh_array[i]->size > NFS_MAXFHSIZE) {
                        printk(KERN_ERR "NFS: Too big fh %d received %d\n",
                               i, fl->fh_array[i]->size);
                        goto out_err;
index 93e60e921f9261866d3ff6bfdf2c2c2a5472a0a4..bc0c698f3350814d9411886e216a5ca7db8f1db7 100644 (file)
@@ -362,7 +362,7 @@ static const struct kernel_param_ops param_ops_nfs_timeout = {
        .set = param_set_nfs_timeout,
        .get = param_get_nfs_timeout,
 };
-#define param_check_nfs_timeout(name, p) __param_check(name, p, int);
+#define param_check_nfs_timeout(name, p) __param_check(name, p, int)
 
 module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644);
 MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout,
index 065cb04222a1bb117e7c3e0f9f5d96a2490f4b43..543d916f79abb6474eb5c05eb49f97803e634cd6 100644 (file)
@@ -205,6 +205,7 @@ struct nfs4_exception {
        struct inode *inode;
        nfs4_stateid *stateid;
        long timeout;
+       unsigned char task_is_privileged : 1;
        unsigned char delay : 1,
                      recovering : 1,
                      retry : 1;
index 889a9f4c0310d8ec0776d6fb69263fe53a062f7a..42719384e25fe62dfb3a199405ba70d046846d79 100644 (file)
@@ -435,8 +435,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
                 */
                nfs_mark_client_ready(clp, -EPERM);
        }
-       nfs_put_client(clp);
        clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
+       nfs_put_client(clp);
        return old;
 
 error:
index 57b3821d975a35962e6eabafc27e9e461fe02b2b..a1e5c6b85dedc5836b5ecbc407bc02a29dad3e72 100644 (file)
@@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
        case SEEK_HOLE:
        case SEEK_DATA:
                ret = nfs42_proc_llseek(filep, offset, whence);
-               if (ret != -ENOTSUPP)
+               if (ret != -EOPNOTSUPP)
                        return ret;
                fallthrough;
        default:
index 87d04f2c93852b28a8f7ce9c34d1ef627715ba56..e653654c10bcd8e49c2809891966448dc9975a41 100644 (file)
@@ -589,6 +589,8 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
                goto out_retry;
        }
        if (exception->recovering) {
+               if (exception->task_is_privileged)
+                       return -EDEADLOCK;
                ret = nfs4_wait_clnt_recover(clp);
                if (test_bit(NFS_MIG_FAILED, &server->mig_status))
                        return -EIO;
@@ -614,6 +616,8 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
                goto out_retry;
        }
        if (exception->recovering) {
+               if (exception->task_is_privileged)
+                       return -EDEADLOCK;
                rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
                if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
                        rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
@@ -1706,7 +1710,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
                rcu_read_unlock();
                trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
 
-               if (!signal_pending(current)) {
+               if (!fatal_signal_pending(current)) {
                        if (schedule_timeout(5*HZ) == 0)
                                status = -EAGAIN;
                        else
@@ -3487,7 +3491,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
                write_sequnlock(&state->seqlock);
                trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
 
-               if (signal_pending(current))
+               if (fatal_signal_pending(current))
                        status = -EINTR;
                else
                        if (schedule_timeout(5*HZ) != 0)
@@ -3878,6 +3882,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
                        server->caps |= NFS_CAP_HARDLINKS;
                if (res.has_symlinks != 0)
                        server->caps |= NFS_CAP_SYMLINKS;
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+               if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
+                       server->caps |= NFS_CAP_SECURITY_LABEL;
+#endif
                if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
                        server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
                if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3898,10 +3906,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
                        server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
                if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
                        server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
-               if (!(res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL))
-                       server->fattr_valid &= ~NFS_ATTR_FATTR_V4_SECURITY_LABEL;
-#endif
                memcpy(server->attr_bitmask_nl, res.attr_bitmask,
                                sizeof(server->attr_bitmask));
                server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
@@ -5968,6 +5972,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
        do {
                err = __nfs4_proc_set_acl(inode, buf, buflen);
                trace_nfs4_set_acl(inode, err);
+               if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
+                       /*
+                        * no need to retry since the kernel
+                        * isn't involved in encoding the ACEs.
+                        */
+                       err = -EINVAL;
+                       break;
+               }
                err = nfs4_handle_exception(NFS_SERVER(inode), err,
                                &exception);
        } while (exception.retry);
@@ -6409,6 +6421,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        struct nfs4_exception exception = {
                .inode = data->inode,
                .stateid = &data->stateid,
+               .task_is_privileged = data->args.seq_args.sa_privileged,
        };
 
        if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6532,7 +6545,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (data == NULL)
                return -ENOMEM;
-       nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
 
        nfs4_state_protect(server->nfs_client,
                        NFS_SP4_MACH_CRED_CLEANUP,
@@ -6563,6 +6575,12 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
                }
        }
 
+       if (!data->inode)
+               nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+                                  1);
+       else
+               nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+                                  0);
        task_setup_data.callback_data = data;
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
@@ -9640,15 +9658,20 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
                        &task_setup_data.rpc_client, &msg);
 
        dprintk("--> %s\n", __func__);
+       lrp->inode = nfs_igrab_and_active(lrp->args.inode);
        if (!sync) {
-               lrp->inode = nfs_igrab_and_active(lrp->args.inode);
                if (!lrp->inode) {
                        nfs4_layoutreturn_release(lrp);
                        return -EAGAIN;
                }
                task_setup_data.flags |= RPC_TASK_ASYNC;
        }
-       nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
+       if (!lrp->inode)
+               nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+                                  1);
+       else
+               nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+                                  0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
index eb1ef3462e84200d74d6753df6f0c17c3f34e894..ccef43e02b4818c22c16ce7a75a7fe3d86516f8f 100644 (file)
@@ -430,10 +430,6 @@ TRACE_DEFINE_ENUM(O_CLOEXEC);
                { O_NOATIME, "O_NOATIME" }, \
                { O_CLOEXEC, "O_CLOEXEC" })
 
-TRACE_DEFINE_ENUM(FMODE_READ);
-TRACE_DEFINE_ENUM(FMODE_WRITE);
-TRACE_DEFINE_ENUM(FMODE_EXEC);
-
 #define show_fmode_flags(mode) \
        __print_flags(mode, "|", \
                { ((__force unsigned long)FMODE_READ), "READ" }, \
index 6c20b28d9d7c1a9032e89f83a8e751b625469cb2..cf9cc62ec48ec32a29aa3ebc8f79a86fe24a8c29 100644 (file)
@@ -1094,15 +1094,16 @@ nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
        struct nfs_page *prev = NULL;
        unsigned int size;
 
-       if (mirror->pg_count != 0) {
-               prev = nfs_list_entry(mirror->pg_list.prev);
-       } else {
+       if (list_empty(&mirror->pg_list)) {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                if (desc->pg_error < 0)
                        return 0;
                mirror->pg_base = req->wb_pgbase;
-       }
+               mirror->pg_count = 0;
+               mirror->pg_recoalesce = 0;
+       } else
+               prev = nfs_list_entry(mirror->pg_list.prev);
 
        if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
                if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
@@ -1127,18 +1128,13 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
 {
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 
-
        if (!list_empty(&mirror->pg_list)) {
                int error = desc->pg_ops->pg_doio(desc);
                if (error < 0)
                        desc->pg_error = error;
-               else
+               if (list_empty(&mirror->pg_list))
                        mirror->pg_bytes_written += mirror->pg_count;
        }
-       if (list_empty(&mirror->pg_list)) {
-               mirror->pg_count = 0;
-               mirror->pg_base = 0;
-       }
 }
 
 static void
@@ -1227,10 +1223,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
 
        do {
                list_splice_init(&mirror->pg_list, &head);
-               mirror->pg_bytes_written -= mirror->pg_count;
-               mirror->pg_count = 0;
-               mirror->pg_base = 0;
-               mirror->pg_recoalesce = 0;
 
                while (!list_empty(&head)) {
                        struct nfs_page *req;
index 03e0b34c4a64cc7a7299866ed13c99b417ade0aa..2c01ee805306c4e88067dcf84c719f9828046b7d 100644 (file)
@@ -1317,6 +1317,11 @@ _pnfs_return_layout(struct inode *ino)
 {
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
+       struct pnfs_layout_range range = {
+               .iomode         = IOMODE_ANY,
+               .offset         = 0,
+               .length         = NFS4_MAX_UINT64,
+       };
        LIST_HEAD(tmp_list);
        const struct cred *cred;
        nfs4_stateid stateid;
@@ -1344,16 +1349,10 @@ _pnfs_return_layout(struct inode *ino)
        }
        valid_layout = pnfs_layout_is_valid(lo);
        pnfs_clear_layoutcommit(ino, &tmp_list);
-       pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+       pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
 
-       if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
-               struct pnfs_layout_range range = {
-                       .iomode         = IOMODE_ANY,
-                       .offset         = 0,
-                       .length         = NFS4_MAX_UINT64,
-               };
+       if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
                NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
-       }
 
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
@@ -2678,7 +2677,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
-       u64 rd_size = req->wb_bytes;
+       u64 rd_size;
 
        pnfs_generic_pg_check_layout(pgio);
        pnfs_generic_pg_check_range(pgio, req);
index 19a212f9725de4999ee983a2b31fb62842d7c06a..fe58525cfed48388364bc04ea97119d79b6409bb 100644 (file)
@@ -1379,7 +1379,7 @@ static const struct kernel_param_ops param_ops_portnr = {
        .set = param_set_portnr,
        .get = param_get_uint,
 };
-#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int)
 
 module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
 module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644);
index 71fefb30e015669259a3a4ed2ce759a715ae7740..be5b6d2c01e7ac5e65a70b7ed893082f1fdf05d5 100644 (file)
@@ -424,11 +424,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
         * events generated by the listener process itself, without disclosing
         * the pids of other processes.
         */
-       if (!capable(CAP_SYS_ADMIN) &&
+       if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
            task_tgid(current) != event->pid)
                metadata.pid = 0;
 
-       if (path && path->mnt && path->dentry) {
+       /*
+        * For now, fid mode is required for an unprivileged listener and
+        * fid mode does not report fd in events.  Keep this check anyway
+        * for safety in case fid mode requirement is relaxed in the future
+        * to allow unprivileged listener to get events with no fd and no fid.
+        */
+       if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
+           path && path->mnt && path->dentry) {
                fd = create_fd(group, path, &f);
                if (fd < 0)
                        return fd;
@@ -1040,6 +1047,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
        int f_flags, fd;
        unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
        unsigned int class = flags & FANOTIFY_CLASS_BITS;
+       unsigned int internal_flags = 0;
 
        pr_debug("%s: flags=%x event_f_flags=%x\n",
                 __func__, flags, event_f_flags);
@@ -1053,6 +1061,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
                 */
                if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode)
                        return -EPERM;
+
+               /*
+                * Setting the internal flag FANOTIFY_UNPRIV on the group
+                * prevents setting mount/filesystem marks on this group and
+                * prevents reporting pid and open fd in events.
+                */
+               internal_flags |= FANOTIFY_UNPRIV;
        }
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -1105,7 +1120,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
                goto out_destroy_group;
        }
 
-       group->fanotify_data.flags = flags;
+       group->fanotify_data.flags = flags | internal_flags;
        group->memcg = get_mem_cgroup_from_mm(current->mm);
 
        group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
@@ -1305,11 +1320,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        group = f.file->private_data;
 
        /*
-        * An unprivileged user is not allowed to watch a mount point nor
-        * a filesystem.
+        * An unprivileged user is not allowed to setup mount nor filesystem
+        * marks.  This also includes setting up such marks by a group that
+        * was initialized by an unprivileged user.
         */
        ret = -EPERM;
-       if (!capable(CAP_SYS_ADMIN) &&
+       if ((!capable(CAP_SYS_ADMIN) ||
+            FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) &&
            mark_type != FAN_MARK_INODE)
                goto fput_and_out;
 
@@ -1460,6 +1477,7 @@ static int __init fanotify_user_setup(void)
        max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS,
                                     FANOTIFY_DEFAULT_MAX_USER_MARKS);
 
+       BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
 
index a712b2aaa9ac96542e370d0abb2bb252385ab898..57f0d5d9f934efecf793335386c09da4cfba1c09 100644 (file)
@@ -144,7 +144,7 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
        struct fsnotify_group *group = f->private_data;
 
        seq_printf(m, "fanotify flags:%x event-flags:%x\n",
-                  group->fanotify_data.flags,
+                  group->fanotify_data.flags & FANOTIFY_INIT_FLAGS,
                   group->fanotify_data.f_flags);
 
        show_fdinfo(m, f, fanotify_fdinfo);
index f17c3d33fb18a03d20985d9215a8fc1ccc992f4a..7756579430578ba1f91892b321eab2d6dcd01941 100644 (file)
@@ -1855,6 +1855,45 @@ out:
        return ret;
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+                                       u64 start, u64 len)
+{
+       int ret;
+       u64 start_block, end_block, nr_blocks;
+       u64 p_block, offset;
+       u32 cluster, p_cluster, nr_clusters;
+       struct super_block *sb = inode->i_sb;
+       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+       if (start + len < end)
+               end = start + len;
+
+       start_block = ocfs2_blocks_for_bytes(sb, start);
+       end_block = ocfs2_blocks_for_bytes(sb, end);
+       nr_blocks = end_block - start_block;
+       if (!nr_blocks)
+               return 0;
+
+       cluster = ocfs2_bytes_to_clusters(sb, start);
+       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+                               &nr_clusters, NULL);
+       if (ret)
+               return ret;
+       if (!p_cluster)
+               return 0;
+
+       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 {
        int ret;
        s64 llen;
-       loff_t size;
+       loff_t size, orig_isize;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
+       orig_isize = i_size_read(inode);
        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
-               sr->l_start += i_size_read(inode);
+               sr->l_start += orig_isize;
                break;
        default:
                ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        default:
                ret = -EINVAL;
        }
+
+       /* zeroout eof blocks in the cluster. */
+       if (!ret && change_size && orig_isize < size) {
+               ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+                                       size - orig_isize);
+               if (!ret)
+                       i_size_write(inode, size);
+       }
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (ret) {
                mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
-       if (change_size && i_size_read(inode) < size)
-               i_size_write(inode, size);
-
        inode->i_ctime = inode->i_mtime = current_time(inode);
        ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
        if (ret < 0)
index 3851bfcdba56ef929411e99a70bb8895bc609e85..7118ebe38fa62e9d2ae0eb51b96bbf11ac2d0ac8 100644 (file)
@@ -2674,6 +2674,11 @@ out:
 }
 
 #ifdef CONFIG_SECURITY
+static int proc_pid_attr_open(struct inode *inode, struct file *file)
+{
+       return __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
+}
+
 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
                                  size_t count, loff_t *ppos)
 {
@@ -2703,6 +2708,10 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
        void *page;
        int rv;
 
+       /* A task may only write when it was the opener. */
+       if (file->private_data != current->mm)
+               return -EPERM;
+
        rcu_read_lock();
        task = pid_task(proc_pid(inode), PIDTYPE_PID);
        if (!task) {
@@ -2750,9 +2759,11 @@ out:
 }
 
 static const struct file_operations proc_pid_attr_operations = {
+       .open           = proc_pid_attr_open,
        .read           = proc_pid_attr_read,
        .write          = proc_pid_attr_write,
        .llseek         = generic_file_llseek,
+       .release        = mem_release,
 };
 
 #define LSM_DIR_OPS(LSM) \
index 4f13734637660056e299d4ed04ceb38fb04cb580..22d904bde6ab9158702e4d4d83a42ebb4056770d 100644 (file)
@@ -288,14 +288,12 @@ static inline void remove_dquot_hash(struct dquot *dquot)
 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
                                struct kqid qid)
 {
-       struct hlist_node *node;
        struct dquot *dquot;
 
-       hlist_for_each (node, dquot_hash+hashent) {
-               dquot = hlist_entry(node, struct dquot, dq_hash);
+       hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
                if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
                        return dquot;
-       }
+
        return NULL;
 }
 
index 040a1142915fcd494dfb39c6ba036279c7492de1..167b5889db4bbc2df27b09401bade062d0ba140a 100644 (file)
@@ -114,29 +114,24 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                break;
        case SIL_FAULT_BNDERR:
        case SIL_FAULT_PKUERR:
+       case SIL_PERF_EVENT:
                /*
-                * Fall through to the SIL_FAULT case.  Both SIL_FAULT_BNDERR
-                * and SIL_FAULT_PKUERR are only generated by faults that
-                * deliver them synchronously to userspace.  In case someone
-                * injects one of these signals and signalfd catches it treat
-                * it as SIL_FAULT.
+                * Fall through to the SIL_FAULT case.  SIL_FAULT_BNDERR,
+                * SIL_FAULT_PKUERR, and SIL_PERF_EVENT are only
+                * generated by faults that deliver them synchronously to
+                * userspace.  In case someone injects one of these signals
+                * and signalfd catches it treat it as SIL_FAULT.
                 */
        case SIL_FAULT:
                new.ssi_addr = (long) kinfo->si_addr;
-#ifdef __ARCH_SI_TRAPNO
-               new.ssi_trapno = kinfo->si_trapno;
-#endif
                break;
-       case SIL_FAULT_MCEERR:
+       case SIL_FAULT_TRAPNO:
                new.ssi_addr = (long) kinfo->si_addr;
-#ifdef __ARCH_SI_TRAPNO
                new.ssi_trapno = kinfo->si_trapno;
-#endif
-               new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
                break;
-       case SIL_PERF_EVENT:
+       case SIL_FAULT_MCEERR:
                new.ssi_addr = (long) kinfo->si_addr;
-               new.ssi_perf = kinfo->si_perf;
+               new.ssi_addr_lsb = (short) kinfo->si_addr_lsb;
                break;
        case SIL_CHLD:
                new.ssi_pid    = kinfo->si_pid;
index 7b1128398976eec2256be2a1a0476ae6045a9560..89d492916deaf80a7dca06c7201f5e22c3bca875 100644 (file)
@@ -211,11 +211,11 @@ failure:
  * If the skip factor is limited in this way then the file will use multiple
  * slots.
  */
-static inline int calculate_skip(int blocks)
+static inline int calculate_skip(u64 blocks)
 {
-       int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+       u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
                 * SQUASHFS_META_INDEXES);
-       return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
+       return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
 }
 
 
index e32a1833d5231e2a971b939d453eef1ca6c72fa5..bbfea8022a3b96c5bc25bb012b07a90f9241032b 100644 (file)
@@ -325,10 +325,22 @@ out:
                error2 = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, 0);
                if (error2)
                        return error2;
-               ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
-                      xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved <=
-                      pag->pagf_freeblks + pag->pagf_flcount);
+
+               /*
+                * If there isn't enough space in the AG to satisfy the
+                * reservation, let the caller know that there wasn't enough
+                * space.  Callers are responsible for deciding what to do
+                * next, since (in theory) we can stumble along with
+                * insufficient reservation if data blocks are being freed to
+                * replenish the AG's free space.
+                */
+               if (!error &&
+                   xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
+                   xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved >
+                   pag->pagf_freeblks + pag->pagf_flcount)
+                       error = -ENOSPC;
        }
+
        return error;
 }
 
index 7e3b9b01431e573a92db759a1025d58e5a7f84ea..a3e0e6f672d63ad29ec04e0704eb036cb8a12172 100644 (file)
@@ -605,7 +605,6 @@ xfs_bmap_btree_to_extents(
 
        ASSERT(cur);
        ASSERT(whichfork != XFS_COW_FORK);
-       ASSERT(!xfs_need_iread_extents(ifp));
        ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
        ASSERT(be16_to_cpu(rblock->bb_level) == 1);
        ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
@@ -5350,7 +5349,6 @@ __xfs_bunmapi(
        xfs_fsblock_t           sum;
        xfs_filblks_t           len = *rlen;    /* length to unmap in file */
        xfs_fileoff_t           max_len;
-       xfs_agnumber_t          prev_agno = NULLAGNUMBER, agno;
        xfs_fileoff_t           end;
        struct xfs_iext_cursor  icur;
        bool                    done = false;
@@ -5442,16 +5440,6 @@ __xfs_bunmapi(
                del = got;
                wasdel = isnullstartblock(del.br_startblock);
 
-               /*
-                * Make sure we don't touch multiple AGF headers out of order
-                * in a single transaction, as that could cause AB-BA deadlocks.
-                */
-               if (!wasdel && !isrt) {
-                       agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
-                       if (prev_agno != NULLAGNUMBER && prev_agno > agno)
-                               break;
-                       prev_agno = agno;
-               }
                if (got.br_startoff < start) {
                        del.br_startoff = start;
                        del.br_blockcount -= start - got.br_startoff;
index a83bdd0c47a846e2fa387818be20c6c2b6017521..bde2b4c64dbe30d13eef44f32005c35fe349ae1e 100644 (file)
@@ -770,6 +770,8 @@ struct xfs_scrub_metadata {
 /*
  * ioctl commands that are used by Linux filesystems
  */
+#define XFS_IOC_GETXFLAGS      FS_IOC_GETFLAGS
+#define XFS_IOC_SETXFLAGS      FS_IOC_SETFLAGS
 #define XFS_IOC_GETVERSION     FS_IOC_GETVERSION
 
 /*
@@ -780,6 +782,8 @@ struct xfs_scrub_metadata {
 #define XFS_IOC_ALLOCSP                _IOW ('X', 10, struct xfs_flock64)
 #define XFS_IOC_FREESP         _IOW ('X', 11, struct xfs_flock64)
 #define XFS_IOC_DIOINFO                _IOR ('X', 30, struct dioattr)
+#define XFS_IOC_FSGETXATTR     FS_IOC_FSGETXATTR
+#define XFS_IOC_FSSETXATTR     FS_IOC_FSSETXATTR
 #define XFS_IOC_ALLOCSP64      _IOW ('X', 36, struct xfs_flock64)
 #define XFS_IOC_FREESP64       _IOW ('X', 37, struct xfs_flock64)
 #define XFS_IOC_GETBMAP                _IOWR('X', 38, struct getbmap)
index 5c9a7440d9e4ed5eae62debe0242e152f7c182f3..f3254a4f4cb4bf5ea7417bb1a0364143890f98d3 100644 (file)
@@ -559,8 +559,17 @@ xfs_dinode_calc_crc(
 /*
  * Validate di_extsize hint.
  *
- * The rules are documented at xfs_ioctl_setattr_check_extsize().
- * These functions must be kept in sync with each other.
+ * 1. Extent size hint is only valid for directories and regular files.
+ * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
+ * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
+ * 4. Hint cannot be larger than MAXEXTLEN.
+ * 5. Can be changed on directories at any time.
+ * 6. Hint value of 0 turns off hints, clears inode flags.
+ * 7. Extent size must be a multiple of the appropriate block size.
+ *    For realtime files, this is the rt extent size.
+ * 8. For non-realtime files, the extent size hint must be limited
+ *    to half the AG size to avoid alignment extending the extent beyond the
+ *    limits of the AG.
  */
 xfs_failaddr_t
 xfs_inode_validate_extsize(
@@ -580,6 +589,28 @@ xfs_inode_validate_extsize(
        inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
        extsize_bytes = XFS_FSB_TO_B(mp, extsize);
 
+       /*
+        * This comment describes a historic gap in this verifier function.
+        *
+        * On older kernels, the extent size hint verifier doesn't check that
+        * the extent size hint is an integer multiple of the realtime extent
+        * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
+        * The verifier has always enforced the alignment rule for regular
+        * files with the REALTIME flag set.
+        *
+        * If a directory with a misaligned extent size hint is allowed to
+        * propagate that hint into a new regular realtime file, the result
+        * is that the inode cluster buffer verifier will trigger a corruption
+        * shutdown the next time it is run.
+        *
+        * Unfortunately, there could be filesystems with these misconfigured
+        * directories in the wild, so we cannot add a check to this verifier
+        * at this time because that will result in a new source of directory
+        * corruption errors when reading an existing filesystem.  Instead, we
+        * permit the misconfiguration to pass through the verifiers so that
+        * callers of this function can correct and mitigate externally.
+        */
+
        if (rt_flag)
                blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
        else
@@ -616,8 +647,15 @@ xfs_inode_validate_extsize(
 /*
  * Validate di_cowextsize hint.
  *
- * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
- * These functions must be kept in sync with each other.
+ * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
+ *    The inode does not have to have any shared blocks, but it must be a v3.
+ * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
+ *    for a directory, the hint is propagated to new files.
+ * 3. Can be changed on files & directories at any time.
+ * 4. Hint value of 0 turns off hints, clears inode flags.
+ * 5. Extent size must be a multiple of the appropriate block size.
+ * 6. The extent size hint must be limited to half the AG size to avoid
+ *    alignment extending the extent beyond the limits of the AG.
  */
 xfs_failaddr_t
 xfs_inode_validate_cowextsize(
index 78324e043e257224b802c8e666e43b4df246f5d8..8d595a5c4abd1d85a6efc442f090337b2fc76b3a 100644 (file)
@@ -142,6 +142,23 @@ xfs_trans_log_inode(
                flags |= XFS_ILOG_CORE;
        }
 
+       /*
+        * Inode verifiers on older kernels don't check that the extent size
+        * hint is an integer multiple of the rt extent size on a directory
+        * with both rtinherit and extszinherit flags set.  If we're logging a
+        * directory that is misconfigured in this way, clear the hint.
+        */
+       if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+           (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
+           (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+               xfs_info_once(ip->i_mount,
+       "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
+               ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+                                  XFS_DIFLAG_EXTSZINHERIT);
+               ip->i_extsize = 0;
+               flags |= XFS_ILOG_CORE;
+       }
+
        /*
         * Record the specific change for fdatasync optimisation. This allows
         * fdatasync to skip log forces for inodes that are only timestamp
index aa874607618a22e0dbc763f5e540994619231057..be38c960da85863ca6f530b2db3e52fc4f22cc80 100644 (file)
@@ -74,7 +74,9 @@ __xchk_process_error(
                return true;
        case -EDEADLOCK:
                /* Used to restart an op with deadlock avoidance. */
-               trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
+               trace_xchk_deadlock_retry(
+                               sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
+                               sc->sm, *error);
                break;
        case -EFSBADCRC:
        case -EFSCORRUPTED:
index a5e9d7d34023f21643a4ca4fd1c9a903abafdb2b..0936f3a96fe6e845007e444bad9a33fa0e8fa12a 100644 (file)
@@ -71,18 +71,24 @@ xfs_zero_extent(
 #ifdef CONFIG_XFS_RT
 int
 xfs_bmap_rtalloc(
-       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
+       struct xfs_bmalloca     *ap)
 {
-       int             error;          /* error return value */
-       xfs_mount_t     *mp;            /* mount point structure */
-       xfs_extlen_t    prod = 0;       /* product factor for allocators */
-       xfs_extlen_t    mod = 0;        /* product factor for allocators */
-       xfs_extlen_t    ralen = 0;      /* realtime allocation length */
-       xfs_extlen_t    align;          /* minimum allocation alignment */
-       xfs_rtblock_t   rtb;
-
-       mp = ap->ip->i_mount;
+       struct xfs_mount        *mp = ap->ip->i_mount;
+       xfs_fileoff_t           orig_offset = ap->offset;
+       xfs_rtblock_t           rtb;
+       xfs_extlen_t            prod = 0;  /* product factor for allocators */
+       xfs_extlen_t            mod = 0;   /* product factor for allocators */
+       xfs_extlen_t            ralen = 0; /* realtime allocation length */
+       xfs_extlen_t            align;     /* minimum allocation alignment */
+       xfs_extlen_t            orig_length = ap->length;
+       xfs_extlen_t            minlen = mp->m_sb.sb_rextsize;
+       xfs_extlen_t            raminlen;
+       bool                    rtlocked = false;
+       bool                    ignore_locality = false;
+       int                     error;
+
        align = xfs_get_extsz_hint(ap->ip);
+retry:
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
@@ -92,6 +98,15 @@ xfs_bmap_rtalloc(
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
 
+       /*
+        * If we shifted the file offset downward to satisfy an extent size
+        * hint, increase minlen by that amount so that the allocator won't
+        * give us an allocation that's too short to cover at least one of the
+        * blocks that the caller asked for.
+        */
+       if (ap->offset != orig_offset)
+               minlen += orig_offset - ap->offset;
+
        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
@@ -116,10 +131,13 @@ xfs_bmap_rtalloc(
        /*
         * Lock out modifications to both the RT bitmap and summary inodes
         */
-       xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
-       xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
-       xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
-       xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+       if (!rtlocked) {
+               xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
+               xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+               xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
+               xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+               rtlocked = true;
+       }
 
        /*
         * If it's an allocation to an empty file at offset 0,
@@ -141,33 +159,59 @@ xfs_bmap_rtalloc(
        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
-       do_div(ap->blkno, mp->m_sb.sb_rextsize);
+       if (ignore_locality)
+               ap->blkno = 0;
+       else
+               do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
-       error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
-                               &ralen, ap->wasdel, prod, &rtb);
+       raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
+       error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
+                       &ralen, ap->wasdel, prod, &rtb);
        if (error)
                return error;
 
-       ap->blkno = rtb;
-       if (ap->blkno != NULLFSBLOCK) {
-               ap->blkno *= mp->m_sb.sb_rextsize;
-               ralen *= mp->m_sb.sb_rextsize;
-               ap->length = ralen;
-               ap->ip->i_nblocks += ralen;
+       if (rtb != NULLRTBLOCK) {
+               ap->blkno = rtb * mp->m_sb.sb_rextsize;
+               ap->length = ralen * mp->m_sb.sb_rextsize;
+               ap->ip->i_nblocks += ap->length;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
-                       ap->ip->i_delayed_blks -= ralen;
+                       ap->ip->i_delayed_blks -= ap->length;
                /*
                 * Adjust the disk quota also. This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
-                                       XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
-       } else {
-               ap->length = 0;
+                                       XFS_TRANS_DQ_RTBCOUNT, ap->length);
+               return 0;
        }
+
+       if (align > mp->m_sb.sb_rextsize) {
+               /*
+                * We previously enlarged the request length to try to satisfy
+                * an extent size hint.  The allocator didn't return anything,
+                * so reset the parameters to the original values and try again
+                * without alignment criteria.
+                */
+               ap->offset = orig_offset;
+               ap->length = orig_length;
+               minlen = align = mp->m_sb.sb_rextsize;
+               goto retry;
+       }
+
+       if (!ignore_locality && ap->blkno != 0) {
+               /*
+                * If we can't allocate near a specific rt extent, try again
+                * without locality criteria.
+                */
+               ignore_locality = true;
+               goto retry;
+       }
+
+       ap->blkno = NULLFSBLOCK;
+       ap->length = 0;
        return 0;
 }
 #endif /* CONFIG_XFS_RT */
index 0369eb22c1bb0fd04e8b97f99703494a23bf69b5..e4c2da4566f13f1748de5c8c5e54c683dd7be866 100644 (file)
@@ -690,6 +690,7 @@ xfs_inode_inherit_flags(
        const struct xfs_inode  *pip)
 {
        unsigned int            di_flags = 0;
+       xfs_failaddr_t          failaddr;
        umode_t                 mode = VFS_I(ip)->i_mode;
 
        if (S_ISDIR(mode)) {
@@ -729,6 +730,24 @@ xfs_inode_inherit_flags(
                di_flags |= XFS_DIFLAG_FILESTREAM;
 
        ip->i_diflags |= di_flags;
+
+       /*
+        * Inode verifiers on older kernels only check that the extent size
+        * hint is an integer multiple of the rt extent size on realtime files.
+        * They did not check the hint alignment on a directory with both
+        * rtinherit and extszinherit flags set.  If the misaligned hint is
+        * propagated from a directory into a new realtime file, new file
+        * allocations will fail due to math errors in the rt allocator and/or
+        * trip the verifiers.  Validate the hint settings in the new file so
+        * that we don't let broken hints propagate.
+        */
+       failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
+                       VFS_I(ip)->i_mode, ip->i_diflags);
+       if (failaddr) {
+               ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+                                  XFS_DIFLAG_EXTSZINHERIT);
+               ip->i_extsize = 0;
+       }
 }
 
 /* Propagate di_flags2 from a parent inode to a child inode. */
@@ -737,12 +756,22 @@ xfs_inode_inherit_flags2(
        struct xfs_inode        *ip,
        const struct xfs_inode  *pip)
 {
+       xfs_failaddr_t          failaddr;
+
        if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
                ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
                ip->i_cowextsize = pip->i_cowextsize;
        }
        if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
                ip->i_diflags2 |= XFS_DIFLAG2_DAX;
+
+       /* Don't let invalid cowextsize hints propagate. */
+       failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
+                       VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
+       if (failaddr) {
+               ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
+               ip->i_cowextsize = 0;
+       }
 }
 
 /*
index 3925bfcb236570bf75241e466b15d2aa70f30461..1fe4c1fc0aeaeeebe05c6391625dbfb48f85323f 100644 (file)
@@ -1267,20 +1267,8 @@ out_error:
 }
 
 /*
- * extent size hint validation is somewhat cumbersome. Rules are:
- *
- * 1. extent size hint is only valid for directories and regular files
- * 2. FS_XFLAG_EXTSIZE is only valid for regular files
- * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
- * 4. can only be changed on regular files if no extents are allocated
- * 5. can be changed on directories at any time
- * 6. extsize hint of 0 turns off hints, clears inode flags.
- * 7. Extent size must be a multiple of the appropriate block size.
- * 8. for non-realtime files, the extent size hint must be limited
- *    to half the AG size to avoid alignment extending the extent beyond the
- *    limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_extsize.
+ * Validate a proposed extent size hint.  For regular files, the hint can only
+ * be changed if no extents are allocated.
  */
 static int
 xfs_ioctl_setattr_check_extsize(
@@ -1288,86 +1276,65 @@ xfs_ioctl_setattr_check_extsize(
        struct fileattr         *fa)
 {
        struct xfs_mount        *mp = ip->i_mount;
-       xfs_extlen_t            size;
-       xfs_fsblock_t           extsize_fsb;
+       xfs_failaddr_t          failaddr;
+       uint16_t                new_diflags;
 
        if (!fa->fsx_valid)
                return 0;
 
        if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
-           ((ip->i_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
+           XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
                return -EINVAL;
 
-       if (fa->fsx_extsize == 0)
-               return 0;
-
-       extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
-       if (extsize_fsb > MAXEXTLEN)
+       if (fa->fsx_extsize & mp->m_blockmask)
                return -EINVAL;
 
-       if (XFS_IS_REALTIME_INODE(ip) ||
-           (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
-               size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
-       } else {
-               size = mp->m_sb.sb_blocksize;
-               if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
+       new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+
+       /*
+        * Inode verifiers on older kernels don't check that the extent size
+        * hint is an integer multiple of the rt extent size on a directory
+        * with both rtinherit and extszinherit flags set.  Don't let sysadmins
+        * misconfigure directories.
+        */
+       if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
+           (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
+               unsigned int    rtextsize_bytes;
+
+               rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
+               if (fa->fsx_extsize % rtextsize_bytes)
                        return -EINVAL;
        }
 
-       if (fa->fsx_extsize % size)
-               return -EINVAL;
-
-       return 0;
+       failaddr = xfs_inode_validate_extsize(ip->i_mount,
+                       XFS_B_TO_FSB(mp, fa->fsx_extsize),
+                       VFS_I(ip)->i_mode, new_diflags);
+       return failaddr != NULL ? -EINVAL : 0;
 }
 
-/*
- * CoW extent size hint validation rules are:
- *
- * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
- *    The inode does not have to have any shared blocks, but it must be a v3.
- * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
- *    for a directory, the hint is propagated to new files.
- * 3. Can be changed on files & directories at any time.
- * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
- * 5. Extent size must be a multiple of the appropriate block size.
- * 6. The extent size hint must be limited to half the AG size to avoid
- *    alignment extending the extent beyond the limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_cowextsize.
- */
 static int
 xfs_ioctl_setattr_check_cowextsize(
        struct xfs_inode        *ip,
        struct fileattr         *fa)
 {
        struct xfs_mount        *mp = ip->i_mount;
-       xfs_extlen_t            size;
-       xfs_fsblock_t           cowextsize_fsb;
+       xfs_failaddr_t          failaddr;
+       uint64_t                new_diflags2;
+       uint16_t                new_diflags;
 
        if (!fa->fsx_valid)
                return 0;
 
-       if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
-               return 0;
-
-       if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
+       if (fa->fsx_cowextsize & mp->m_blockmask)
                return -EINVAL;
 
-       if (fa->fsx_cowextsize == 0)
-               return 0;
+       new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+       new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
 
-       cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
-       if (cowextsize_fsb > MAXEXTLEN)
-               return -EINVAL;
-
-       size = mp->m_sb.sb_blocksize;
-       if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
-               return -EINVAL;
-
-       if (fa->fsx_cowextsize % size)
-               return -EINVAL;
-
-       return 0;
+       failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
+                       XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
+                       VFS_I(ip)->i_mode, new_diflags, new_diflags2);
+       return failaddr != NULL ? -EINVAL : 0;
 }
 
 static int
index 3c392b1512ac04e2810ced581c2f0f300bc89616..7ec1a9207517f85a0b99af23fd238c59615d9e18 100644 (file)
@@ -73,6 +73,8 @@ do {                                                                  \
        xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
 #define xfs_notice_once(dev, fmt, ...)                         \
        xfs_printk_once(xfs_notice, dev, fmt, ##__VA_ARGS__)
+#define xfs_info_once(dev, fmt, ...)                           \
+       xfs_printk_once(xfs_info, dev, fmt, ##__VA_ARGS__)
 
 void assfail(struct xfs_mount *mp, char *expr, char *f, int l);
 void asswarn(struct xfs_mount *mp, char *expr, char *f, int l);
index 18197c16149f56d2d50fda6227728f901e69cd5b..3e8d969b22fe87737a8a487e3e67a01c9d737163 100644 (file)
@@ -207,4 +207,14 @@ struct acpi_pld_info {
 #define ACPI_PLD_GET_HORIZ_OFFSET(dword)        ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK)
 #define ACPI_PLD_SET_HORIZ_OFFSET(dword,value)  ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value)      /* Offset 128+16=144, Len 16 */
 
+/* Panel position defined in _PLD section of ACPI Specification 6.3 */
+
+#define ACPI_PLD_PANEL_TOP      0
+#define ACPI_PLD_PANEL_BOTTOM   1
+#define ACPI_PLD_PANEL_LEFT     2
+#define ACPI_PLD_PANEL_RIGHT    3
+#define ACPI_PLD_PANEL_FRONT    4
+#define ACPI_PLD_PANEL_BACK     5
+#define ACPI_PLD_PANEL_UNKNOWN  6
+
 #endif                         /* ACBUFFER_H */
index e92f84fa8c680838812fe545ffaa417df26f1100..0362cbb7235915315dbf9979353c8fdec914ae80 100644 (file)
 #define ACPI_MAX_GSBUS_DATA_SIZE        255
 #define ACPI_MAX_GSBUS_BUFFER_SIZE      ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE
 
+#define ACPI_PRM_INPUT_BUFFER_SIZE      26
+
 /* _sx_d and _sx_w control methods */
 
 #define ACPI_NUM_sx_d_METHODS           4
index 3a82faac5767362b12401b0dd0a66c2cbe060319..d8e7235b4cf03e8b97cb98c021ec2d284979fcae 100644 (file)
@@ -280,6 +280,12 @@ struct acpi_device_power {
        struct acpi_device_power_state states[ACPI_D_STATE_COUNT];      /* Power states (D0-D3Cold) */
 };
 
+struct acpi_dep_data {
+       struct list_head node;
+       acpi_handle supplier;
+       acpi_handle consumer;
+};
+
 /* Performance Management */
 
 struct acpi_device_perf_flags {
@@ -498,8 +504,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
  */
 
 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
-void acpi_bus_put_acpi_device(struct acpi_device *adev);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
@@ -685,6 +689,8 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
 
 bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
 
+void acpi_dev_clear_dependencies(struct acpi_device *supplier);
+struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier);
 struct acpi_device *
 acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
 struct acpi_device *
@@ -718,6 +724,13 @@ static inline void acpi_dev_put(struct acpi_device *adev)
 {
        put_device(&adev->dev);
 }
+
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
+
+static inline void acpi_bus_put_acpi_device(struct acpi_device *adev)
+{
+       acpi_dev_put(adev);
+}
 #else  /* CONFIG_ACPI */
 
 static inline int register_acpi_bus_type(void *bus) { return 0; }
index f8d44b06f3e3a41245c4d9a24e5f11e392fc4aef..a43335961e30f8cb545fd9464275b9c9043c6050 100644 (file)
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20210331
+#define ACPI_CA_VERSION                 0x20210604
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
index ce59903c2695e61d1db245d27743887885878e4b..ef2872dea01ce6106ff02795995fd38aec54e82b 100644 (file)
@@ -327,9 +327,20 @@ struct acpi_cedt_header {
 
 enum acpi_cedt_type {
        ACPI_CEDT_TYPE_CHBS = 0,
-       ACPI_CEDT_TYPE_RESERVED = 1
+       ACPI_CEDT_TYPE_CFMWS = 1,
+       ACPI_CEDT_TYPE_RESERVED = 2,
 };
 
+/* Values for version field above */
+
+#define ACPI_CEDT_CHBS_VERSION_CXL11    (0)
+#define ACPI_CEDT_CHBS_VERSION_CXL20    (1)
+
+/* Values for length field above */
+
+#define ACPI_CEDT_CHBS_LENGTH_CXL11     (0x2000)
+#define ACPI_CEDT_CHBS_LENGTH_CXL20     (0x10000)
+
 /*
  * CEDT subtables
  */
@@ -345,6 +356,34 @@ struct acpi_cedt_chbs {
        u64 length;
 };
 
+/* 1: CXL Fixed Memory Window Structure */
+
+struct acpi_cedt_cfmws {
+       struct acpi_cedt_header header;
+       u32 reserved1;
+       u64 base_hpa;
+       u64 window_size;
+       u8 interleave_ways;
+       u8 interleave_arithmetic;
+       u16 reserved2;
+       u32 granularity;
+       u16 restrictions;
+       u16 qtg_id;
+       u32 interleave_targets[];
+};
+
+/* Values for Interleave Arithmetic field above */
+
+#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO      (0)
+
+/* Values for Restrictions field above */
+
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2         (1)
+#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3         (1<<1)
+#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE      (1<<2)
+#define ACPI_CEDT_CFMWS_RESTRICT_PMEM          (1<<3)
+#define ACPI_CEDT_CFMWS_RESTRICT_FIXED         (1<<4)
+
 /*******************************************************************************
  *
  * CPEP - Corrected Platform Error Polling table (ACPI 4.0)
index 18cafe3ebddc22926154d12f5c68a9e37162e6b9..2069ac38a4e2ecdd34f067e478c668734fe65ecc 100644 (file)
@@ -24,6 +24,7 @@
  * file. Useful because they make it more difficult to inadvertently type in
  * the wrong signature.
  */
+#define ACPI_SIG_BDAT           "BDAT" /* BIOS Data ACPI Table */
 #define ACPI_SIG_IORT           "IORT" /* IO Remapping Table */
 #define ACPI_SIG_IVRS           "IVRS" /* I/O Virtualization Reporting Structure */
 #define ACPI_SIG_LPIT           "LPIT" /* Low Power Idle Table */
 #define ACPI_SIG_PHAT           "PHAT" /* Platform Health Assessment Table */
 #define ACPI_SIG_PMTT           "PMTT" /* Platform Memory Topology Table */
 #define ACPI_SIG_PPTT           "PPTT" /* Processor Properties Topology Table */
+#define ACPI_SIG_PRMT           "PRMT" /* Platform Runtime Mechanism Table */
 #define ACPI_SIG_RASF           "RASF" /* RAS Feature table */
+#define ACPI_SIG_RGRT           "RGRT" /* Regulatory Graphics Resource Table */
 #define ACPI_SIG_SBST           "SBST" /* Smart Battery Specification Table */
 #define ACPI_SIG_SDEI           "SDEI" /* Software Delegated Exception Interface Table */
 #define ACPI_SIG_SDEV           "SDEV" /* Secure Devices table */
 #define ACPI_SIG_NHLT           "NHLT" /* Non-HDAudio Link Table */
+#define ACPI_SIG_SVKL           "SVKL" /* Storage Volume Key Location Table */
 
 /*
  * All tables must be byte-packed to match the ACPI specification, since
  * See http://stackoverflow.com/a/1053662/41661
  */
 
+/*******************************************************************************
+ *
+ * BDAT - BIOS Data ACPI Table
+ *
+ * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5
+ * Nov 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_bdat {
+       struct acpi_table_header header;
+       struct acpi_generic_address gas;
+};
+
 /*******************************************************************************
  *
  * IORT - IO Remapping Table
@@ -446,6 +464,12 @@ struct acpi_ivrs_device_hid {
        u8 uid_length;
 };
 
+/* Values for uid_type above */
+
+#define ACPI_IVRS_UID_NOT_PRESENT   0
+#define ACPI_IVRS_UID_IS_INTEGER    1
+#define ACPI_IVRS_UID_IS_STRING     2
+
 /* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */
 
 struct acpi_ivrs_memory {
@@ -763,6 +787,20 @@ struct acpi_madt_multiproc_wakeup {
        u64 base_address;
 };
 
+#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE       2032
+#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048
+
+struct acpi_madt_multiproc_wakeup_mailbox {
+       u16 command;
+       u16 reserved;           /* reserved - must be zero */
+       u32 apic_id;
+       u64 wakeup_vector;
+       u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE];       /* reserved for OS use */
+       u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE];   /* reserved for firmware use */
+};
+
+#define ACPI_MP_WAKE_COMMAND_WAKEUP    1
+
 /*
  * Common flags fields for MADT subtables
  */
@@ -1673,6 +1711,48 @@ struct acpi_pptt_id {
        u16 spin_rev;
 };
 
+/*******************************************************************************
+ *
+ * PRMT - Platform Runtime Mechanism Table
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_prmt {
+       struct acpi_table_header header;        /* Common ACPI table header */
+};
+
+struct acpi_table_prmt_header {
+       u8 platform_guid[16];
+       u32 module_info_offset;
+       u32 module_info_count;
+};
+
+struct acpi_prmt_module_header {
+       u16 revision;
+       u16 length;
+};
+
+struct acpi_prmt_module_info {
+       u16 revision;
+       u16 length;
+       u8 module_guid[16];
+       u16 major_rev;
+       u16 minor_rev;
+       u16 handler_info_count;
+       u32 handler_info_offset;
+       u64 mmio_list_pointer;
+};
+
+struct acpi_prmt_handler_info {
+       u16 revision;
+       u16 length;
+       u8 handler_guid[16];
+       u64 handler_address;
+       u64 static_data_buffer_address;
+       u64 acpi_param_buffer_address;
+};
+
 /*******************************************************************************
  *
  * RASF - RAS Feature Table (ACPI 5.0)
@@ -1769,6 +1849,32 @@ enum acpi_rasf_status {
 #define ACPI_RASF_ERROR                 (1<<2)
 #define ACPI_RASF_STATUS                (0x1F<<3)
 
+/*******************************************************************************
+ *
+ * RGRT - Regulatory Graphics Resource Table
+ *        Version 1
+ *
+ * Conforms to "ACPI RGRT" available at:
+ * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/
+ *
+ ******************************************************************************/
+
+struct acpi_table_rgrt {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u16 version;
+       u8 image_type;
+       u8 reserved;
+       u8 image[0];
+};
+
+/* image_type values */
+
+enum acpi_rgrt_image_type {
+       ACPI_RGRT_TYPE_RESERVED0 = 0,
+       ACPI_RGRT_IMAGE_TYPE_PNG = 1,
+       ACPI_RGRT_TYPE_RESERVED = 2     /* 2 and greater are reserved */
+};
+
 /*******************************************************************************
  *
  * SBST - Smart Battery Specification Table
@@ -1899,6 +2005,37 @@ struct acpi_sdev_pcie_path {
        u8 function;
 };
 
+/*******************************************************************************
+ *
+ * SVKL - Storage Volume Key Location Table (ACPI 6.4)
+ *        From: "Guest-Host-Communication Interface (GHCI) for Intel
+ *        Trust Domain Extensions (Intel TDX)".
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_svkl {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 count;
+};
+
+struct acpi_svkl_key {
+       u16 type;
+       u16 format;
+       u32 size;
+       u64 address;
+};
+
+enum acpi_svkl_type {
+       ACPI_SVKL_TYPE_MAIN_STORAGE = 0,
+       ACPI_SVKL_TYPE_RESERVED = 1     /* 1 and greater are reserved */
+};
+
+enum acpi_svkl_format {
+       ACPI_SVKL_FORMAT_RAW_BINARY = 0,
+       ACPI_SVKL_FORMAT_RESERVED = 1   /* 1 and greater are reserved */
+};
+
 /* Reset to default packing */
 
 #pragma pack()
index 40a9c101565ebe120e537eb0e12b471f25a6612a..17325416e2dee2e400dfb531e9bc5ffc64e13306 100644 (file)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 #define PERCPU_DECRYPTED_SECTION                                       \
        . = ALIGN(PAGE_SIZE);                                           \
+       *(.data..decrypted)                                             \
        *(.data..percpu..decrypted)                                     \
        . = ALIGN(PAGE_SIZE);
 #else
index fef3ef65967fa19378d8347f550dd6a32eaa0047..e6526b13817452abd7ec74bc64e94183feddd39e 100644 (file)
  * <20:16>  :: Reserved, Shall be set to zero
  * <15:0>   :: USB-IF assigned VID for this cable vendor
  */
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF                0
+
 /* SOP Product Type (UFP) */
 #define IDH_PTYPE_NOT_UFP       0
 #define IDH_PTYPE_HUB           1
 #define UFP_VDO_VER1_2         2
 
 /* Device Capability */
-#define DEV_USB2_CAPABLE       BIT(0)
-#define DEV_USB2_BILLBOARD     BIT(1)
-#define DEV_USB3_CAPABLE       BIT(2)
-#define DEV_USB4_CAPABLE       BIT(3)
+#define DEV_USB2_CAPABLE       (1 << 0)
+#define DEV_USB2_BILLBOARD     (1 << 1)
+#define DEV_USB3_CAPABLE       (1 << 2)
+#define DEV_USB4_CAPABLE       (1 << 3)
 
 /* Connector Type */
 #define UFP_RECEPTACLE         2
 
 /* Alternate Modes */
 #define UFP_ALTMODE_NOT_SUPP   0
-#define UFP_ALTMODE_TBT3       BIT(0)
-#define UFP_ALTMODE_RECFG      BIT(1)
-#define UFP_ALTMODE_NO_RECFG   BIT(2)
+#define UFP_ALTMODE_TBT3       (1 << 0)
+#define UFP_ALTMODE_RECFG      (1 << 1)
+#define UFP_ALTMODE_NO_RECFG   (1 << 2)
 
 /* USB Highest Speed */
 #define UFP_USB2_ONLY          0
  * <4:0>   :: Port number
  */
 #define DFP_VDO_VER1_1         1
-#define HOST_USB2_CAPABLE      BIT(0)
-#define HOST_USB3_CAPABLE      BIT(1)
-#define HOST_USB4_CAPABLE      BIT(2)
+#define HOST_USB2_CAPABLE      (1 << 0)
+#define HOST_USB3_CAPABLE      (1 << 1)
+#define HOST_USB4_CAPABLE      (1 << 2)
 #define DFP_RECEPTACLE         2
 #define DFP_CAPTIVE            3
 
         | ((pnum) & 0x1f))
 
 /*
- * Passive Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
+ * <17>    :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10>    :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9>     :: SSTX2 Directionality support
+ * <8>     :: SSRX1 Directionality support
+ * <7>     :: SSRX2 Directionality support
+ * <6:5>   :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4>     :: Vbus through cable (0b == no, 1b == yes)
+ * <3>     :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0>   :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
  * ---------
  * <31:28> :: Cable HW version
  * <27:24> :: Cable FW version
  * <4:3>   :: Reserved, Shall be set to zero
  * <2:0>   :: USB highest speed
  *
- * Active Cable VDO 1
+ * Active Cable VDO 1 (PD Rev3.0+)
  * ---------
  * <31:28> :: Cable HW version
  * <27:24> :: Cable FW version
 #define CABLE_VDO_VER1_0       0
 #define CABLE_VDO_VER1_3       3
 
-/* Connector Type */
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE            0
+#define CABLE_BTYPE            1
 #define CABLE_CTYPE            2
 #define CABLE_CAPTIVE          3
 
 #define CABLE_CURR_3A          1
 #define CABLE_CURR_5A          2
 
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY    0
+#define CABLE_USBSS_U31_GEN1   1
+#define CABLE_USBSS_U31_GEN2   2
+
 /* USB Highest Speed */
 #define CABLE_USB2_ONLY                0
 #define CABLE_USB32_GEN1       1
 #define CABLE_USB32_4_GEN2     2
 #define CABLE_USB4_GEN3                3
 
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+       (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18          \
+        | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10            \
+        | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5         \
+        | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
 #define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd)                        \
        (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21          \
         | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11    \
         | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3           \
         | (iso) << 2 | (gen))
 
+/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11>    :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10>    :: SSTX2 Directionality support
+ * <9>     :: SSRX1 Directionality support
+ * <8>     :: SSRX2 Directionality support
+ * <7:5>   :: Vconn power
+ * <4>     :: Vconn power required
+ * <3>     :: Vbus power required
+ * <2:0>   :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+       (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24                        \
+        | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8      \
+        | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3               \
+        | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo)      (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo)       (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY      0
+#define AMA_USBSS_U31_GEN1     1
+#define AMA_USBSS_U31_GEN2     2
+#define AMA_USBSS_BBONLY       3
+
 /*
  * VPD VDO
  * ---------
index c60745f657e9c9fc10a5276bb90d466652fc9f9f..98e0f207e115d66e00624cae94d32647e4c75c20 100644 (file)
@@ -132,6 +132,7 @@ enum acpi_address_range_id {
 union acpi_subtable_headers {
        struct acpi_subtable_header common;
        struct acpi_hmat_structure hmat;
+       struct acpi_prmt_module_header prmt;
 };
 
 typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
@@ -550,6 +551,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 #define OSC_SB_OSLPI_SUPPORT                   0x00000100
 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT                0x00001000
 #define OSC_SB_GENERIC_INITIATOR_SUPPORT       0x00002000
+#define OSC_SB_PRM_SUPPORT                     0x00020000
 #define OSC_SB_NATIVE_USB4_SUPPORT             0x00040000
 
 extern bool osc_sb_apei_support_acked;
@@ -666,7 +668,6 @@ extern bool acpi_driver_match_device(struct device *dev,
                                     const struct device_driver *drv);
 int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
 int acpi_device_modalias(struct device *, char *, int);
-void acpi_walk_dep_device_list(acpi_handle handle);
 
 struct platform_device *acpi_create_platform_device(struct acpi_device *,
                                                    struct property_entry *);
index 565deea6ffe88b99da66f597756e416be3da0f43..8612f8fc86c1db21cc013c0a1555002f1cd62b86 100644 (file)
@@ -830,6 +830,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
 struct virtchnl_proto_hdrs {
        u8 tunnel_level;
+       u8 pad[3];
        /**
         * specify where protocol header start from.
         * 0 - from the outer layer
index 7f475d59a0974f17d7a9765c643b798a1dc6ee83..87d112650dfbb0ec9ac6825ae2746c61b2659960 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
-               __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+               __is_constexpr((l) > (h)), (l) > (h), 0)))
 #else
 /*
  * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
index 1255823b2bc0fff0dcb75535333f809bc886d9c4..f69c75bd6d276f4768641af88cc831e7701e85a9 100644 (file)
@@ -676,11 +676,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);
 
-static inline bool blk_account_rq(struct request *rq)
-{
-       return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
-}
-
 #define list_entry_rq(ptr)     list_entry((ptr), struct request, queuelist)
 
 #define rq_data_dir(rq)                (op_is_write(req_op(rq)) ? WRITE : READ)
index 559ee05f86b2ecde788a5748a5d015f0ce233cc0..fb8f6d2cd1042067511867b96bac3305df827cd9 100644 (file)
@@ -232,7 +232,7 @@ struct css_set {
        struct list_head task_iters;
 
        /*
-        * On the default hierarhcy, ->subsys[ssid] may point to a css
+        * On the default hierarchy, ->subsys[ssid] may point to a css
         * attached to an ancestor instead of the cgroup this css_set is
         * associated with.  The following node is anchored at
         * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -668,7 +668,7 @@ struct cgroup_subsys {
         */
        bool threaded:1;
 
-       /* the following two fields are initialized automtically during boot */
+       /* the following two fields are initialized automatically during boot */
        int id;
        const char *name;
 
@@ -757,7 +757,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
  * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
  * On boot, sock_cgroup_data records the cgroup that the sock was created
  * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overriden to carry prioidx and/or
+ * net_cls starts being used, the area is overridden to carry prioidx and/or
  * classid.  The two modes are distinguished by whether the lowest bit is
  * set.  Clear bit indicates cgroup pointer while set bit prioidx and
  * classid.
index 4f2f79de083e213b53967d2808af15636f9eb222..6bc9c76680b228819fb9577aa5bfcd360d512959 100644 (file)
@@ -32,7 +32,7 @@ struct kernel_clone_args;
 #ifdef CONFIG_CGROUPS
 
 /*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
  * default and max values.  The default value is the logarithmic center of
  * MIN and MAX and allows 100x to be expressed in both directions.
  */
index 98dd7b324c35074a98a9e3fca68bdd4c01154d45..8855b1b702b2a7e84ec0d8c75491f4347040d0d1 100644 (file)
@@ -213,12 +213,11 @@ typedef struct compat_siginfo {
                /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
                struct {
                        compat_uptr_t _addr;    /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
-                       int _trapno;    /* TRAP # which caused the signal */
-#endif
 #define __COMPAT_ADDR_BND_PKEY_PAD  (__alignof__(compat_uptr_t) < sizeof(short) ? \
                                     sizeof(short) : __alignof__(compat_uptr_t))
                        union {
+                               /* used on alpha and sparc */
+                               int _trapno;    /* TRAP # which caused the signal */
                                /*
                                 * used when si_code=BUS_MCEERR_AR or
                                 * used when si_code=BUS_MCEERR_AO
@@ -236,7 +235,10 @@ typedef struct compat_siginfo {
                                        u32 _pkey;
                                } _addr_pkey;
                                /* used when si_code=TRAP_PERF */
-                               compat_ulong_t _perf;
+                               struct {
+                                       compat_ulong_t _data;
+                                       u32 _type;
+                               } _perf;
                        };
                } _sigfault;
 
index c043b8d2b17bf9a9bc5d441f801527937b39a50c..183ddd5fd07243a02729da72dadd91d8ccad856e 100644 (file)
  * must end with any of these keywords:
  *   break;
  *   fallthrough;
+ *   continue;
  *   goto <label>;
  *   return [expression];
  *
index 153734816b49cfdc534d8138e576f174a78d0218..d5b9c8d40c18e8d4bbe3d21fe3a1031a40a47acd 100644 (file)
@@ -101,6 +101,7 @@ struct vc_data {
        unsigned int    vc_rows;
        unsigned int    vc_size_row;            /* Bytes per row */
        unsigned int    vc_scan_lines;          /* # of scan lines */
+       unsigned int    vc_cell_height;         /* CRTC character cell height */
        unsigned long   vc_origin;              /* [!] Start of real screen */
        unsigned long   vc_scr_end;             /* [!] End of real screen */
        unsigned long   vc_visible_origin;      /* [!] Top of visible window */
index 81b8aae5a8559c9fabf0665c48d8e8659db4f239..435ddd72d2c464fdedd33889190009dde0a3bda9 100644 (file)
@@ -3,4 +3,12 @@
 
 #include <vdso/const.h>
 
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+       (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
 #endif /* _LINUX_CONST_H */
index 38a2071cf77685a3b8c520f4d4eafb50ee080669..f1a00040fa5349beb2e650376b4c56ddeb6cc46a 100644 (file)
@@ -570,7 +570,7 @@ struct device {
  * @flags: Link flags.
  * @rpm_active: Whether or not the consumer device is runtime-PM-active.
  * @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @rm_work: Work structure used for removing the link.
  * @supplier_preactivated: Supplier has been made active before consumer probe.
  */
 struct device_link {
@@ -583,9 +583,7 @@ struct device_link {
        u32 flags;
        refcount_t rpm_active;
        struct kref kref;
-#ifdef CONFIG_SRCU
-       struct rcu_head rcu_head;
-#endif
+       struct work_struct rm_work;
        bool supplier_preactivated; /* Owned by consumer probe. */
 };
 
index a57ee75342cf801e5d50b5d44702aa6c052a1be4..dce631e678dd6ce0bfd5c9985c0520b3bee78e7c 100644 (file)
@@ -32,6 +32,11 @@ struct _ddebug {
 #define _DPRINTK_FLAGS_INCL_FUNCNAME   (1<<2)
 #define _DPRINTK_FLAGS_INCL_LINENO     (1<<3)
 #define _DPRINTK_FLAGS_INCL_TID                (1<<4)
+
+#define _DPRINTK_FLAGS_INCL_ANY                \
+       (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+        _DPRINTK_FLAGS_INCL_LINENO  | _DPRINTK_FLAGS_INCL_TID)
+
 #if defined DEBUG
 #define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
 #else
index 1fe8e105b83bf365d41d595f340482493ce0547c..dcb2f9022c1dfdf70471c52b8499a2bb601f8789 100644 (file)
@@ -34,7 +34,7 @@ struct elevator_mq_ops {
        void (*depth_updated)(struct blk_mq_hw_ctx *);
 
        bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
-       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
+       bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
        int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
        void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
        void (*requests_merged)(struct request_queue *, struct request *, struct request *);
index 8b2b1d68b9545602534692c5670f41b7f6fcdf18..136b8d97d8c01216657e4e3a6587d73eae865efb 100644 (file)
@@ -3,6 +3,7 @@
 #define __LINUX_ENTRYKVM_H
 
 #include <linux/entry-common.h>
+#include <linux/tick.h>
 
 /* Transfer to guest mode work */
 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
 static inline void xfer_to_guest_mode_prepare(void)
 {
        lockdep_assert_irqs_disabled();
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 }
 
 /**
index bad41bcb25dfb6dca227b9405802c27befb319dc..a16dbeced15289d1c9f06eaf55a25865e532dc8f 100644 (file)
@@ -51,6 +51,10 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
 #define FANOTIFY_INIT_FLAGS    (FANOTIFY_ADMIN_INIT_FLAGS | \
                                 FANOTIFY_USER_INIT_FLAGS)
 
+/* Internal group flags */
+#define FANOTIFY_UNPRIV                0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS  (FANOTIFY_UNPRIV)
+
 #define FANOTIFY_MARK_TYPE_BITS        (FAN_MARK_INODE | FAN_MARK_MOUNT | \
                                 FAN_MARK_FILESYSTEM)
 
index a8dccd23c2499b532537a8bd4a239aab0b54bb8f..ecfbcc0553a5904d28cd6f3fb4a1952c43069439 100644 (file)
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 /* drivers/video/fb_defio.c */
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+                               struct inode *inode,
+                               struct file *file);
 extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, loff_t start,
                                loff_t end, int datasync);
index ed4e67a7ff1c4bfe9992dee723190cd0a2719fe4..59828516ebaf1932a098eb9988f95fe8d17da5fb 100644 (file)
@@ -187,5 +187,6 @@ extern u32 fw_devlink_get_flags(void);
 extern bool fw_devlink_is_strict(void);
 int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
 void fwnode_links_purge(struct fwnode_handle *fwnode);
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
 
 #endif
index 7e9660ea967d59c422d5ed6a9b6d557a66f86e04..6fc26f7bdf71ea48a7ca1693217f8be150d2d5d7 100644 (file)
@@ -306,8 +306,6 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
 }
 #endif /* CONFIG_SYSFS */
 
-extern struct rw_semaphore bdev_lookup_sem;
-
 dev_t blk_lookup_devt(const char *name, int partno);
 void blk_request_module(dev_t devt);
 #ifdef CONFIG_BLOCK
index 271021e20a3f8d468bd507c7178ae2691a70cabd..10e922cee4ebbd732dad747a739b36e7bf12aa0c 100644 (file)
@@ -1167,8 +1167,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
  */
 static inline u32 hid_report_len(struct hid_report *report)
 {
-       /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
-       return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
 }
 
 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
index 232e1bd507a7e7fc992189ed5dad6ce07aef4a41..9b0487c8857194b8c6be40f163445ca325ce1445 100644 (file)
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key);
-#define host1x_client_register(class) \
-       ({ \
-               static struct lock_class_key __key; \
-               __host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client)                     \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+       })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client)                 \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+               __host1x_client_register(client);       \
        })
 
 int host1x_client_unregister(struct host1x_client *client);
index 045ad1650ed16dacf28f3894ef2414f279e20fe1..d82b4b2e1d25d69839ccd92292c2d9ae5d4736ed 100644 (file)
@@ -242,7 +242,8 @@ extern bool initcall_debug;
        asm(".section   \"" __sec "\", \"a\"            \n"     \
            __stringify(__name) ":                      \n"     \
            ".long      " __stringify(__stub) " - .     \n"     \
-           ".previous                                  \n");
+           ".previous                                  \n");   \
+       static_assert(__same_type(initcall_t, &fn));
 #else
 #define ____define_initcall(fn, __unused, __name, __sec)       \
        static initcall_t __name __used                         \
index 2f34487e21f213efc56b6bd7d841c5c8e5f9fa29..8583ed3ff3447d0eb1cc288cb216f680f1b19065 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/sched/stat.h>
 #include <linux/bug.h>
 #include <linux/minmax.h>
 #include <linux/mm.h>
@@ -146,7 +147,7 @@ static inline bool is_error_page(struct page *page)
  */
 #define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER     2
+#define KVM_REQ_UNBLOCK           2
 #define KVM_REQ_UNHALT            3
 #define KVM_REQUEST_ARCH_BASE     8
 
@@ -265,6 +266,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
        return !!map->hva;
 }
 
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+       return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
 /*
  * Sometimes a large or cross-page mmio needs to be broken up into separate
  * exits for userspace servicing.
@@ -1179,7 +1185,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 static inline unsigned long
 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+       /*
+        * The index was checked originally in search_memslots.  To avoid
+        * that a malicious guest builds a Spectre gadget out of e.g. page
+        * table walks, do not let the processor speculate loads outside
+        * the guest's registered memslots.
+        */
+       unsigned long offset = gfn - slot->base_gfn;
+       offset = array_index_nospec(offset, slot->npages);
+       return slot->userspace_addr + offset * PAGE_SIZE;
 }
 
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
index 01f251b6e36c5788fa7fea4fadf29549e276289d..89b69e645ac74a7f0ed22ac52b5cb9ed99e02a4c 100644 (file)
@@ -141,7 +141,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
 
 struct nvdimm_bus;
 struct module;
-struct device;
 struct nd_blk_region;
 struct nd_blk_region_desc {
        int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
index a57af878fd0cdc8a37a6b18609cc0d1dd0d2192f..4a5966475a35aa452c8d695cc7f6c6b92faa4563 100644 (file)
@@ -26,9 +26,7 @@ struct bd70528_data {
        struct mutex rtc_timer_lock;
 };
 
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
+#define BD70528_BUCK_VOLTS 0x10
 #define BD70528_LDO_VOLTS 0x20
 
 #define BD70528_REG_BUCK1_EN   0x0F
index c7ab69c87ee8c6fbe2c38d4f4b80d6cfe28983c6..3b5f3a7db4bd66cb4fa445a587ab69abec3a0c24 100644 (file)
@@ -26,11 +26,11 @@ enum {
        BD71828_REGULATOR_AMOUNT,
 };
 
-#define BD71828_BUCK1267_VOLTS         0xEF
-#define BD71828_BUCK3_VOLTS            0x10
-#define BD71828_BUCK4_VOLTS            0x20
-#define BD71828_BUCK5_VOLTS            0x10
-#define BD71828_LDO_VOLTS              0x32
+#define BD71828_BUCK1267_VOLTS         0x100
+#define BD71828_BUCK3_VOLTS            0x20
+#define BD71828_BUCK4_VOLTS            0x40
+#define BD71828_BUCK5_VOLTS            0x20
+#define BD71828_LDO_VOLTS              0x40
 /* LDO6 is fixed 1.8V voltage */
 #define BD71828_LDO_6_VOLTAGE          1800000
 
index c0f57b0c64d908b0ee298981031d14d6d3cb0de7..5433c08fcc685856798e833202c74b44330ce479 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _LINUX_MINMAX_H
 #define _LINUX_MINMAX_H
 
+#include <linux/const.h>
+
 /*
  * min()/max()/clamp() macros must accomplish three things:
  *
 #define __typecheck(x, y) \
        (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
 
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
- */
-#define __is_constexpr(x) \
-       (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
 #define __no_side_effects(x, y) \
                (__is_constexpr(x) && __is_constexpr(y))
 
index 236a7d04f891e7d9cfd7ac0abc8cc373cf9a5599..30bb59fe970cbb27864f4875bae90cd3a29a7f64 100644 (file)
@@ -630,6 +630,7 @@ struct mlx4_caps {
        bool                    wol_port[MLX4_MAX_PORTS + 1];
        struct mlx4_rate_limit_caps rl_caps;
        u32                     health_buffer_addrs;
+       bool                    map_clock_to_user;
 };
 
 struct mlx4_buf_list {
index f8e8d7e9061608cd915c60ce293db7bd40e86ca8..020a8f7fdbdd43e3215d0ad9a8dcb6b9d228f9ea 100644 (file)
@@ -703,6 +703,27 @@ struct mlx5_hv_vhca;
 #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
 #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
 
+enum {
+       MLX5_PROF_MASK_QP_SIZE          = (u64)1 << 0,
+       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 1,
+};
+
+enum {
+       MR_CACHE_LAST_STD_ENTRY = 20,
+       MLX5_IMR_MTT_CACHE_ENTRY,
+       MLX5_IMR_KSM_CACHE_ENTRY,
+       MAX_MR_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+       u64     mask;
+       u8      log_max_qp;
+       struct {
+               int     size;
+               int     limit;
+       } mr_cache[MAX_MR_CACHE_ENTRIES];
+};
+
 struct mlx5_core_dev {
        struct device *device;
        enum mlx5_coredev_type coredev_type;
@@ -731,7 +752,7 @@ struct mlx5_core_dev {
        struct mutex            intf_state_mutex;
        unsigned long           intf_state;
        struct mlx5_priv        priv;
-       struct mlx5_profile     *profile;
+       struct mlx5_profile     profile;
        u32                     issi;
        struct mlx5e_resources  mlx5e_res;
        struct mlx5_dm          *dm;
@@ -1083,18 +1104,6 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
        return mkey & 0xff;
 }
 
-enum {
-       MLX5_PROF_MASK_QP_SIZE          = (u64)1 << 0,
-       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 1,
-};
-
-enum {
-       MR_CACHE_LAST_STD_ENTRY = 20,
-       MLX5_IMR_MTT_CACHE_ENTRY,
-       MLX5_IMR_KSM_CACHE_ENTRY,
-       MAX_MR_CACHE_ENTRIES
-};
-
 /* Async-atomic event notifier used by mlx5 core to forward FW
  * evetns recived from event queue to mlx5 consumers.
  * Optimise event queue dipatching.
@@ -1148,15 +1157,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
                            struct ib_device *device,
                            struct rdma_netdev_alloc_params *params);
 
-struct mlx5_profile {
-       u64     mask;
-       u8      log_max_qp;
-       struct {
-               int     size;
-               int     limit;
-       } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
-
 enum {
        MLX5_PCI_DEV_IS_VF              = 1 << 0,
 };
index 6d16eed6850e50687d61e1a8ec2adc83e6825c58..eb86e80e4643f775ac155ab06818ae53a04658a8 100644 (file)
@@ -1289,6 +1289,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
 
 #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
 
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
 enum {
        MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
        MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644 (file)
index 0000000..bf700c8
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* #ifndef CONFIG_MLX5_MPFS */
+static inline int  mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int  mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
index 322ec61d0da798daa6bced13c4a822cc1d7e8628..c274f75efcf97350f967ad57e10f78f2e4682fb5 100644 (file)
@@ -3216,5 +3216,37 @@ void mem_dump_obj(void *object);
 static inline void mem_dump_obj(void *object) {}
 #endif
 
+/**
+ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
+ * @seals: the seals to check
+ * @vma: the vma to operate on
+ *
+ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
+ * the vma flags.  Return 0 if check pass, or <0 for errors.
+ */
+static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+{
+       if (seals & F_SEAL_FUTURE_WRITE) {
+               /*
+                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+                * "future write" seal active.
+                */
+               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+                       return -EPERM;
+
+               /*
+                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+                * MAP_SHARED and read-only, take care to not allow mprotect to
+                * revert protections on such mappings. Do this only for shared
+                * mappings. For private mappings, don't need to mask
+                * VM_MAYWRITE as we still want them to be COW-writable.
+                */
+               if (vma->vm_flags & VM_SHARED)
+                       vma->vm_flags &= ~(VM_MAYWRITE);
+       }
+
+       return 0;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
index 6613b26a88946c41afb469b894d3b66fc4712010..8f0fb62e8975cb62946a044ac6fa99ff7280d16c 100644 (file)
@@ -97,10 +97,10 @@ struct page {
                };
                struct {        /* page_pool used by netstack */
                        /**
-                        * @dma_addr: might require a 64-bit value even on
+                        * @dma_addr: might require a 64-bit value on
                         * 32-bit architectures.
                         */
-                       dma_addr_t dma_addr;
+                       unsigned long dma_addr[2];
                };
                struct {        /* slab, slob and slub */
                        union {
@@ -445,13 +445,6 @@ struct mm_struct {
                 */
                atomic_t has_pinned;
 
-               /**
-                * @write_protect_seq: Locked when any thread is write
-                * protecting pages mapped by this mm to enforce a later COW,
-                * for instance during page table copying for fork().
-                */
-               seqcount_t write_protect_seq;
-
 #ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
 #endif
@@ -460,6 +453,18 @@ struct mm_struct {
                spinlock_t page_table_lock; /* Protects page tables and some
                                             * counters
                                             */
+               /*
+                * With some kernel config, the current mmap_lock's offset
+                * inside 'mm_struct' is at 0x120, which is very optimal, as
+                * its two hot fields 'count' and 'owner' sit in 2 different
+                * cachelines,  and when mmap_lock is highly contended, both
+                * of the 2 fields will be accessed frequently, current layout
+                * will help to reduce cache bouncing.
+                *
+                * So please be careful with adding new fields before
+                * mmap_lock, which can easily push the 2 fields into one
+                * cacheline.
+                */
                struct rw_semaphore mmap_lock;
 
                struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -480,7 +485,15 @@ struct mm_struct {
                unsigned long stack_vm;    /* VM_STACK */
                unsigned long def_flags;
 
+               /**
+                * @write_protect_seq: Locked when any thread is write
+                * protecting pages mapped by this mm to enforce a later COW,
+                * for instance during page table copying for fork().
+                */
+               seqcount_t write_protect_seq;
+
                spinlock_t arg_lock; /* protect the below fields */
+
                unsigned long start_code, end_code, start_data, end_data;
                unsigned long start_brk, brk, start_stack;
                unsigned long arg_start, arg_end, env_start, env_end;
index a4bd41128bf318eb4ae536c56ccda93e6e29b436..e89df447fae32cac5a49cbf89b2abc985012111c 100644 (file)
@@ -997,9 +997,9 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
  * readahead_length - The number of bytes in this readahead request.
  * @rac: The readahead request.
  */
-static inline loff_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(struct readahead_control *rac)
 {
-       return (loff_t)rac->_nr_pages * PAGE_SIZE;
+       return rac->_nr_pages * PAGE_SIZE;
 }
 
 /**
@@ -1024,7 +1024,7 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
  * readahead_batch_length - The number of bytes in the current batch.
  * @rac: The readahead request.
  */
-static inline loff_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(struct readahead_control *rac)
 {
        return rac->_batch_count * PAGE_SIZE;
 }
index c20211e59a576a4b851ed651e127f56556701f1f..24306504226ab665be7323549d0759da1b7ac715 100644 (file)
@@ -2344,6 +2344,7 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 struct device_node;
 struct irq_domain;
 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
 
 /* Arch may override this (weak) */
 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2351,6 +2352,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 #else  /* CONFIG_OF */
 static inline struct irq_domain *
 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
 #endif  /* CONFIG_OF */
 
 static inline struct device_node *
index 46b13780c2c8c951c79af989ad06a57ed83b970f..a43047b1030dcf0636251c006b47772377fa08c6 100644 (file)
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
  * where software maintains page access bit.
  */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+       return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
index 60d2b26026a2dfd142f29ed5f6e9827b1361a8fe..852743f07e3e639f9ffae0b0b8dc00adda17f0c3 100644 (file)
@@ -496,6 +496,11 @@ struct macsec_ops;
  * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY
  * @state: State of the PHY for management purposes
  * @dev_flags: Device-specific flags used by the PHY driver.
+ *             Bits [15:0] are free to use by the PHY driver to communicate
+ *                         driver specific behavior.
+ *             Bits [23:16] are currently reserved for future use.
+ *             Bits [31:24] are reserved for defining generic
+ *                          PHY driver behavior.
  * @irq: IRQ number of the PHY's interrupt (-1 if none)
  * @phy_timer: The timer for handling the state machine
  * @phylink: Pointer to phylink instance for this PHY
index fafc1beea504ab7f79228f92ff1e6f8fcbd98e94..9837fb011f2fb0ce9ffc43cbfcac8392f7f22f7f 100644 (file)
@@ -50,6 +50,7 @@ struct sysc_regbits {
        s8 emufree_shift;
 };
 
+#define SYSC_QUIRK_REINIT_ON_RESUME    BIT(27)
 #define SYSC_QUIRK_GPMC_DEBUG          BIT(26)
 #define SYSC_MODULE_QUIRK_ENA_RESETDONE        BIT(25)
 #define SYSC_MODULE_QUIRK_PRUSS                BIT(24)
index c9657408fee1aec7f2a7b215c3699e15845ff05b..1d8209c09686c65edce26c98c10d95746b78f7cb 100644 (file)
@@ -601,6 +601,7 @@ struct dev_pm_info {
        unsigned int            idle_notification:1;
        unsigned int            request_pending:1;
        unsigned int            deferred_resume:1;
+       unsigned int            needs_force_resume:1;
        unsigned int            runtime_auto:1;
        bool                    ignore_children:1;
        unsigned int            no_callbacks:1;
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
new file mode 100644 (file)
index 0000000..24da836
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef CONFIG_ACPI_PRMT
+void init_prmt(void);
+#else
+static inline void init_prmt(void) { }
+#endif
index fd80fab663a96153e06f05e6eb0adfe7e152a97b..bebc911161b6f569403dcdb12bc98619da3470bd 100644 (file)
@@ -38,7 +38,7 @@ void *__builtin_alloca(size_t size);
                u32 offset = raw_cpu_read(kstack_offset);               \
                u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));  \
                /* Keep allocation even after "ptr" loses scope. */     \
-               asm volatile("" : "=o"(*ptr) :: "memory");              \
+               asm volatile("" :: "r"(ptr) : "memory");                \
        }                                                               \
 } while (0)
 
index 6f155f99aa16033ce0cd6e6790c366c184c7124f..4ab7bfc675f119738344494e87e8f30d00792204 100644 (file)
@@ -1109,6 +1109,7 @@ struct pcr_ops {
 };
 
 enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE  {ASPM_MODE_CFG, ASPM_MODE_REG};
 
 #define ASPM_L1_1_EN                   BIT(0)
 #define ASPM_L1_2_EN                   BIT(1)
@@ -1234,6 +1235,7 @@ struct rtsx_pcr {
        u8                              card_drive_sel;
 #define ASPM_L1_EN                     0x02
        u8                              aspm_en;
+       enum ASPM_MODE                  aspm_mode;
        bool                            aspm_enabled;
 
 #define PCR_MS_PMOS                    (1 << 0)
index d2c881384517b02544e7604d376b1854fd13c538..28a98fc4ded4f6c6c397175962f24ac7ab3e3146 100644 (file)
@@ -350,11 +350,19 @@ struct load_weight {
  * Only for tasks we track a moving average of the past instantaneous
  * estimated utilization. This allows to absorb sporadic drops in utilization
  * of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est.enqueued at dequeue
+ * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
+ * for a task) it is safe to use MSB.
  */
 struct util_est {
        unsigned int                    enqueued;
        unsigned int                    ewma;
 #define UTIL_EST_WEIGHT_SHIFT          2
+#define UTIL_AVG_UNCHANGED             0x80000000
 } __attribute__((__aligned__(sizeof(u64))));
 
 /*
index 3f6a0fcaa10cf47050a79f366a1371613178c8df..7f4278fa21fef7e2de8d832ed46461f1de9aba7d 100644 (file)
@@ -326,6 +326,7 @@ int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
 
 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
 int force_sig_pkuerr(void __user *addr, u32 pkey);
+int force_sig_perf(void __user *addr, u32 type, u64 sig_data);
 
 int force_sig_ptrace_errno_trap(int errno, void __user *addr);
 
index 0dbfda8d99d0e9e4290036a3b9fc9864a8a7254a..201f88e3738b2e641370a62ff18a564d6dbdd070 100644 (file)
@@ -40,6 +40,7 @@ enum siginfo_layout {
        SIL_TIMER,
        SIL_POLL,
        SIL_FAULT,
+       SIL_FAULT_TRAPNO,
        SIL_FAULT_MCEERR,
        SIL_FAULT_BNDERR,
        SIL_FAULT_PKUERR,
index 360a3bc767ca0442dd71f7bbfab7174eb942d4df..74239d65c7fd132143c921d822ed4a39e1ecf52c 100644 (file)
@@ -644,8 +644,8 @@ struct spi_controller {
        int                     *cs_gpios;
        struct gpio_desc        **cs_gpiods;
        bool                    use_gpio_descriptors;
-       u8                      unused_native_cs;
-       u8                      max_native_cs;
+       s8                      unused_native_cs;
+       s8                      max_native_cs;
 
        /* statistics */
        struct spi_statistics   statistics;
index d81fe8b364d004ba60cfcfa2139f4175d6569d82..61b622e334ee5fec2fc065efcb4e9c46002f6cf5 100644 (file)
@@ -368,6 +368,8 @@ struct rpc_xprt *   xprt_alloc(struct net *net, size_t size,
                                unsigned int num_prealloc,
                                unsigned int max_req);
 void                   xprt_free(struct rpc_xprt *);
+void                   xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool                   xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
 
 static inline int
 xprt_enable_swap(struct rpc_xprt *xprt)
index 4441ad667c3f9426200d7f56fee2f9d9693f6fb2..6ff9c58b3e17f31900104375de0fe784c212fc46 100644 (file)
@@ -98,9 +98,9 @@ struct ssam_device_uid {
                     | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0),     \
        .domain   = d,                                                          \
        .category = cat,                                                        \
-       .target   = ((tid) != SSAM_ANY_TID) ? (tid) : 0,                        \
-       .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0,                        \
-       .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0                         \
+       .target   = __builtin_choose_expr((tid) != SSAM_ANY_TID, (tid), 0),     \
+       .instance = __builtin_choose_expr((iid) != SSAM_ANY_IID, (iid), 0),     \
+       .function = __builtin_choose_expr((fun) != SSAM_ANY_FUN, (fun), 0)
 
 /**
  * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
index 7340613c7eff7ad791634c76ae2ecd547f711c45..1a0ff88fa107b3e8fd05c3b5824367b7283752b9 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
+#include <linux/rcupdate.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
                __tick_nohz_task_switch();
 }
 
+static inline void tick_nohz_user_enter_prepare(void)
+{
+       if (tick_nohz_full_cpu(smp_processor_id()))
+               rcu_nocb_flush_deferred_wakeup();
+}
+
 #endif
index bf00259493e077f1198a055cc082fbaa0b23617f..96b7ff66f074b81eeca6a01bfdc0a6cb1715c0d8 100644 (file)
@@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
 #define PD_T_RECEIVER_RESPONSE 15      /* 15ms max */
 #define PD_T_SOURCE_ACTIVITY   45
 #define PD_T_SINK_ACTIVITY     135
-#define PD_T_SINK_WAIT_CAP     240
+#define PD_T_SINK_WAIT_CAP     310     /* 310 - 620 ms */
 #define PD_T_PS_TRANSITION     500
 #define PD_T_SRC_TRANSITION    35
 #define PD_T_DRP_SNK           40
index 0eb83ce1959708c3ae2d58e001685bdfe8037085..b517ebc8f0ff205c8e1cf142b78485a5d95a0498 100644 (file)
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
 #define USB_PD_EXT_SDB_EVENT_OVP               BIT(3)
 #define USB_PD_EXT_SDB_EVENT_CF_CV_MODE                BIT(4)
 
-#define USB_PD_EXT_SDB_PPS_EVENTS      (USB_PD_EXT_SDB_EVENT_OCP |     \
-                                        USB_PD_EXT_SDB_EVENT_OTP |     \
-                                        USB_PD_EXT_SDB_EVENT_OVP)
-
 #endif /* __LINUX_USB_PD_EXT_SDB_H */
index 48ecca8530ffa4cef042984ee85760a73ad8f096..b655d8666f555aabf3f9c9c6bdac1db54c45604c 100644 (file)
@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
  * The link_support layer is used to add any Link Layer specific
  * framing.
  */
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                        struct cflayer *link_support, int head_room,
                        struct cflayer **layer, int (**rcv_func)(
                                struct sk_buff *, struct net_device *,
index 2aa5e91d84576a712280a2ace320e3664e7e3947..8819ff4db35a6936b317265a10864c2d6d2f89cf 100644 (file)
@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
  * @fcs:       Specify if checksum is used in CAIF Framing Layer.
  * @head_room: Head space needed by link specific protocol.
  */
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
index 14a55e03bb3ce8395381fc8fceb8ed770f674baa..67cce8757175a2651b065af6e5d32f7cb63b5214 100644 (file)
@@ -9,4 +9,5 @@
 #include <net/caif/caif_layer.h>
 
 struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
 #endif
index 5224f885a99a129856f486249d9a77390ce87d9c..58c2cd417e89ae469a7f46b0e93ce4b1a17c822a 100644 (file)
@@ -5760,7 +5760,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
  */
 int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
                                  const u8 *addr, enum nl80211_iftype iftype,
-                                 u8 data_offset);
+                                 u8 data_offset, bool is_amsdu);
 
 /**
  * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -5772,7 +5772,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
                                         enum nl80211_iftype iftype)
 {
-       return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0);
+       return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false);
 }
 
 /**
index 51d8eb99764dcbcccdd5eca9323ba199cdc9a9c2..48ef7460ff3043e20a1adc02e8e7c657ca0db0e0 100644 (file)
@@ -157,7 +157,6 @@ enum nf_flow_flags {
        NF_FLOW_HW,
        NF_FLOW_HW_DYING,
        NF_FLOW_HW_DEAD,
-       NF_FLOW_HW_REFRESH,
        NF_FLOW_HW_PENDING,
 };
 
index 27eeb613bb4e89daea9ee208e93ae6dbb7bf84f2..0a5655e300b51fe3f0b290e935ce46afc56810b9 100644 (file)
@@ -1506,16 +1506,10 @@ struct nft_trans_chain {
 
 struct nft_trans_table {
        bool                            update;
-       u8                              state;
-       u32                             flags;
 };
 
 #define nft_trans_table_update(trans)  \
        (((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_state(trans)   \
-       (((struct nft_trans_table *)trans->data)->state)
-#define nft_trans_table_flags(trans)   \
-       (((struct nft_trans_table *)trans->data)->flags)
 
 struct nft_trans_elem {
        struct nft_set                  *set;
index bd76e8e082c0179b3c27b7393ff678909d62de07..1df0f8074c9d3c3b44c0e31699628b73fd1caf3b 100644 (file)
@@ -298,6 +298,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
                      struct sk_buff **resp);
 
 struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
+void nci_hci_deallocate(struct nci_dev *ndev);
 int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
                       const u8 *param, size_t param_len);
 int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
index 6d517a37c18bf92af948d2bd5f5984f71b700271..b4b6de909c934b3cc858d592590cf43c5d15d721 100644 (file)
@@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-       return page->dma_addr;
+       dma_addr_t ret = page->dma_addr[0];
+       if (sizeof(dma_addr_t) > sizeof(unsigned long))
+               ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+       return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+       page->dma_addr[0] = addr;
+       if (sizeof(dma_addr_t) > sizeof(unsigned long))
+               page->dma_addr[1] = upper_32_bits(addr);
 }
 
 static inline bool is_page_pool_compiled_in(void)
index 255e4f4b521f4095f2ab1e60e3b262366b5ff95f..ec7823921bd26ec7e33bfef5b849c347414228fa 100644 (file)
@@ -709,6 +709,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
                cls_common->extack = extack;
 }
 
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
+{
+       struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+
+       if (tc_skb_ext)
+               memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
+       return tc_skb_ext;
+}
+#endif
+
 enum tc_matchall_command {
        TC_CLSMATCHALL_REPLACE,
        TC_CLSMATCHALL_DESTROY,
index f5c1bee0cd6ac94440f4b62fe066a065b8c2d70f..6d7b12cba0158f9a954949b1fc42ee418d4b56a7 100644 (file)
@@ -128,12 +128,7 @@ void __qdisc_run(struct Qdisc *q);
 static inline void qdisc_run(struct Qdisc *q)
 {
        if (qdisc_run_begin(q)) {
-               /* NOLOCK qdisc must check 'state' under the qdisc seqlock
-                * to avoid racing with dev_qdisc_reset()
-                */
-               if (!(q->flags & TCQ_F_NOLOCK) ||
-                   likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
-                       __qdisc_run(q);
+               __qdisc_run(q);
                qdisc_run_end(q);
        }
 }
index f7a6e14491fb6b0fe347a213e0295a11b263e0f2..1e625519ae9687608c087ceee6b30aac82513fca 100644 (file)
@@ -36,6 +36,7 @@ struct qdisc_rate_table {
 enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
+       __QDISC_STATE_MISSED,
 };
 
 struct qdisc_size_table {
@@ -159,8 +160,33 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
        if (qdisc->flags & TCQ_F_NOLOCK) {
+               if (spin_trylock(&qdisc->seqlock))
+                       goto nolock_empty;
+
+               /* If the MISSED flag is set, it means another thread has
+                * set the MISSED flag before second spin_trylock(), so
+                * we can return false here to avoid multi cpus doing
+                * the set_bit() and second spin_trylock() concurrently.
+                */
+               if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
+                       return false;
+
+               /* Set the MISSED flag before the second spin_trylock(),
+                * if the second spin_trylock() returns false, it means
+                * other cpu holding the lock will do dequeuing for us
+                * or it will see the MISSED flag set after releasing
+                * lock and reschedule the net_tx_action() to do the
+                * dequeuing.
+                */
+               set_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+               /* Retry again in case other CPU may not see the new flag
+                * after it releases the lock at the end of qdisc_run_end().
+                */
                if (!spin_trylock(&qdisc->seqlock))
                        return false;
+
+nolock_empty:
                WRITE_ONCE(qdisc->empty, false);
        } else if (qdisc_is_running(qdisc)) {
                return false;
@@ -176,8 +202,15 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
        write_seqcount_end(&qdisc->running);
-       if (qdisc->flags & TCQ_F_NOLOCK)
+       if (qdisc->flags & TCQ_F_NOLOCK) {
                spin_unlock(&qdisc->seqlock);
+
+               if (unlikely(test_bit(__QDISC_STATE_MISSED,
+                                     &qdisc->state))) {
+                       clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+                       __netif_schedule(qdisc);
+               }
+       }
 }
 
 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
index 42bc5e1a627f49a647a7d553f08aca9daa1f0be1..0e962d8bc73b1ce5a38ca1f64c6489f94ab587e4 100644 (file)
@@ -2231,13 +2231,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        sk_mem_charge(sk, skb->truesize);
 }
 
-static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
 {
        if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
                skb_orphan(skb);
                skb->destructor = sock_efree;
                skb->sk = sk;
+               return true;
        }
+       return false;
 }
 
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
index 3eccb525e8f7954c43cec1b8c4212c903a35e76e..8341a8d1e80733278eee6eb6150d5e2aea7b9e0c 100644 (file)
@@ -193,7 +193,11 @@ struct tls_offload_context_tx {
        (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
 
 enum tls_context_flags {
-       TLS_RX_SYNC_RUNNING = 0,
+       /* tls_device_down was called after the netdev went down, device state
+        * was released, and kTLS works in software, even though rx_conf is
+        * still TLS_HW (needed for transition).
+        */
+       TLS_RX_DEV_DEGRADED = 0,
        /* Unlike RX where resync is driven entirely by the core in TX only
         * the driver knows when things went out of sync, so we need the flag
         * to be atomic.
@@ -266,6 +270,7 @@ struct tls_context {
 
        /* cache cold stuff */
        struct proto *sk_proto;
+       struct sock *sk;
 
        void (*sk_destruct)(struct sock *sk);
 
@@ -448,6 +453,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
 struct sk_buff *
 tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
                      struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+                        struct sk_buff *skb);
 
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
index 1358a0ceb4d01c4bf32ebcf26f3806ec00d4cadc..0bc29c4516e7614c6b15868756cd01621a2b8306 100644 (file)
@@ -81,7 +81,7 @@ struct snd_compr_stream;
 #define SND_SOC_DAIFMT_CBP_CFP         (1 << 12) /* codec clk provider & frame provider */
 #define SND_SOC_DAIFMT_CBC_CFP         (2 << 12) /* codec clk consumer & frame provider */
 #define SND_SOC_DAIFMT_CBP_CFC         (3 << 12) /* codec clk provider & frame consumer */
-#define SND_SOC_DAIFMT_CBC_CFC         (4 << 12) /* codec clk consumer & frame follower */
+#define SND_SOC_DAIFMT_CBC_CFC         (4 << 12) /* codec clk consumer & frame consumer */
 
 /* previous definitions kept for backwards-compatibility, do not use in new contributions */
 #define SND_SOC_DAIFMT_CBM_CFM         SND_SOC_DAIFMT_CBP_CFP
index 03d6f6d2c1fe8298ec164513a7358a541d101c24..5a3c221f4c9d3837692a390fc2da301afadb04df 100644 (file)
@@ -63,9 +63,6 @@ union __sifields {
        /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
        struct {
                void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
-               int _trapno;    /* TRAP # which caused the signal */
-#endif
 #ifdef __ia64__
                int _imm;               /* immediate value for "break" */
                unsigned int _flags;    /* see ia64 si_flags */
@@ -75,6 +72,8 @@ union __sifields {
 #define __ADDR_BND_PKEY_PAD  (__alignof__(void *) < sizeof(short) ? \
                              sizeof(short) : __alignof__(void *))
                union {
+                       /* used on alpha and sparc */
+                       int _trapno;    /* TRAP # which caused the signal */
                        /*
                         * used when si_code=BUS_MCEERR_AR or
                         * used when si_code=BUS_MCEERR_AO
@@ -92,7 +91,10 @@ union __sifields {
                                __u32 _pkey;
                        } _addr_pkey;
                        /* used when si_code=TRAP_PERF */
-                       unsigned long _perf;
+                       struct {
+                               unsigned long _data;
+                               __u32 _type;
+                       } _perf;
                };
        } _sigfault;
 
@@ -150,14 +152,13 @@ typedef struct siginfo {
 #define si_int         _sifields._rt._sigval.sival_int
 #define si_ptr         _sifields._rt._sigval.sival_ptr
 #define si_addr                _sifields._sigfault._addr
-#ifdef __ARCH_SI_TRAPNO
 #define si_trapno      _sifields._sigfault._trapno
-#endif
 #define si_addr_lsb    _sifields._sigfault._addr_lsb
 #define si_lower       _sifields._sigfault._addr_bnd._lower
 #define si_upper       _sifields._sigfault._addr_bnd._upper
 #define si_pkey                _sifields._sigfault._addr_pkey._pkey
-#define si_perf                _sifields._sigfault._perf
+#define si_perf_data   _sifields._sigfault._perf._data
+#define si_perf_type   _sifields._sigfault._perf._type
 #define si_band                _sifields._sigpoll._band
 #define si_fd          _sifields._sigpoll._fd
 #define si_call_addr   _sifields._sigsys._call_addr
index f44eb0a04afdd8cea369af1395c3637a5f69122d..4c32e97dcdf00a22ebbcfc3c03f849ea90b2d9f5 100644 (file)
@@ -185,7 +185,7 @@ struct fsxattr {
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
 /*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
  * (see uapi/linux/blkzoned.h)
  */
 
index ee93428ced9a1723824413b69bdd88980170bca4..225ec87d4f2283c7e2aacba55fbf3d405753b570 100644 (file)
 #define KEY_VOICECOMMAND               0x246   /* Listening Voice Command */
 #define KEY_ASSISTANT          0x247   /* AL Context-aware desktop assistant */
 #define KEY_KBD_LAYOUT_NEXT    0x248   /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER       0x249   /* Show/hide emoji picker (HUTRR101) */
 
 #define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
index e1ae466833017bf302db074aac2c3fe59feaad8f..162ff99ed2cb261e4f195b53a02cd588dde3542d 100644 (file)
@@ -280,6 +280,7 @@ struct io_uring_params {
 #define IORING_FEAT_SQPOLL_NONFIXED    (1U << 7)
 #define IORING_FEAT_EXT_ARG            (1U << 8)
 #define IORING_FEAT_NATIVE_WORKERS     (1U << 9)
+#define IORING_FEAT_RSRC_TAGS          (1U << 10)
 
 /*
  * io_uring_register(2) opcodes and arguments
@@ -298,8 +299,12 @@ enum {
        IORING_UNREGISTER_PERSONALITY           = 10,
        IORING_REGISTER_RESTRICTIONS            = 11,
        IORING_REGISTER_ENABLE_RINGS            = 12,
-       IORING_REGISTER_RSRC                    = 13,
-       IORING_REGISTER_RSRC_UPDATE             = 14,
+
+       /* extended with tagging */
+       IORING_REGISTER_FILES2                  = 13,
+       IORING_REGISTER_FILES_UPDATE2           = 14,
+       IORING_REGISTER_BUFFERS2                = 15,
+       IORING_REGISTER_BUFFERS_UPDATE          = 16,
 
        /* this goes last */
        IORING_REGISTER_LAST
@@ -312,14 +317,10 @@ struct io_uring_files_update {
        __aligned_u64 /* __s32 * */ fds;
 };
 
-enum {
-       IORING_RSRC_FILE                = 0,
-       IORING_RSRC_BUFFER              = 1,
-};
-
 struct io_uring_rsrc_register {
-       __u32 type;
        __u32 nr;
+       __u32 resv;
+       __u64 resv2;
        __aligned_u64 data;
        __aligned_u64 tags;
 };
@@ -335,8 +336,8 @@ struct io_uring_rsrc_update2 {
        __u32 resv;
        __aligned_u64 data;
        __aligned_u64 tags;
-       __u32 type;
        __u32 nr;
+       __u32 resv2;
 };
 
 /* Skip updating fd indexes set to this value in the fd table */
index 3fd9a7e9d90cd146e2edfdb128944f403983eb61..79d9c44d1ad734b14f815b1b5b246abddb66ced4 100644 (file)
@@ -8,6 +8,7 @@
  * Note: you must update KVM_API_VERSION if you change this interface.
  */
 
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
@@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd {
  * conversion after harvesting an entry.  Also, it must not skip any
  * dirty bits, so that dirty bits are always harvested in sequence.
  */
-#define KVM_DIRTY_GFN_F_DIRTY           BIT(0)
-#define KVM_DIRTY_GFN_F_RESET           BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY           _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET           _BITUL(1)
 #define KVM_DIRTY_GFN_F_MASK            0x3
 
 /*
index bf8143505c49d033c697653121ec0a2054810e89..f92880a15645a4177406eca6e153665ea1ce0ea0 100644 (file)
@@ -464,7 +464,7 @@ struct perf_event_attr {
 
        /*
         * User provided data if sigtrap=1, passed back to user via
-        * siginfo_t::si_perf, e.g. to permit user to identify the event.
+        * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
         */
        __u64   sig_data;
 };
index 7e333042c7e3a1e2522f7146c4eb7021fbe7c288..83429a05b698e32fcc9be74827577058a5aec974 100644 (file)
@@ -39,8 +39,6 @@ struct signalfd_siginfo {
        __s32 ssi_syscall;
        __u64 ssi_call_addr;
        __u32 ssi_arch;
-       __u32 __pad3;
-       __u64 ssi_perf;
 
        /*
         * Pad strcture to 128 bytes. Remember to update the
@@ -51,7 +49,7 @@ struct signalfd_siginfo {
         * comes out of a read(2) and we really don't want to have
         * a compat on read(2).
         */
-       __u8 __pad[16];
+       __u8 __pad[28];
 };
 
 
index f0c35ce8628c933991b1731692ef68efee44f680..4fe842c3a3a9847ab102dd73f4b0d1134cc2abdf 100644 (file)
@@ -54,7 +54,7 @@
 #define VIRTIO_ID_SOUND                        25 /* virtio sound */
 #define VIRTIO_ID_FS                   26 /* virtio filesystem */
 #define VIRTIO_ID_PMEM                 27 /* virtio pmem */
-#define VIRTIO_ID_BT                   28 /* virtio bluetooth */
 #define VIRTIO_ID_MAC80211_HWSIM       29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_BT                   40 /* virtio bluetooth */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
index d3e017b5f0dba52cd767160b34fef7fde7bbf2e6..6d2d34c9f375fb4e3169995d4585f0fec812efb7 100644 (file)
@@ -239,6 +239,39 @@ enum gaudi_engine_id {
        GAUDI_ENGINE_ID_SIZE
 };
 
+/*
+ * ASIC specific PLL index
+ *
+ * Used to retrieve in frequency info of different IPs via
+ * HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be
+ * used as an index in struct hl_pll_frequency_info
+ */
+
+enum hl_goya_pll_index {
+       HL_GOYA_CPU_PLL = 0,
+       HL_GOYA_IC_PLL,
+       HL_GOYA_MC_PLL,
+       HL_GOYA_MME_PLL,
+       HL_GOYA_PCI_PLL,
+       HL_GOYA_EMMC_PLL,
+       HL_GOYA_TPC_PLL,
+       HL_GOYA_PLL_MAX
+};
+
+enum hl_gaudi_pll_index {
+       HL_GAUDI_CPU_PLL = 0,
+       HL_GAUDI_PCI_PLL,
+       HL_GAUDI_SRAM_PLL,
+       HL_GAUDI_HBM_PLL,
+       HL_GAUDI_NIC_PLL,
+       HL_GAUDI_DMA_PLL,
+       HL_GAUDI_MESH_PLL,
+       HL_GAUDI_MME_PLL,
+       HL_GAUDI_TPC_PLL,
+       HL_GAUDI_IF_PLL,
+       HL_GAUDI_PLL_MAX
+};
+
 enum hl_device_status {
        HL_DEVICE_STATUS_OPERATIONAL,
        HL_DEVICE_STATUS_IN_RESET,
index 2994fe6031a0973bddde9d0d6f4d706661ee611b..33336ab58afcf982701cde528c2600ae4989be6c 100644 (file)
@@ -2,6 +2,19 @@
 #ifndef _ASM_ARM_SWIOTLB_XEN_H
 #define _ASM_ARM_SWIOTLB_XEN_H
 
-extern int xen_swiotlb_detect(void);
+#include <xen/features.h>
+#include <xen/xen.h>
+
+static inline int xen_swiotlb_detect(void)
+{
+       if (!xen_domain())
+               return 0;
+       if (xen_feature(XENFEAT_direct_mapped))
+               return 1;
+       /* legacy case */
+       if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
+               return 1;
+       return 0;
+}
 
 #endif /* _ASM_ARM_SWIOTLB_XEN_H */
index 1ea12c64e4c9f3662bc8b76bd8760295ec735dc8..a61c92066c2e41aaadaed9df78c8a9be35dace32 100644 (file)
@@ -442,6 +442,7 @@ config AUDITSYSCALL
 
 source "kernel/irq/Kconfig"
 source "kernel/time/Kconfig"
+source "kernel/bpf/Kconfig"
 source "kernel/Kconfig.preempt"
 
 menu "CPU/Task time and stats accounting"
@@ -1713,46 +1714,6 @@ config KALLSYMS_BASE_RELATIVE
 
 # syscall, maps, verifier
 
-config BPF_LSM
-       bool "LSM Instrumentation with BPF"
-       depends on BPF_EVENTS
-       depends on BPF_SYSCALL
-       depends on SECURITY
-       depends on BPF_JIT
-       help
-         Enables instrumentation of the security hooks with eBPF programs for
-         implementing dynamic MAC and Audit Policies.
-
-         If you are unsure how to answer this question, answer N.
-
-config BPF_SYSCALL
-       bool "Enable bpf() system call"
-       select BPF
-       select IRQ_WORK
-       select TASKS_TRACE_RCU
-       select BINARY_PRINTF
-       select NET_SOCK_MSG if INET
-       default n
-       help
-         Enable the bpf() system call that allows to manipulate eBPF
-         programs and maps via file descriptors.
-
-config ARCH_WANT_DEFAULT_BPF_JIT
-       bool
-
-config BPF_JIT_ALWAYS_ON
-       bool "Permanently enable BPF JIT and remove BPF interpreter"
-       depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
-       help
-         Enables BPF JIT and removes BPF interpreter to avoid
-         speculative execution of BPF instructions by the interpreter
-
-config BPF_JIT_DEFAULT_ON
-       def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
-       depends on HAVE_EBPF_JIT && BPF_JIT
-
-source "kernel/bpf/preload/Kconfig"
-
 config USERFAULTFD
        bool "Enable userfaultfd() system call"
        depends on MMU
index eb01e121d2f15ca12f691277d5040eccf5c24476..e9c42a183e33900afc0f05acf55bbc781e12cc7c 100644 (file)
@@ -1537,7 +1537,7 @@ static noinline void __init kernel_init_freeable(void)
         */
        set_mems_allowed(node_states[N_MEMORY]);
 
-       cad_pid = task_pid(current);
+       cad_pid = get_pid(task_pid(current));
 
        smp_prepare_cpus(setup_max_cpus);
 
index 8031464ed4ae24e2611876a520d8232d0c0eacd8..4e4e61111500ca2d0a66d9013f7a57e3f3d5c1aa 100644 (file)
@@ -1004,12 +1004,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct ext_wait_queue *this)
 {
+       struct task_struct *task;
+
        list_del(&this->list);
-       get_task_struct(this->task);
+       task = get_task_struct(this->task);
 
        /* see MQ_BARRIER for purpose/pairing */
        smp_store_release(&this->state, STATE_READY);
-       wake_q_add_safe(wake_q, this->task);
+       wake_q_add_safe(wake_q, task);
 }
 
 /* pipelined_send() - send a message directly to the task waiting in
index acd1bc7af55a26b7d680348ef576e0e5ed51a83d..6e6c8e0c9380e346efed7dbe7b88aad215560201 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
        struct msg_receiver *msr, *t;
 
        list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-               get_task_struct(msr->r_tsk);
+               struct task_struct *r_tsk;
+
+               r_tsk = get_task_struct(msr->r_tsk);
 
                /* see MSG_BARRIER for purpose/pairing */
                smp_store_release(&msr->r_msg, ERR_PTR(res));
-               wake_q_add_safe(wake_q, msr->r_tsk);
+               wake_q_add_safe(wake_q, r_tsk);
        }
 }
 
index e0ec239680cbd40f79895c260867438f8863e4d7..bf534c74293e1acf4bfdc9fdc12da007486940af 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -784,12 +784,14 @@ would_block:
 static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
                                             struct wake_q_head *wake_q)
 {
-       get_task_struct(q->sleeper);
+       struct task_struct *sleeper;
+
+       sleeper = get_task_struct(q->sleeper);
 
        /* see SEM_BARRIER_2 for purpose/pairing */
        smp_store_release(&q->status, error);
 
-       wake_q_add_safe(wake_q, q->sleeper);
+       wake_q_add_safe(wake_q, sleeper);
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
new file mode 100644 (file)
index 0000000..bd04f4a
--- /dev/null
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# BPF interpreter that, for example, classic socket filters depend on.
+config BPF
+       bool
+
+# Used by archs to tell that they support BPF JIT compiler plus which
+# flavour. Only one of the two can be selected for a specific arch since
+# eBPF JIT supersedes the cBPF JIT.
+
+# Classic BPF JIT (cBPF)
+config HAVE_CBPF_JIT
+       bool
+
+# Extended BPF JIT (eBPF)
+config HAVE_EBPF_JIT
+       bool
+
+# Used by archs to tell that they want the BPF JIT compiler enabled by
+# default for kernels that were compiled with BPF JIT support.
+config ARCH_WANT_DEFAULT_BPF_JIT
+       bool
+
+menu "BPF subsystem"
+
+config BPF_SYSCALL
+       bool "Enable bpf() system call"
+       select BPF
+       select IRQ_WORK
+       select TASKS_TRACE_RCU
+       select BINARY_PRINTF
+       select NET_SOCK_MSG if INET
+       default n
+       help
+         Enable the bpf() system call that allows to manipulate BPF programs
+         and maps via file descriptors.
+
+config BPF_JIT
+       bool "Enable BPF Just In Time compiler"
+       depends on BPF
+       depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
+       depends on MODULES
+       help
+         BPF programs are normally handled by a BPF interpreter. This option
+         allows the kernel to generate native code when a program is loaded
+         into the kernel. This will significantly speed-up processing of BPF
+         programs.
+
+         Note, an admin should enable this feature by changing:
+         /proc/sys/net/core/bpf_jit_enable
+         /proc/sys/net/core/bpf_jit_harden   (optional)
+         /proc/sys/net/core/bpf_jit_kallsyms (optional)
+
+config BPF_JIT_ALWAYS_ON
+       bool "Permanently enable BPF JIT and remove BPF interpreter"
+       depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+       help
+         Enables BPF JIT and removes BPF interpreter to avoid speculative
+         execution of BPF instructions by the interpreter.
+
+config BPF_JIT_DEFAULT_ON
+       def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
+       depends on HAVE_EBPF_JIT && BPF_JIT
+
+config BPF_UNPRIV_DEFAULT_OFF
+       bool "Disable unprivileged BPF by default"
+       depends on BPF_SYSCALL
+       help
+         Disables unprivileged BPF by default by setting the corresponding
+         /proc/sys/kernel/unprivileged_bpf_disabled knob to 2. An admin can
+         still reenable it by setting it to 0 later on, or permanently
+         disable it by setting it to 1 (from which no other transition to
+         0 is possible anymore).
+
+source "kernel/bpf/preload/Kconfig"
+
+config BPF_LSM
+       bool "Enable BPF LSM Instrumentation"
+       depends on BPF_EVENTS
+       depends on BPF_SYSCALL
+       depends on SECURITY
+       depends on BPF_JIT
+       help
+         Enables instrumentation of the security hooks with BPF programs for
+         implementing dynamic MAC and Audit Policies.
+
+         If you are unsure how to answer this question, answer N.
+
+endmenu # "BPF subsystem"
index 5efb2b24012c90af20cf955a6abc951557c35d2f..da471bf01b977fbc5952930c866ed5df8bb05543 100644 (file)
@@ -107,10 +107,12 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_inode_storage_get_proto;
        case BPF_FUNC_inode_storage_delete:
                return &bpf_inode_storage_delete_proto;
+#ifdef CONFIG_NET
        case BPF_FUNC_sk_storage_get:
                return &bpf_sk_storage_get_proto;
        case BPF_FUNC_sk_storage_delete:
                return &bpf_sk_storage_delete_proto;
+#endif /* CONFIG_NET */
        case BPF_FUNC_spin_lock:
                return &bpf_spin_lock_proto;
        case BPF_FUNC_spin_unlock:
index 0600ed325fa0b72160cf5fb3c15890d993235cb3..f982a9f0dbc46135b1ae0feef6eb233daeaf92d9 100644 (file)
@@ -5206,6 +5206,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
        m->ret_size = ret;
 
        for (i = 0; i < nargs; i++) {
+               if (i == nargs - 1 && args[i].type == 0) {
+                       bpf_log(log,
+                               "The function %s with variable args is unsupported.\n",
+                               tname);
+                       return -EINVAL;
+               }
                ret = __get_type_size(btf, args[i].type, &t);
                if (ret < 0) {
                        bpf_log(log,
@@ -5213,6 +5219,12 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
                                tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
                        return -EINVAL;
                }
+               if (ret == 0) {
+                       bpf_log(log,
+                               "The function %s has malformed void argument.\n",
+                               tname);
+                       return -EINVAL;
+               }
                m->arg_size[i] = ret;
        }
        m->nr_args = nargs;
index 544773970dbc6e7eb8dcd494371c30341124c416..a2f1f15ce43216de9b53ea7906aab7ed8791f980 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/jiffies.h>
 #include <linux/pid_namespace.h>
 #include <linux/proc_ns.h>
+#include <linux/security.h>
 
 #include "../../lib/kstrtox.h"
 
@@ -692,38 +693,41 @@ static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
        return -EINVAL;
 }
 
-/* Per-cpu temp buffers which can be used by printf-like helpers for %s or %p
+/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
+ * arguments representation.
  */
-#define MAX_PRINTF_BUF_LEN     512
+#define MAX_BPRINTF_BUF_LEN    512
 
-struct bpf_printf_buf {
-       char tmp_buf[MAX_PRINTF_BUF_LEN];
+/* Support executing three nested bprintf helper calls on a given CPU */
+#define MAX_BPRINTF_NEST_LEVEL 3
+struct bpf_bprintf_buffers {
+       char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN];
 };
-static DEFINE_PER_CPU(struct bpf_printf_buf, bpf_printf_buf);
-static DEFINE_PER_CPU(int, bpf_printf_buf_used);
+static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
+static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
 
 static int try_get_fmt_tmp_buf(char **tmp_buf)
 {
-       struct bpf_printf_buf *bufs;
-       int used;
+       struct bpf_bprintf_buffers *bufs;
+       int nest_level;
 
        preempt_disable();
-       used = this_cpu_inc_return(bpf_printf_buf_used);
-       if (WARN_ON_ONCE(used > 1)) {
-               this_cpu_dec(bpf_printf_buf_used);
+       nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
+       if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
+               this_cpu_dec(bpf_bprintf_nest_level);
                preempt_enable();
                return -EBUSY;
        }
-       bufs = this_cpu_ptr(&bpf_printf_buf);
-       *tmp_buf = bufs->tmp_buf;
+       bufs = this_cpu_ptr(&bpf_bprintf_bufs);
+       *tmp_buf = bufs->tmp_bufs[nest_level - 1];
 
        return 0;
 }
 
 void bpf_bprintf_cleanup(void)
 {
-       if (this_cpu_read(bpf_printf_buf_used)) {
-               this_cpu_dec(bpf_printf_buf_used);
+       if (this_cpu_read(bpf_bprintf_nest_level)) {
+               this_cpu_dec(bpf_bprintf_nest_level);
                preempt_enable();
        }
 }
@@ -760,7 +764,7 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
                if (num_args && try_get_fmt_tmp_buf(&tmp_buf))
                        return -EBUSY;
 
-               tmp_buf_end = tmp_buf + MAX_PRINTF_BUF_LEN;
+               tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN;
                *bin_args = (u32 *)tmp_buf;
        }
 
@@ -1066,11 +1070,13 @@ bpf_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return &bpf_probe_read_kernel_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return &bpf_probe_read_kernel_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_str_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
        case BPF_FUNC_snprintf:
index f25b719ac786811c8d1f06b849e42422ccfba70e..84b3b35fc0d05067e9bd3b53d2711404a8fe4eb1 100644 (file)
@@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
        return -ENOTSUPP;
 }
 
-static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
-{
-       size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
-
-       /* consumer page + producer page + 2 x data pages */
-       return RINGBUF_POS_PAGES + 2 * data_pages;
-}
-
 static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 {
        struct bpf_ringbuf_map *rb_map;
-       size_t mmap_sz;
 
        rb_map = container_of(map, struct bpf_ringbuf_map, map);
-       mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
-
-       if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
-               return -EINVAL;
 
+       if (vma->vm_flags & VM_WRITE) {
+               /* allow writable mapping for the consumer_pos only */
+               if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
+                       return -EPERM;
+       } else {
+               vma->vm_flags &= ~VM_MAYWRITE;
+       }
+       /* remap_vmalloc_range() checks size and offset constraints */
        return remap_vmalloc_range(vma, rb_map->rb,
                                   vma->vm_pgoff + RINGBUF_PGOFF);
 }
@@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
                return NULL;
 
        len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
+       if (len > rb->mask + 1)
+               return NULL;
+
        cons_pos = smp_load_acquire(&rb->consumer_pos);
 
        if (in_nmi()) {
index 941ca06d9dfa1714439f6d13ddfbc1ea607b4d2a..ea04b0deb5ce4467c5ef94b3928b8e5da7c640b0 100644 (file)
@@ -50,7 +50,8 @@ static DEFINE_SPINLOCK(map_idr_lock);
 static DEFINE_IDR(link_idr);
 static DEFINE_SPINLOCK(link_idr_lock);
 
-int sysctl_unprivileged_bpf_disabled __read_mostly;
+int sysctl_unprivileged_bpf_disabled __read_mostly =
+       IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
 
 static const struct bpf_map_ops * const bpf_map_types[] = {
 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
index 757476c91c98414e83e926a9b14fe874bad284a7..94ba5163d4c540b8a5f19b92e60835831432e13a 100644 (file)
@@ -6409,18 +6409,10 @@ enum {
 };
 
 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
-                             const struct bpf_reg_state *off_reg,
-                             u32 *alu_limit, u8 opcode)
+                             u32 *alu_limit, bool mask_to_left)
 {
-       bool off_is_neg = off_reg->smin_value < 0;
-       bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
-                           (opcode == BPF_SUB && !off_is_neg);
        u32 max = 0, ptr_limit = 0;
 
-       if (!tnum_is_const(off_reg->var_off) &&
-           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
-               return REASON_BOUNDS;
-
        switch (ptr_reg->type) {
        case PTR_TO_STACK:
                /* Offset 0 is out-of-bounds, but acceptable start for the
@@ -6486,15 +6478,20 @@ static bool sanitize_needed(u8 opcode)
        return opcode == BPF_ADD || opcode == BPF_SUB;
 }
 
+struct bpf_sanitize_info {
+       struct bpf_insn_aux_data aux;
+       bool mask_to_left;
+};
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn,
                            const struct bpf_reg_state *ptr_reg,
                            const struct bpf_reg_state *off_reg,
                            struct bpf_reg_state *dst_reg,
-                           struct bpf_insn_aux_data *tmp_aux,
+                           struct bpf_sanitize_info *info,
                            const bool commit_window)
 {
-       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
        struct bpf_verifier_state *vstate = env->cur_state;
        bool off_is_imm = tnum_is_const(off_reg->var_off);
        bool off_is_neg = off_reg->smin_value < 0;
@@ -6515,7 +6512,16 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        if (vstate->speculative)
                goto do_sim;
 
-       err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+       if (!commit_window) {
+               if (!tnum_is_const(off_reg->var_off) &&
+                   (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+                       return REASON_BOUNDS;
+
+               info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+                                    (opcode == BPF_SUB && !off_is_neg);
+       }
+
+       err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
        if (err < 0)
                return err;
 
@@ -6523,8 +6529,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                /* In commit phase we narrow the masking window based on
                 * the observed pointer move after the simulated operation.
                 */
-               alu_state = tmp_aux->alu_state;
-               alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+               alu_state = info->aux.alu_state;
+               alu_limit = abs(info->aux.alu_limit - alu_limit);
        } else {
                alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
                alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
@@ -6539,8 +6545,12 @@ do_sim:
        /* If we're in commit phase, we're done here given we already
         * pushed the truncated dst_reg into the speculative verification
         * stack.
+        *
+        * Also, when register is a known constant, we rewrite register-based
+        * operation to immediate-based, and thus do not need masking (and as
+        * a consequence, do not need to simulate the zero-truncation either).
         */
-       if (commit_window)
+       if (commit_window || off_is_imm)
                return 0;
 
        /* Simulate and find potential out-of-bounds access under
@@ -6685,7 +6695,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
            smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
        u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
            umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
-       struct bpf_insn_aux_data tmp_aux = {};
+       struct bpf_sanitize_info info = {};
        u8 opcode = BPF_OP(insn->code);
        u32 dst = insn->dst_reg;
        int ret;
@@ -6754,7 +6764,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (sanitize_needed(opcode)) {
                ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
-                                      &tmp_aux, false);
+                                      &info, false);
                if (ret < 0)
                        return sanitize_err(env, insn, ret, off_reg, dst_reg);
        }
@@ -6895,7 +6905,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                return -EACCES;
        if (sanitize_needed(opcode)) {
                ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
-                                      &tmp_aux, true);
+                                      &info, true);
                if (ret < 0)
                        return sanitize_err(env, insn, ret, off_reg, dst_reg);
        }
@@ -7084,11 +7094,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
        s32 smin_val = src_reg->s32_min_value;
        u32 umax_val = src_reg->u32_max_value;
 
-       /* Assuming scalar64_min_max_and will be called so its safe
-        * to skip updating register for known 32-bit case.
-        */
-       if (src_known && dst_known)
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
                return;
+       }
 
        /* We get our minimum from the var_off, since that's inherently
         * bitwise.  Our maximum is the minimum of the operands' maxima.
@@ -7108,7 +7117,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
                dst_reg->s32_min_value = dst_reg->u32_min_value;
                dst_reg->s32_max_value = dst_reg->u32_max_value;
        }
-
 }
 
 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
@@ -7155,11 +7163,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
        s32 smin_val = src_reg->s32_min_value;
        u32 umin_val = src_reg->u32_min_value;
 
-       /* Assuming scalar64_min_max_or will be called so it is safe
-        * to skip updating register for known case.
-        */
-       if (src_known && dst_known)
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
                return;
+       }
 
        /* We get our maximum from the var_off, and our minimum is the
         * maximum of the operands' minima
@@ -7224,11 +7231,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;
 
-       /* Assuming scalar64_min_max_xor will be called so it is safe
-        * to skip updating register for known case.
-        */
-       if (src_known && dst_known)
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
                return;
+       }
 
        /* We get both minimum and maximum from the var32_off. */
        dst_reg->u32_min_value = var32_off.value;
@@ -13200,6 +13206,17 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
        return 0;
 }
 
+BTF_SET_START(btf_id_deny)
+BTF_ID_UNUSED
+#ifdef CONFIG_SMP
+BTF_ID(func, migrate_disable)
+BTF_ID(func, migrate_enable)
+#endif
+#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
+BTF_ID(func, rcu_read_unlock_strict)
+#endif
+BTF_SET_END(btf_id_deny)
+
 static int check_attach_btf_id(struct bpf_verifier_env *env)
 {
        struct bpf_prog *prog = env->prog;
@@ -13259,6 +13276,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
                ret = bpf_lsm_verify_prog(&env->log, prog);
                if (ret < 0)
                        return ret;
+       } else if (prog->type == BPF_PROG_TYPE_TRACING &&
+                  btf_id_set_contains(&btf_id_deny, btf_id)) {
+               return -EINVAL;
        }
 
        key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
@@ -13358,12 +13378,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
        if (is_priv)
                env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
 
-       if (bpf_prog_is_dev_bound(env->prog->aux)) {
-               ret = bpf_prog_offload_verifier_prep(env->prog);
-               if (ret)
-                       goto skip_full_check;
-       }
-
        env->explored_states = kvcalloc(state_htab_size(env),
                                       sizeof(struct bpf_verifier_state_list *),
                                       GFP_USER);
@@ -13391,6 +13405,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
        if (ret < 0)
                goto skip_full_check;
 
+       if (bpf_prog_is_dev_bound(env->prog->aux)) {
+               ret = bpf_prog_offload_verifier_prep(env->prog);
+               if (ret)
+                       goto skip_full_check;
+       }
+
        ret = check_cfg(env);
        if (ret < 0)
                goto skip_full_check;
index 391aa570369bba0d4726d6822eab5da2c0b5dd11..1f274d7fc934e0846da80125288bf9fa2b1510a9 100644 (file)
@@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
        struct cgroup *cgrp = kn->priv;
        int ret;
 
+       /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
+       if (strchr(new_name_str, '\n'))
+               return -EINVAL;
+
        if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
        if (kn->parent != new_parent)
@@ -1001,7 +1005,7 @@ static int check_cgroupfs_options(struct fs_context *fc)
        ctx->subsys_mask &= enabled;
 
        /*
-        * In absense of 'none', 'name=' or subsystem name options,
+        * In absence of 'none', 'name=' and subsystem name options,
         * let's default to 'all'.
         */
        if (!ctx->subsys_mask && !ctx->none && !ctx->name)
index e049edd66776096a0654c71c7f3c74217a9b3508..21ecc6ee6a6d3d3d97d551c830cf771c92cd782e 100644 (file)
@@ -468,7 +468,7 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
  * @cgrp: the cgroup of interest
  * @ss: the subsystem of interest
  *
- * Find and get @cgrp's css assocaited with @ss.  If the css doesn't exist
+ * Find and get @cgrp's css associated with @ss.  If the css doesn't exist
  * or is offline, %NULL is returned.
  */
 static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
@@ -1633,7 +1633,7 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
 
 /**
  * css_clear_dir - remove subsys files in a cgroup directory
- * @css: taget css
+ * @css: target css
  */
 static void css_clear_dir(struct cgroup_subsys_state *css)
 {
@@ -5350,7 +5350,7 @@ out_unlock:
 /*
  * This is called when the refcnt of a css is confirmed to be killed.
  * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
- * initate destruction and put the css ref from kill_css().
+ * initiate destruction and put the css ref from kill_css().
  */
 static void css_killed_work_fn(struct work_struct *work)
 {
@@ -5634,8 +5634,6 @@ int __init cgroup_init_early(void)
        return 0;
 }
 
-static u16 cgroup_disable_mask __initdata;
-
 /**
  * cgroup_init - cgroup initialization
  *
@@ -5694,12 +5692,8 @@ int __init cgroup_init(void)
                 * disabled flag and cftype registration needs kmalloc,
                 * both of which aren't available during early_init.
                 */
-               if (cgroup_disable_mask & (1 << ssid)) {
-                       static_branch_disable(cgroup_subsys_enabled_key[ssid]);
-                       printk(KERN_INFO "Disabling %s control group subsystem\n",
-                              ss->name);
+               if (!cgroup_ssid_enabled(ssid))
                        continue;
-               }
 
                if (cgroup1_ssid_disabled(ssid))
                        printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
@@ -6058,7 +6052,7 @@ out_revert:
  * @kargs: the arguments passed to create the child process
  *
  * This calls the cancel_fork() callbacks if a fork failed *after*
- * cgroup_can_fork() succeded and cleans up references we took to
+ * cgroup_can_fork() succeeded and cleans up references we took to
  * prepare a new css_set for the child process in cgroup_can_fork().
  */
 void cgroup_cancel_fork(struct task_struct *child,
@@ -6214,7 +6208,10 @@ static int __init cgroup_disable(char *str)
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;
-                       cgroup_disable_mask |= 1 << i;
+
+                       static_branch_disable(cgroup_subsys_enabled_key[i]);
+                       pr_info("Disabling %s control group subsystem\n",
+                               ss->name);
                }
        }
        return 1;
index a945504c0ae7f038ec76ab34f87b5a6e6874191f..adb5190c44296b854e8bcb28d29da990031d32c5 100644 (file)
@@ -3376,7 +3376,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 }
 
 /**
- * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
  * @nodemask: the nodemask to be checked
  *
  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
index ae042c347c640adc09191dfd5db18ce329edce70..3135406608c75db085e7d7bfcfb2ea1109156f55 100644 (file)
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(rdmacg_uncharge);
  * This function follows charging resource in hierarchical way.
  * It will fail if the charge would cause the new value to exceed the
  * hierarchical limit.
- * Returns 0 if the charge succeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
+ * Returns 0 if the charge succeeded, otherwise -EAGAIN, -ENOMEM or -EINVAL.
  * Returns pointer to rdmacg for this resource when charging is successful.
  *
  * Charger needs to account resources on two criteria.
index 3a3fd2993a6500d79b8c206656222b15296f9a28..cee265cb535cc8178149319a0339d11454c28614 100644 (file)
@@ -75,7 +75,7 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
  * @root: root of the tree to traversal
  * @cpu: target cpu
  *
- * Walks the udpated rstat_cpu tree on @cpu from @root.  %NULL @pos starts
+ * Walks the updated rstat_cpu tree on @cpu from @root.  %NULL @pos starts
  * the traversal and %NULL return indicates the end.  During traversal,
  * each returned cgroup is unlinked from the tree.  Must be called with the
  * matching cgroup_rstat_cpu_lock held.
index a0b3b04fb5965995ea991f8aa6e375c62e8d9c2d..bf16395b9e1351f843edce4335df8bddb5a49686 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/highmem.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
+#include <linux/tick.h>
 
 #include "common.h"
 
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                local_irq_disable_exit_to_user();
 
                /* Check if any of the above work has queued a deferred wakeup */
-               rcu_nocb_flush_deferred_wakeup();
+               tick_nohz_user_enter_prepare();
 
                ti_work = READ_ONCE(current_thread_info()->flags);
        }
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
        lockdep_assert_irqs_disabled();
 
        /* Flush pending rcuog wakeup before the last need_resched() check */
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 
        if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                ti_work = exit_to_user_mode_loop(regs, ti_work);
index 2e947a485898344772980a740016dc071fbb030d..fe88d6eea3c2c892190625a097baa4ad7e732ee9 100644 (file)
@@ -4609,7 +4609,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                ctx = &cpuctx->ctx;
                get_ctx(ctx);
+               raw_spin_lock_irqsave(&ctx->lock, flags);
                ++ctx->pin_count;
+               raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
                return ctx;
        }
@@ -6389,8 +6391,6 @@ void perf_event_wakeup(struct perf_event *event)
 
 static void perf_sigtrap(struct perf_event *event)
 {
-       struct kernel_siginfo info;
-
        /*
         * We'd expect this to only occur if the irq_work is delayed and either
         * ctx->task or current has changed in the meantime. This can be the
@@ -6405,13 +6405,8 @@ static void perf_sigtrap(struct perf_event *event)
        if (current->flags & PF_EXITING)
                return;
 
-       clear_siginfo(&info);
-       info.si_signo = SIGTRAP;
-       info.si_code = TRAP_PERF;
-       info.si_errno = event->attr.type;
-       info.si_perf = event->attr.sig_data;
-       info.si_addr = (void __user *)event->pending_addr;
-       force_sig_info(&info);
+       force_sig_perf((void __user *)event->pending_addr,
+                      event->attr.type, event->attr.sig_data);
 }
 
 static void perf_pending_event_disable(struct perf_event *event)
index 23a7a0ba1388a2aae1bbd253437bfaecb3ddd3d0..db8c248ebc8c81a9596bbc46ecd2583f85f62e24 100644 (file)
@@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work)
        if (!irq_work_claim(work))
                return false;
 
-       /*record irq_work call stack in order to print it in KASAN reports*/
-       kasan_record_aux_stack(work);
-
        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
index c1dd02f3be8b8c5e22c7bb7de3588d9f32e6096a..e65de172ccf7cf81734db28d193e0ab4154bfb58 100644 (file)
@@ -266,9 +266,10 @@ static const struct file_operations debugfs_ops =
        .release = single_release
 };
 
-static void __init kcsan_debugfs_init(void)
+static int __init kcsan_debugfs_init(void)
 {
        debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+       return 0;
 }
 
 late_initcall(kcsan_debugfs_init);
index 48d736aa03b24bf377ece9fbe06199eb27adab44..7641bd4072390de193fdb28489a3dad76c132586 100644 (file)
@@ -5736,7 +5736,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
-       trace_lock_acquired(lock, ip);
+       trace_lock_contended(lock, ip);
 
        if (unlikely(!lock_stat || !lockdep_enabled()))
                return;
@@ -5754,7 +5754,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
-       trace_lock_contended(lock, ip);
+       trace_lock_acquired(lock, ip);
 
        if (unlikely(!lock_stat || !lockdep_enabled()))
                return;
index a7276aaf2abc0b75ee5cf15738b5a2d7e0e46f5c..db9301591e3fcca7bfba67c11942a07ae2cf3de6 100644 (file)
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
        task->blocked_on = waiter;
 }
 
-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                         struct task_struct *task)
 {
        DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
        DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
        task->blocked_on = NULL;
 
-       list_del_init(&waiter->list);
+       INIT_LIST_HEAD(&waiter->list);
        waiter->task = NULL;
 }
 
index 1edd3f45a4ecbad7979f26944a9173b3de028959..53e631e1d76da7858839a9aa92d47bff9603c9c2 100644 (file)
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
                                   struct mutex_waiter *waiter,
                                   struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                                struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
index cb6b112ce1550d9bcf17b53eb770279f1c040a5d..013e1b08a1bfb3438ec7424f26321dfd007e8742 100644 (file)
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
  * Add @waiter to a given location in the lock wait_list and set the
  * FLAG_WAITERS flag if it's the first waiter.
  */
-static void __sched
+static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                   struct list_head *list)
 {
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
                __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 }
 
+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+       list_del(&waiter->list);
+       if (likely(list_empty(&lock->wait_list)))
+               __mutex_clear_flag(lock, MUTEX_FLAGS);
+
+       debug_mutex_remove_waiter(lock, waiter, current);
+}
+
 /*
  * Give up ownership to a specific task, when @task = NULL, this is equivalent
  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ acquired:
                        __ww_mutex_check_waiters(lock, ww_ctx);
        }
 
-       mutex_remove_waiter(lock, &waiter, current);
-       if (likely(list_empty(&lock->wait_list)))
-               __mutex_clear_flag(lock, MUTEX_FLAGS);
+       __mutex_remove_waiter(lock, &waiter);
 
        debug_mutex_free_waiter(&waiter);
 
@@ -1080,7 +1088,7 @@ skip_wait:
 
 err:
        __set_current_state(TASK_RUNNING);
-       mutex_remove_waiter(lock, &waiter, current);
+       __mutex_remove_waiter(lock, &waiter);
 err_early_kill:
        spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
index 1c2287d3fa7191589c6d8f0d5f4e67c3282f8dfe..f0c710b1d1927131c2526dcfc7b96010ecaa45af 100644 (file)
  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define mutex_remove_waiter(lock, waiter, task) \
-               __list_del((waiter)->list.prev, (waiter)->list.next)
-
 #define debug_mutex_wake_waiter(lock, waiter)          do { } while (0)
 #define debug_mutex_free_waiter(waiter)                        do { } while (0)
 #define debug_mutex_add_waiter(lock, waiter, ti)       do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
 #define debug_mutex_unlock(lock)                       do { } while (0)
 #define debug_mutex_init(lock, name, key)              do { } while (0)
 
index b5dd92e35b02a78b44532cbdcf69292350dca456..7e78dfabca97fc652b1e2f7fd4d4e6168ae3a149 100644 (file)
@@ -2401,6 +2401,15 @@ static long get_offset(struct module *mod, unsigned int *size,
        return ret;
 }
 
+static bool module_init_layout_section(const char *sname)
+{
+#ifndef CONFIG_MODULE_UNLOAD
+       if (module_exit_section(sname))
+               return true;
+#endif
+       return module_init_section(sname);
+}
+
 /*
  * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
  * might -- code, read-only data, read-write data, small data.  Tally
@@ -2435,7 +2444,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
                        if ((s->sh_flags & masks[m][0]) != masks[m][0]
                            || (s->sh_flags & masks[m][1])
                            || s->sh_entsize != ~0UL
-                           || module_init_section(sname))
+                           || module_init_layout_section(sname))
                                continue;
                        s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
                        pr_debug("\t%s\n", sname);
@@ -2468,7 +2477,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
                        if ((s->sh_flags & masks[m][0]) != masks[m][0]
                            || (s->sh_flags & masks[m][1])
                            || s->sh_entsize != ~0UL
-                           || !module_init_section(sname))
+                           || !module_init_layout_section(sname))
                                continue;
                        s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
                                         | INIT_OFFSET_MASK);
@@ -2807,11 +2816,7 @@ void * __weak module_alloc(unsigned long size)
 
 bool __weak module_init_section(const char *name)
 {
-#ifndef CONFIG_MODULE_UNLOAD
-       return strstarts(name, ".init") || module_exit_section(name);
-#else
        return strstarts(name, ".init");
-#endif
 }
 
 bool __weak module_exit_section(const char *name)
index 76f09456ec4bc129681a4635c276c2b8ee8c3395..2997ca600d1864379ff74f3860e4cb9d0818643e 100644 (file)
@@ -170,6 +170,21 @@ void __ptrace_unlink(struct task_struct *child)
        spin_unlock(&child->sighand->siglock);
 }
 
+static bool looks_like_a_spurious_pid(struct task_struct *task)
+{
+       if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
+               return false;
+
+       if (task_pid_vnr(task) == task->ptrace_message)
+               return false;
+       /*
+        * The tracee changed its pid but the PTRACE_EVENT_EXEC event
+        * was not wait()'ed, most probably debugger targets the old
+        * leader which was destroyed in de_thread().
+        */
+       return true;
+}
+
 /* Ensure that nothing can wake it up, even SIGKILL */
 static bool ptrace_freeze_traced(struct task_struct *task)
 {
@@ -180,7 +195,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
                return ret;
 
        spin_lock_irq(&task->sighand->siglock);
-       if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+       if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+           !__fatal_signal_pending(task)) {
                task->state = __TASK_TRACED;
                ret = true;
        }
index 028a5ab18818fd01c5c8313fe89168ed18b4bdbf..ca9f5198a01ff9b265f7197c03a0dd88d0b685d7 100644 (file)
@@ -1805,7 +1805,7 @@ static struct resource *__request_free_mem_region(struct device *dev,
                                REGION_DISJOINT)
                        continue;
 
-               if (!__request_region_locked(res, &iomem_resource, addr, size,
+               if (__request_region_locked(res, &iomem_resource, addr, size,
                                                name, 0))
                        break;
 
index 9c882f20803e06d5314682800c67f1e110772694..c5aacbd492a19df0c00179849affd57df5e7bacc 100644 (file)
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
 #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
 #define __P(F) __PS(#F, F)
 #define   P(F) __PS(#F, p->F)
+#define   PM(F, M) __PS(#F, p->F & (M))
 #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
 #define __PN(F) __PSN(#F, F)
 #define   PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
-       P(se.avg.util_est.enqueued);
+       PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
        __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
index 20aa234ffe04c02c9746a87b19078302e759c3a3..2c8a9352590d950818eaf2bcf0c706968fe19658 100644 (file)
@@ -3499,10 +3499,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
        unsigned long load_avg;
        u64 load_sum = 0;
-       s64 delta_sum;
        u32 divider;
 
        if (!runnable_sum)
@@ -3549,13 +3548,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        load_sum = (s64)se_weight(se) * runnable_sum;
        load_avg = div_s64(load_sum, divider);
 
-       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-       delta_avg = load_avg - se->avg.load_avg;
+       delta = load_avg - se->avg.load_avg;
 
        se->avg.load_sum = runnable_sum;
        se->avg.load_avg = load_avg;
-       add_positive(&cfs_rq->avg.load_avg, delta_avg);
-       add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+       add_positive(&cfs_rq->avg.load_avg, delta);
+       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3766,11 +3765,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+       /*
+        * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+        * See ___update_load_avg() for details.
+        */
+       u32 divider = get_pelt_divider(&cfs_rq->avg);
+
        dequeue_load_avg(cfs_rq, se);
        sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+       cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
        sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-       sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+       cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 
        add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
@@ -3902,7 +3907,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
 {
        struct util_est ue = READ_ONCE(p->se.avg.util_est);
 
-       return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+       return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
 }
 
 static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4007,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
         * Reset EWMA on utilization increases, the moving average is used only
         * to smooth utilization decreases.
         */
-       ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+       ue.enqueued = task_util(p);
        if (sched_feat(UTIL_EST_FASTUP)) {
                if (ue.ewma < ue.enqueued) {
                        ue.ewma = ue.enqueued;
@@ -4051,6 +4056,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
        ue.ewma  += last_ewma_diff;
        ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
+       ue.enqueued |= UTIL_AVG_UNCHANGED;
        WRITE_ONCE(p->se.avg.util_est, ue);
 
        trace_sched_util_est_se_tp(&p->se);
@@ -6217,7 +6223,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
        }
 
        if (has_idle_core)
-               set_idle_cores(this, false);
+               set_idle_cores(target, false);
 
        if (sched_feat(SIS_PROP) && !has_idle_core) {
                time = cpu_clock(this) - time;
@@ -8030,7 +8036,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
                /* Propagate pending load changes to the parent, if any: */
                se = cfs_rq->tg->se[cpu];
                if (se && !skip_blocked_update(se))
-                       update_load_avg(cfs_rq_of(se), se, 0);
+                       update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 
                /*
                 * There can be a lot of idle CPU cgroups.  Don't let fully
index 1462846d244e34c2f09f23ecb26198fa8738da8e..cfe94ffd2b3828b73b06de6c525e7ecd903b43f7 100644 (file)
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
        return LOAD_AVG_MAX - 1024 + avg->period_contrib;
 }
 
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
 static inline void cfs_se_util_change(struct sched_avg *avg)
 {
        unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
        if (!sched_feat(UTIL_EST))
                return;
 
-       /* Avoid store if the flag has been already set */
+       /* Avoid store if the flag has been already reset */
        enqueued = avg->util_est.enqueued;
        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;
index 6ecd3f3a52b5b040f4c35aa918d8548b4817a295..9f58049ac16d906dac4af0e281a83133379b1eee 100644 (file)
@@ -1105,28 +1105,30 @@ static int seccomp_do_user_notification(int this_syscall,
 
        up(&match->notif->request);
        wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
-       mutex_unlock(&match->notify_lock);
 
        /*
         * This is where we wait for a reply from userspace.
         */
-wait:
-       err = wait_for_completion_interruptible(&n.ready);
-       mutex_lock(&match->notify_lock);
-       if (err == 0) {
-               /* Check if we were woken up by a addfd message */
+       do {
+               mutex_unlock(&match->notify_lock);
+               err = wait_for_completion_interruptible(&n.ready);
+               mutex_lock(&match->notify_lock);
+               if (err != 0)
+                       goto interrupted;
+
                addfd = list_first_entry_or_null(&n.addfd,
                                                 struct seccomp_kaddfd, list);
-               if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) {
+               /* Check if we were woken up by an addfd message */
+               if (addfd)
                        seccomp_handle_addfd(addfd);
-                       mutex_unlock(&match->notify_lock);
-                       goto wait;
-               }
-               ret = n.val;
-               err = n.error;
-               flags = n.flags;
-       }
 
+       }  while (n.state != SECCOMP_NOTIFY_REPLIED);
+
+       ret = n.val;
+       err = n.error;
+       flags = n.flags;
+
+interrupted:
        /* If there were any pending addfd calls, clear them out */
        list_for_each_entry_safe(addfd, tmp, &n.addfd, list) {
                /* The process went away before we got a chance to handle it */
index 66e88649cf742043c4875ec2c8a81ef646f235a8..f7c6ffcbd04407adf91a0594a949a7bd4ca1463f 100644 (file)
@@ -1236,6 +1236,7 @@ static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
        case SIL_TIMER:
        case SIL_POLL:
        case SIL_FAULT:
+       case SIL_FAULT_TRAPNO:
        case SIL_FAULT_MCEERR:
        case SIL_FAULT_BNDERR:
        case SIL_FAULT_PKUERR:
@@ -1804,6 +1805,21 @@ int force_sig_pkuerr(void __user *addr, u32 pkey)
 }
 #endif
 
+int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
+{
+       struct kernel_siginfo info;
+
+       clear_siginfo(&info);
+       info.si_signo     = SIGTRAP;
+       info.si_errno     = 0;
+       info.si_code      = TRAP_PERF;
+       info.si_addr      = addr;
+       info.si_perf_data = sig_data;
+       info.si_perf_type = type;
+
+       return force_sig_info(&info);
+}
+
 /* For the crazy architectures that include trap information in
  * the errno field, instead of an actual errno value.
  */
@@ -2564,6 +2580,7 @@ static void hide_si_addr_tag_bits(struct ksignal *ksig)
 {
        switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
        case SIL_FAULT:
+       case SIL_FAULT_TRAPNO:
        case SIL_FAULT_MCEERR:
        case SIL_FAULT_BNDERR:
        case SIL_FAULT_PKUERR:
@@ -3251,6 +3268,10 @@ enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
 #endif
                        else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
                                layout = SIL_PERF_EVENT;
+#ifdef __ARCH_SI_TRAPNO
+                       else if (layout == SIL_FAULT)
+                               layout = SIL_FAULT_TRAPNO;
+#endif
                }
                else if (si_code <= NSIGPOLL)
                        layout = SIL_POLL;
@@ -3354,35 +3375,28 @@ void copy_siginfo_to_external32(struct compat_siginfo *to,
                break;
        case SIL_FAULT:
                to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+               break;
+       case SIL_FAULT_TRAPNO:
+               to->si_addr = ptr_to_compat(from->si_addr);
                to->si_trapno = from->si_trapno;
-#endif
                break;
        case SIL_FAULT_MCEERR:
                to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
-               to->si_trapno = from->si_trapno;
-#endif
                to->si_addr_lsb = from->si_addr_lsb;
                break;
        case SIL_FAULT_BNDERR:
                to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
-               to->si_trapno = from->si_trapno;
-#endif
                to->si_lower = ptr_to_compat(from->si_lower);
                to->si_upper = ptr_to_compat(from->si_upper);
                break;
        case SIL_FAULT_PKUERR:
                to->si_addr = ptr_to_compat(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
-               to->si_trapno = from->si_trapno;
-#endif
                to->si_pkey = from->si_pkey;
                break;
        case SIL_PERF_EVENT:
                to->si_addr = ptr_to_compat(from->si_addr);
-               to->si_perf = from->si_perf;
+               to->si_perf_data = from->si_perf_data;
+               to->si_perf_type = from->si_perf_type;
                break;
        case SIL_CHLD:
                to->si_pid = from->si_pid;
@@ -3438,35 +3452,28 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
                break;
        case SIL_FAULT:
                to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
+               break;
+       case SIL_FAULT_TRAPNO:
+               to->si_addr = compat_ptr(from->si_addr);
                to->si_trapno = from->si_trapno;
-#endif
                break;
        case SIL_FAULT_MCEERR:
                to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
-               to->si_trapno = from->si_trapno;
-#endif
                to->si_addr_lsb = from->si_addr_lsb;
                break;
        case SIL_FAULT_BNDERR:
                to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
-               to->si_trapno = from->si_trapno;
-#endif
                to->si_lower = compat_ptr(from->si_lower);
                to->si_upper = compat_ptr(from->si_upper);
                break;
        case SIL_FAULT_PKUERR:
                to->si_addr = compat_ptr(from->si_addr);
-#ifdef __ARCH_SI_TRAPNO
-               to->si_trapno = from->si_trapno;
-#endif
                to->si_pkey = from->si_pkey;
                break;
        case SIL_PERF_EVENT:
                to->si_addr = compat_ptr(from->si_addr);
-               to->si_perf = from->si_perf;
+               to->si_perf_data = from->si_perf_data;
+               to->si_perf_type = from->si_perf_type;
                break;
        case SIL_CHLD:
                to->si_pid    = from->si_pid;
@@ -4644,11 +4651,13 @@ static inline void siginfo_buildtime_checks(void)
 
        /* sigfault */
        CHECK_OFFSET(si_addr);
+       CHECK_OFFSET(si_trapno);
        CHECK_OFFSET(si_addr_lsb);
        CHECK_OFFSET(si_lower);
        CHECK_OFFSET(si_upper);
        CHECK_OFFSET(si_pkey);
-       CHECK_OFFSET(si_perf);
+       CHECK_OFFSET(si_perf_data);
+       CHECK_OFFSET(si_perf_type);
 
        /* sigpoll */
        CHECK_OFFSET(si_band);
index 14edf84cc571f5c19b8eec330719e3f36807f19a..d4a78e08f6d89c3d26207ae405f15018fdd442bf 100644 (file)
@@ -225,7 +225,27 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
        mutex_unlock(&bpf_stats_enabled_mutex);
        return ret;
 }
-#endif
+
+static int bpf_unpriv_handler(struct ctl_table *table, int write,
+                             void *buffer, size_t *lenp, loff_t *ppos)
+{
+       int ret, unpriv_enable = *(int *)table->data;
+       bool locked_state = unpriv_enable == 1;
+       struct ctl_table tmp = *table;
+
+       if (write && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       tmp.data = &unpriv_enable;
+       ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+       if (write && !ret) {
+               if (locked_state && unpriv_enable != 1)
+                       return -EPERM;
+               *(int *)table->data = unpriv_enable;
+       }
+       return ret;
+}
+#endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
 
 /*
  * /proc/sys support
@@ -2600,10 +2620,9 @@ static struct ctl_table kern_table[] = {
                .data           = &sysctl_unprivileged_bpf_disabled,
                .maxlen         = sizeof(sysctl_unprivileged_bpf_disabled),
                .mode           = 0644,
-               /* only handle a transition from default "0" to "1" */
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ONE,
-               .extra2         = SYSCTL_ONE,
+               .proc_handler   = bpf_unpriv_handler,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = &two,
        },
        {
                .procname       = "bpf_stats_enabled",
index bea9d08b16988b33a82b7d911c3a174bd4a0bb16..5897828b9d7ed98c3a06a465027a350006d47933 100644 (file)
@@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
        if (rtcdev)
                return -EBUSY;
 
-       if (!rtc->ops->set_alarm)
+       if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
                return -1;
        if (!device_may_wakeup(rtc->dev.parent))
                return -1;
index 828b091501ca4571040e88b336276ee39b7a0cb2..6784f27a309937a5ecffad50e0dc1af0cdfd5e68 100644 (file)
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 bool tick_nohz_full_running;
 EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;
index d2d7cf6cfe83e2099ec43ed6ccde735e693e341c..7a52bc1728414fc61ac5fadb37b223479a5b0b4e 100644 (file)
@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 static __always_inline int
 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 {
-       int ret = security_locked_down(LOCKDOWN_BPF_READ);
+       int ret;
 
-       if (unlikely(ret < 0))
-               goto fail;
        ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
-               goto fail;
-       return ret;
-fail:
-       memset(dst, 0, size);
+               memset(dst, 0, size);
        return ret;
 }
 
@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 static __always_inline int
 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 {
-       int ret = security_locked_down(LOCKDOWN_BPF_READ);
-
-       if (unlikely(ret < 0))
-               goto fail;
+       int ret;
 
        /*
         * The strncpy_from_kernel_nofault() call will likely not fill the
@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
         */
        ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
-               goto fail;
-
-       return ret;
-fail:
-       memset(dst, 0, size);
+               memset(dst, 0, size);
        return ret;
 }
 
@@ -1011,16 +999,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return &bpf_probe_read_kernel_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return &bpf_probe_read_kernel_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
-               return &bpf_probe_read_compat_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
-               return &bpf_probe_read_compat_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS
        case BPF_FUNC_get_current_cgroup_id:
index 2e8a3fde710446e52cf48d593e453da0bf5b057b..72ef4dccbcc47393d6652544826fa98eb4a21ada 100644 (file)
@@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
 
 static void print_ip_ins(const char *fmt, const unsigned char *p)
 {
+       char ins[MCOUNT_INSN_SIZE];
        int i;
 
+       if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+               printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+               return;
+       }
+
        printk(KERN_CONT "%s", fmt);
 
        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+               printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
 }
 
 enum ftrace_bug_type ftrace_bug_type;
index 560e4c8d3825bd361db8a2dbf9ae939d3ef0a727..9299057feb56f75c7d08e942690c43dbfac94275 100644 (file)
@@ -2736,7 +2736,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
            (entry = this_cpu_read(trace_buffered_event))) {
                /* Try to use the per cpu buffer first */
                val = this_cpu_inc_return(trace_buffered_event_cnt);
-               if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
+               if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
                        trace_event_setup(entry, type, trace_ctx);
                        entry->array[0] = len;
                        return entry;
@@ -3704,6 +3704,9 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
                goto print;
 
        while (*p) {
+               bool star = false;
+               int len = 0;
+
                j = 0;
 
                /* We only care about %s and variants */
@@ -3725,13 +3728,17 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
                                /* Need to test cases like %08.*s */
                                for (j = 1; p[i+j]; j++) {
                                        if (isdigit(p[i+j]) ||
-                                           p[i+j] == '*' ||
                                            p[i+j] == '.')
                                                continue;
+                                       if (p[i+j] == '*') {
+                                               star = true;
+                                               continue;
+                                       }
                                        break;
                                }
                                if (p[i+j] == 's')
                                        break;
+                               star = false;
                        }
                        j = 0;
                }
@@ -3744,6 +3751,9 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
                iter->fmt[i] = '\0';
                trace_seq_vprintf(&iter->seq, iter->fmt, ap);
 
+               if (star)
+                       len = va_arg(ap, int);
+
                /* The ap now points to the string data of the %s */
                str = va_arg(ap, const char *);
 
@@ -3762,8 +3772,18 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
                        int ret;
 
                        /* Try to safely read the string */
-                       ret = strncpy_from_kernel_nofault(iter->fmt, str,
-                                                         iter->fmt_size);
+                       if (star) {
+                               if (len + 1 > iter->fmt_size)
+                                       len = iter->fmt_size - 1;
+                               if (len < 0)
+                                       len = 0;
+                               ret = copy_from_kernel_nofault(iter->fmt, str, len);
+                               iter->fmt[len] = 0;
+                               star = false;
+                       } else {
+                               ret = strncpy_from_kernel_nofault(iter->fmt, str,
+                                                                 iter->fmt_size);
+                       }
                        if (ret < 0)
                                trace_seq_printf(&iter->seq, "(0x%px)", str);
                        else
@@ -3775,7 +3795,10 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
                        strncpy(iter->fmt, p + i, j + 1);
                        iter->fmt[j+1] = '\0';
                }
-               trace_seq_printf(&iter->seq, iter->fmt, str);
+               if (star)
+                       trace_seq_printf(&iter->seq, iter->fmt, len, str);
+               else
+                       trace_seq_printf(&iter->seq, iter->fmt, str);
 
                p += i + j + 1;
        }
index 7c397907d0e950dcb2f2c140621905bfbee32c87..92d3bcc5a5e0984cc3e09828d3cab049aa08f070 100644 (file)
@@ -302,10 +302,10 @@ void touch_softlockup_watchdog_sync(void)
        __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
 }
 
-static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
+static int is_softlockup(unsigned long touch_ts,
+                        unsigned long period_ts,
+                        unsigned long now)
 {
-       unsigned long now = get_timestamp();
-
        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
                /* Warn about unreasonable delays. */
                if (time_after(now, period_ts + get_softlockup_thresh()))
@@ -353,8 +353,7 @@ static int softlockup_fn(void *data)
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-       unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
-       unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
+       unsigned long touch_ts, period_ts, now;
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
@@ -376,12 +375,23 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
 
+       /*
+        * Read the current timestamp first. It might become invalid anytime
+        * when a virtual machine is stopped by the host or when the watchdog
+        * is touched from NMI.
+        */
+       now = get_timestamp();
        /*
         * If a virtual machine is stopped by the host it can look to
-        * the watchdog like a soft lockup. Check to see if the host
-        * stopped the vm before we process the timestamps.
+        * the watchdog like a soft lockup. This function touches the watchdog.
         */
        kvm_check_and_clear_guest_paused();
+       /*
+        * The stored timestamp is comparable with @now only when not touched.
+        * It might get touched anytime from NMI. Make sure that is_softlockup()
+        * uses the same (valid) value.
+        */
+       period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));
 
        /* Reset the interval when touched by known problematic code. */
        if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
@@ -398,13 +408,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                return HRTIMER_RESTART;
        }
 
-       /* check for a softlockup
-        * This is done by making sure a high priority task is
-        * being scheduled.  The task touches the watchdog to
-        * indicate it is getting cpu time.  If it hasn't then
-        * this is a good indication some task is hogging the cpu
-        */
-       duration = is_softlockup(touch_ts, period_ts);
+       /* Check for a softlockup. */
+       touch_ts = __this_cpu_read(watchdog_touch_ts);
+       duration = is_softlockup(touch_ts, period_ts, now);
        if (unlikely(duration)) {
                /*
                 * Prevent multiple soft-lockup reports if one cpu is already
index b19d759e55a5d897e8d283f1b646c13d7e740466..50142fc08902d5a78cef0dca684e2b98b1bcd26f 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/uaccess.h>
 #include <linux/sched/isolation.h>
 #include <linux/nmi.h>
+#include <linux/kvm_para.h>
 
 #include "workqueue_internal.h"
 
@@ -5772,6 +5773,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 {
        unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
        bool lockup_detected = false;
+       unsigned long now = jiffies;
        struct worker_pool *pool;
        int pi;
 
@@ -5786,6 +5788,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
                if (list_empty(&pool->worklist))
                        continue;
 
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like a stall.
+                */
+               kvm_check_and_clear_guest_paused();
+
                /* get the latest of pool and touched timestamps */
                if (pool->cpu >= 0)
                        touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
@@ -5799,12 +5807,12 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
                        ts = touched;
 
                /* did we stall? */
-               if (time_after(jiffies, ts + thresh)) {
+               if (time_after(now, ts + thresh)) {
                        lockup_detected = true;
                        pr_emerg("BUG: workqueue lockup - pool");
                        pr_cont_pool_info(pool);
                        pr_cont(" stuck for %us!\n",
-                               jiffies_to_msecs(jiffies - pool_ts) / 1000);
+                               jiffies_to_msecs(now - pool_ts) / 1000);
                }
        }
 
index e11cfc18b6c0826ffc8bdcf1329dd1ec1aa2ef2e..2cc359ec1fdd3e1c7115bdc0dfbeaa0f59118bd9 100644 (file)
@@ -348,6 +348,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
 obj-$(CONFIG_PLDMFW) += pldmfw/
 
 # KUnit tests
+CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
 obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
index 47cfa054827f3df027873dae9e7eb37c13ae167c..9f852a89ee2a1e4858b40d04373395e385a1d66c 100644 (file)
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
 /**
  * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
  * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
-       or the previous crc64 value if computing incrementally.
      or the previous crc64 value if computing incrementally.
  * @p: pointer to buffer over which CRC64 is run
  * @len: length of buffer @p
  */
index 921d0a654243ce7c58d5288f25a78fd3ba781cf7..641767b0dce29be6d333b08b14031878e71436a7 100644 (file)
@@ -586,13 +586,11 @@ static int remaining(int wrote)
        return 0;
 }
 
-static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
 {
        int pos_after_tid;
        int pos = 0;
 
-       *buf = '\0';
-
        if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
                if (in_interrupt())
                        pos += snprintf(buf + pos, remaining(pos), "<intr> ");
@@ -618,11 +616,18 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
        return buf;
 }
 
+static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf)
+{
+       if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY))
+               return __dynamic_emit_prefix(desc, buf);
+       return buf;
+}
+
 void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
 {
        va_list args;
        struct va_format vaf;
-       char buf[PREFIX_SIZE];
+       char buf[PREFIX_SIZE] = "";
 
        BUG_ON(!descriptor);
        BUG_ON(!fmt);
@@ -655,7 +660,7 @@ void __dynamic_dev_dbg(struct _ddebug *descriptor,
        if (!dev) {
                printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
        } else {
-               char buf[PREFIX_SIZE];
+               char buf[PREFIX_SIZE] = "";
 
                dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV",
                                dynamic_emit_prefix(descriptor, buf),
@@ -684,7 +689,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
        vaf.va = &args;
 
        if (dev && dev->dev.parent) {
-               char buf[PREFIX_SIZE];
+               char buf[PREFIX_SIZE] = "";
 
                dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent,
                                "%s%s %s %s%s: %pV",
@@ -720,7 +725,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
        vaf.va = &args;
 
        if (ibdev && ibdev->dev.parent) {
-               char buf[PREFIX_SIZE];
+               char buf[PREFIX_SIZE] = "";
 
                dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
                                "%s%s %s %s: %pV",
@@ -915,7 +920,6 @@ static const struct seq_operations ddebug_proc_seqops = {
 
 static int ddebug_proc_open(struct inode *inode, struct file *file)
 {
-       vpr_info("called\n");
        return seq_open_private(file, &ddebug_proc_seqops,
                                sizeof(struct ddebug_iter));
 }
index a1071cdefb5aa4e629493ae1f32c7a15422a695b..af9302141bcf63983b8bac15468174f63ed0edc7 100644 (file)
@@ -275,7 +275,7 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
        wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
                            percpu_ref_switch_lock);
 
-       if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+       if (data->force_atomic || percpu_ref_is_dying(ref))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
@@ -385,7 +385,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+       WARN_ONCE(percpu_ref_is_dying(ref),
                  "%s called more than once on %ps!", __func__,
                  ref->data->release);
 
@@ -465,7 +465,7 @@ void percpu_ref_resurrect(struct percpu_ref *ref)
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+       WARN_ON_ONCE(!percpu_ref_is_dying(ref));
        WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
 
        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
index dc05cfc2d12f0707fa45215ac9bead7c8f7ae7a6..cacbbbdef768d056d010ebde123353192f502c50 100644 (file)
@@ -654,8 +654,20 @@ static char global_array[10];
 
 static void kasan_global_oob(struct kunit *test)
 {
-       volatile int i = 3;
-       char *p = &global_array[ARRAY_SIZE(global_array) + i];
+       /*
+        * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
+        * from failing here and panicking the kernel, access the array via a
+        * volatile pointer, which will prevent the compiler from being able to
+        * determine the array bounds.
+        *
+        * This access uses a volatile pointer to char (char *volatile) rather
+        * than the more conventional pointer to volatile char (volatile char *)
+        * because we want to prevent the compiler from making inferences about
+        * the pointer itself (i.e. its array bounds), not the data that it
+        * refers to.
+        */
+       char *volatile array = global_array;
+       char *p = &array[ARRAY_SIZE(global_array) + 3];
 
        /* Only generic mode instruments globals. */
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -703,8 +715,9 @@ static void ksize_uaf(struct kunit *test)
 static void kasan_stack_oob(struct kunit *test)
 {
        char stack_array[10];
-       volatile int i = OOB_TAG_OFF;
-       char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
+       /* See comment in kasan_global_oob. */
+       char *volatile array = stack_array;
+       char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
 
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
 
@@ -715,7 +728,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
 {
        volatile int i = 10;
        char alloca_array[i];
-       char *p = alloca_array - 1;
+       /* See comment in kasan_global_oob. */
+       char *volatile array = alloca_array;
+       char *p = array - 1;
 
        /* Only generic mode instruments dynamic allocas. */
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
@@ -728,7 +743,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
 {
        volatile int i = 10;
        char alloca_array[i];
-       char *p = alloca_array + i;
+       /* See comment in kasan_global_oob. */
+       char *volatile array = alloca_array;
+       char *p = array + i;
 
        /* Only generic mode instruments dynamic allocas. */
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
index 05efe98a9ac2c8c514ae7ea46db1e617cecd5251..297d1b349c1973d5fa8865a182e3e30adfe8ac5f 100644 (file)
@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PMD advanced\n");
        /* Align the address wrt HPAGE_PMD_SIZE */
-       vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+       vaddr &= HPAGE_PMD_MASK;
 
        pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 
@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PUD advanced\n");
        /* Align the address wrt HPAGE_PUD_SIZE */
-       vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+       vaddr &= HPAGE_PUD_MASK;
 
        set_pud_at(mm, vaddr, pudp, pud);
        pudp_set_wrprotect(mm, vaddr, pudp);
index 0697134b6a12cb4d0aec4eda2facaa67213487c9..3ded6a5f26b259e859cba3a9f164cd1b6e8b235c 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1593,10 +1593,6 @@ struct page *get_dump_page(unsigned long addr)
                                      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
        if (locked)
                mmap_read_unlock(mm);
-
-       if (ret == 1 && is_page_poisoned(page))
-               return NULL;
-
        return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
index 3db405dea3dc95bd0e8ca40d3f40b976d68acdb5..5560b50876fb7ad0a108f216320a3e0f560f625e 100644 (file)
@@ -1793,7 +1793,7 @@ retry:
                        SetPageHWPoison(page);
                        ClearPageHWPoison(head);
                }
-               remove_hugetlb_page(h, page, false);
+               remove_hugetlb_page(h, head, false);
                h->max_huge_pages--;
                spin_unlock_irq(&hugetlb_lock);
                update_and_free_page(h, head);
@@ -4056,6 +4056,7 @@ again:
                                 * See Documentation/vm/mmu_notifier.rst
                                 */
                                huge_ptep_set_wrprotect(src, addr, src_pte);
+                               entry = huge_pte_wrprotect(entry);
                        }
 
                        page_dup_rmap(ptepage, true);
@@ -4888,10 +4889,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                if (!page)
                        goto out;
        } else if (!*pagep) {
-               ret = -ENOMEM;
+               /* If a page already exists, then it's UFFDIO_COPY for
+                * a non-missing case. Return -EEXIST.
+                */
+               if (vm_shared &&
+                   hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+                       ret = -EEXIST;
+                       goto out;
+               }
+
                page = alloc_huge_page(dst_vma, dst_addr, 0);
-               if (IS_ERR(page))
+               if (IS_ERR(page)) {
+                       ret = -ENOMEM;
                        goto out;
+               }
 
                ret = copy_huge_page_from_user(page,
                                                (const void __user *) src_addr,
index 54bd0dc2c23c1b02858cadd73a5de94fe696e12c..2f1182948aa6e7a6603515c4dfe6e8eabfded734 100644 (file)
@@ -96,26 +96,6 @@ static inline void set_page_refcounted(struct page *page)
        set_page_count(page, 1);
 }
 
-/*
- * When kernel touch the user page, the user page may be have been marked
- * poison but still mapped in user space, if without this page, the kernel
- * can guarantee the data integrity and operation success, the kernel is
- * better to check the posion status and avoid touching it, be good not to
- * panic, coredump for process fatal signal is a sample case matching this
- * scenario. Or if kernel can't guarantee the data integrity, it's better
- * not to call this function, let kernel touch the poison page and get to
- * panic.
- */
-static inline bool is_page_poisoned(struct page *page)
-{
-       if (PageHWPoison(page))
-               return true;
-       else if (PageHuge(page) && PageHWPoison(compound_head(page)))
-               return true;
-
-       return false;
-}
-
 extern unsigned long highest_memmap_pfn;
 
 /*
index d1dcc7e744acfa5adbf2c77cc69d16c11bd826eb..8ee0136f8cb082c0c1e57c3462988b79d8d96c6a 100644 (file)
 #include "pgalloc-track.h"
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static bool __ro_after_init iomap_max_page_shift = PAGE_SHIFT;
+static unsigned int __ro_after_init iomap_max_page_shift = BITS_PER_LONG - 1;
 
 static int __init set_nohugeiomap(char *str)
 {
-       iomap_max_page_shift = P4D_SHIFT;
+       iomap_max_page_shift = PAGE_SHIFT;
        return 0;
 }
 early_param("nohugeiomap", set_nohugeiomap);
 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-static const bool iomap_max_page_shift = PAGE_SHIFT;
+static const unsigned int iomap_max_page_shift = PAGE_SHIFT;
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
 int ioremap_page_range(unsigned long addr,
index c4605ac9837b011460d21c000fdc3c76db764348..348f31d15a971f54035e7ab51b535d4ba9e6fdcd 100644 (file)
@@ -220,8 +220,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 /**
  * kasan_populate_early_shadow - populate shadow memory region with
  *                               kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end   - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
  */
 int __ref kasan_populate_early_shadow(const void *shadow_start,
                                        const void *shadow_end)
index e18fbbd5d9b4863ad90bc882570c5b53025c57ad..4d21ac44d5d35cc08fcf6cf8b073dddf7f458f6e 100644 (file)
@@ -627,10 +627,10 @@ static void toggle_allocation_gate(struct work_struct *work)
                 * During low activity with no allocations we might wait a
                 * while; let's avoid the hung task warning.
                 */
-               wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
-                                  sysctl_hung_task_timeout_secs * HZ / 2);
+               wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+                                       sysctl_hung_task_timeout_secs * HZ / 2);
        } else {
-               wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+               wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
        }
 
        /* Disable static key and reset timer. */
index 6bbe314c5260373690c4cd20ddca2416c677b415..2f3aaeb34a42eec00b82945740444ad0e972c217 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -776,11 +776,12 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
                struct page *page;
 
                stable_node = rmap_item->head;
-               page = get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
+               page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
                if (!page)
                        goto out;
 
                hlist_del(&rmap_item->hlist);
+               unlock_page(page);
                put_page(page);
 
                if (!hlist_empty(&stable_node->hlist))
index 730daa00952ba48131f8e409445c7b379c620f5e..f3ffab9b9e39157b552068dd821adfe38c91cbb6 100644 (file)
@@ -2939,6 +2939,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
+               entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
                /*
@@ -3602,6 +3603,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
+       entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3786,6 +3788,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);
+       else
+               entry = pte_sw_mkyoung(entry);
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
index aaa1655cf6820df137e08a07fcb492a819ddb733..d1f5de1c1283b08075b3bb5177ea5b1214a58c0f 100644 (file)
@@ -9158,6 +9158,8 @@ bool take_page_off_buddy(struct page *page)
                        del_page_from_free_list(page_head, zone, page_order);
                        break_down_buddy_pages(zone, page_head, page, 0,
                                                page_order, migratetype);
+                       if (!is_migrate_isolate(migratetype))
+                               __mod_zone_freepage_state(zone, -1, migratetype);
                        ret = true;
                        break;
                }
index a08cedefbfaa65f6eb28d9abc74545328e575960..5d46611cba8dca00882a9c2799288f93127fca48 100644 (file)
@@ -2258,25 +2258,11 @@ out_nomem:
 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct shmem_inode_info *info = SHMEM_I(file_inode(file));
+       int ret;
 
-       if (info->seals & F_SEAL_FUTURE_WRITE) {
-               /*
-                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
-                * "future write" seal active.
-                */
-               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
-                       return -EPERM;
-
-               /*
-                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
-                * MAP_SHARED and read-only, take care to not allow mprotect to
-                * revert protections on such mappings. Do this only for shared
-                * mappings. For private mappings, don't need to mask
-                * VM_MAYWRITE as we still want them to be COW-writable.
-                */
-               if (vma->vm_flags & VM_SHARED)
-                       vma->vm_flags &= ~(VM_MAYWRITE);
-       }
+       ret = seal_check_future_write(info->seals, vma);
+       if (ret)
+               return ret;
 
        /* arm64 - allow memory tagging on RAM-based files */
        vma->vm_flags |= VM_MTE_ALLOWED;
@@ -2375,8 +2361,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        pgoff_t offset, max_off;
 
        ret = -ENOMEM;
-       if (!shmem_inode_acct_block(inode, 1))
+       if (!shmem_inode_acct_block(inode, 1)) {
+               /*
+                * We may have got a page, returned -ENOENT triggering a retry,
+                * and now we find ourselves with -ENOMEM. Release the page, to
+                * avoid a BUG_ON in our caller.
+                */
+               if (unlikely(*pagep)) {
+                       put_page(*pagep);
+                       *pagep = NULL;
+               }
                goto out;
+       }
 
        if (!*pagep) {
                page = shmem_alloc_page(gfp, info, pgoff);
index 71b784f0b7c3ed4fc94f237828294938da219742..cec62984f7d3b22b346cc0b72d4b9aeaf51c7595 100644 (file)
@@ -10,7 +10,7 @@
 DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
 extern void __shuffle_free_memory(pg_data_t *pgdat);
 extern bool shuffle_pick_tail(void);
-static inline void shuffle_free_memory(pg_data_t *pgdat)
+static inline void __meminit shuffle_free_memory(pg_data_t *pgdat)
 {
        if (!static_branch_unlikely(&page_alloc_shuffle_key))
                return;
@@ -18,7 +18,7 @@ static inline void shuffle_free_memory(pg_data_t *pgdat)
 }
 
 extern void __shuffle_zone(struct zone *z);
-static inline void shuffle_zone(struct zone *z)
+static inline void __meminit shuffle_zone(struct zone *z)
 {
        if (!static_branch_unlikely(&page_alloc_shuffle_key))
                return;
index f8833d3e5d47e1efe79fd6f4f2b73da9753abb55..a4a571428c511f5e8e9436e68fb5345f6a75dd74 100644 (file)
@@ -318,6 +318,16 @@ kmem_cache_create_usercopy(const char *name,
        const char *cache_name;
        int err;
 
+#ifdef CONFIG_SLUB_DEBUG
+       /*
+        * If no slub_debug was enabled globally, the static key is not yet
+        * enabled by setup_slub_debug(). Enable it if the cache is being
+        * created with any of the debugging flags passed explicitly.
+        */
+       if (flags & SLAB_DEBUG_FLAGS)
+               static_branch_enable(&slub_debug_enabled);
+#endif
+
        mutex_lock(&slab_mutex);
 
        err = kmem_cache_sanity_check(name, size);
index feda53ae62ba4e045c17729dda49a7d536c4e4dc..3f96e099817a17e9c09fc0263405026cf1c2ceb9 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -301,6 +301,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
        if (!debug_pagealloc_enabled_static())
                return get_freepointer(s, object);
 
+       object = kasan_reset_tag(object);
        freepointer_addr = (unsigned long)object + s->offset;
        copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
        return freelist_ptr(s, p, freepointer_addr);
@@ -3828,15 +3829,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 {
-#ifdef CONFIG_SLUB_DEBUG
-       /*
-        * If no slub_debug was enabled globally, the static key is not yet
-        * enabled by setup_slub_debug(). Enable it if the cache is being
-        * created with any of the debugging flags passed explicitly.
-        */
-       if (flags & SLAB_DEBUG_FLAGS)
-               static_branch_enable(&slub_debug_enabled);
-#endif
        s->flags = kmem_cache_flags(s->size, flags, s->name);
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
        s->random = get_random_long();
index e14b3820c6a814ec3962227cc7dbdc52ffc72a50..63a73e164d5510fba964eec777b7486b634a1343 100644 (file)
@@ -360,38 +360,38 @@ out:
                 * If a reservation for the page existed in the reservation
                 * map of a private mapping, the map was modified to indicate
                 * the reservation was consumed when the page was allocated.
-                * We clear the PagePrivate flag now so that the global
+                * We clear the HPageRestoreReserve flag now so that the global
                 * reserve count will not be incremented in free_huge_page.
                 * The reservation map will still indicate the reservation
                 * was consumed and possibly prevent later page allocation.
                 * This is better than leaking a global reservation.  If no
-                * reservation existed, it is still safe to clear PagePrivate
-                * as no adjustments to reservation counts were made during
-                * allocation.
+                * reservation existed, it is still safe to clear
+                * HPageRestoreReserve as no adjustments to reservation counts
+                * were made during allocation.
                 *
                 * The reservation map for shared mappings indicates which
                 * pages have reservations.  When a huge page is allocated
                 * for an address with a reservation, no change is made to
-                * the reserve map.  In this case PagePrivate will be set
-                * to indicate that the global reservation count should be
+                * the reserve map.  In this case HPageRestoreReserve will be
+                * set to indicate that the global reservation count should be
                 * incremented when the page is freed.  This is the desired
                 * behavior.  However, when a huge page is allocated for an
                 * address without a reservation a reservation entry is added
-                * to the reservation map, and PagePrivate will not be set.
-                * When the page is freed, the global reserve count will NOT
-                * be incremented and it will appear as though we have leaked
-                * reserved page.  In this case, set PagePrivate so that the
-                * global reserve count will be incremented to match the
-                * reservation map entry which was created.
+                * to the reservation map, and HPageRestoreReserve will not be
+                * set. When the page is freed, the global reserve count will
+                * NOT be incremented and it will appear as though we have
+                * leaked reserved page.  In this case, set HPageRestoreReserve
+                * so that the global reserve count will be incremented to
+                * match the reservation map entry which was created.
                 *
                 * Note that vm_alloc_shared is based on the flags of the vma
                 * for which the page was originally allocated.  dst_vma could
                 * be different or NULL on error.
                 */
                if (vm_alloc_shared)
-                       SetPagePrivate(page);
+                       SetHPageRestoreReserve(page);
                else
-                       ClearPagePrivate(page);
+                       ClearHPageRestoreReserve(page);
                put_page(page);
        }
        BUG_ON(copied < 0);
index f5ee7c65e6b4b905970ae91c7ab8e06eeb9878fc..c7392c449b254b8887866711f660efd001b8e10b 100644 (file)
@@ -302,21 +302,6 @@ config BQL
        select DQL
        default y
 
-config BPF_JIT
-       bool "enable BPF Just In Time compiler"
-       depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
-       depends on MODULES
-       help
-         Berkeley Packet Filter filtering capabilities are normally handled
-         by an interpreter. This option allows kernel to generate a native
-         code when filter is loaded in memory. This should speedup
-         packet sniffing (libpcap/tcpdump).
-
-         Note, admin should enable this feature changing:
-         /proc/sys/net/core/bpf_jit_enable
-         /proc/sys/net/core/bpf_jit_harden   (optional)
-         /proc/sys/net/core/bpf_jit_kallsyms (optional)
-
 config BPF_STREAM_PARSER
        bool "enable BPF STREAM_PARSER"
        depends on INET
@@ -470,15 +455,3 @@ config ETHTOOL_NETLINK
          e.g. notification messages.
 
 endif   # if NET
-
-# Used by archs to tell that they support BPF JIT compiler plus which flavour.
-# Only one of the two can be selected for a specific arch since eBPF JIT supersedes
-# the cBPF JIT.
-
-# Classic BPF JIT (cBPF)
-config HAVE_CBPF_JIT
-       bool
-
-# Extended BPF JIT (eBPF)
-config HAVE_EBPF_JIT
-       bool
index fd12f1652bdf4c18291b78656140aef77ad882c4..7d71d104fdfda20e026b2b78670f576590e5974c 100644 (file)
@@ -1610,8 +1610,13 @@ setup_failed:
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
-               flush_work(&hdev->cmd_work);
+
+               /* Since hci_rx_work() is possible to awake new cmd_work
+                * it should be flushed first to avoid unexpected call of
+                * hci_cmd_work()
+                */
                flush_work(&hdev->rx_work);
+               flush_work(&hdev->cmd_work);
 
                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);
index 251b9128f530a07217f148902a62a72a81c991cc..eed0dd066e12ceac2992ec4e6c1b6bf53e28345b 100644 (file)
@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
-                       bh_lock_sock_nested(sk);
+                       lock_sock(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
                                hci_dev_put(hdev);
                        }
-                       bh_unlock_sock(sk);
+                       release_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
index c10e5a55758d2294612b57f8a7d1ebff5734fa0a..440139706130af73ee3e74f19d4194e87b1ffc5d 100644 (file)
@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
        caifd_put(caifd);
 }
 
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                     struct cflayer *link_support, int head_room,
                     struct cflayer **layer,
                     int (**rcv_func)(struct sk_buff *, struct net_device *,
@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        enum cfcnfg_phy_preference pref;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs;
+       int res;
 
        caifdevs = caif_device_list(dev_net(dev));
        caifd = caif_device_alloc(dev);
        if (!caifd)
-               return;
+               return -ENOMEM;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);
 
@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        strlcpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
-       cfcnfg_add_phy_layer(cfg,
+       res = cfcnfg_add_phy_layer(cfg,
                                dev,
                                &caifd->layer,
                                pref,
@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        mutex_unlock(&caifdevs->lock);
        if (rcv_func)
                *rcv_func = receive;
+       return res;
 }
 EXPORT_SYMBOL(caif_enroll_dev);
 
@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
        struct cflayer *layer, *link_support;
        int head_room = 0;
        struct caif_device_entry_list *caifdevs;
+       int res;
 
        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));
@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                                break;
                        }
                }
-               caif_enroll_dev(dev, caifdev, link_support, head_room,
+               res = caif_enroll_dev(dev, caifdev, link_support, head_room,
                                &layer, NULL);
+               if (res)
+                       cfserl_release(link_support);
                caifdev->flowctrl = dev_flowctrl;
                break;
 
index a0116b9503d9da9a2733f0b7f8aae4f1b3ceeda9..b02e1292f7f1929b535338e6197d26d6f5f475f3 100644 (file)
@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
        return (struct cflayer *) this;
 }
 
+static void cfusbl_release(struct cflayer *layer)
+{
+       kfree(layer);
+}
+
 static struct packet_type caif_usb_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_802_EX1),
 };
@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        struct cflayer *layer, *link_support;
        struct usbnet *usbnet;
        struct usb_device *usbdev;
+       int res;
 
        /* Check whether we have a NCM device, and find its VID/PID. */
        if (!(dev->dev.parent && dev->dev.parent->driver &&
@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        if (dev->num_tx_queues > 1)
                pr_warn("USB device uses more than one tx queue\n");
 
-       caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+       res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
                        &layer, &caif_usb_type.func);
+       if (res)
+               goto err;
+
        if (!pack_added)
                dev_add_pack(&caif_usb_type);
        pack_added = true;
@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        strlcpy(layer->name, dev->name, sizeof(layer->name));
 
        return 0;
+err:
+       cfusbl_release(link_support);
+       return res;
 }
 
 static struct notifier_block caif_device_notifier = {
index 399239a14420fadde08875c1288f434862f92979..cac30e676ac94fd36da349e437163a07000d0215 100644 (file)
@@ -450,7 +450,7 @@ unlock:
        rcu_read_unlock();
 }
 
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
 {
        struct cflayer *frml;
        struct cfcnfg_phyinfo *phyinfo = NULL;
-       int i;
+       int i, res = 0;
        u8 phyid;
 
        mutex_lock(&cnfg->lock);
@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                        goto got_phyid;
        }
        pr_warn("Too many CAIF Link Layers (max 6)\n");
+       res = -EEXIST;
        goto out;
 
 got_phyid:
        phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
-       if (!phyinfo)
+       if (!phyinfo) {
+               res = -ENOMEM;
                goto out_err;
+       }
 
        phy_layer->id = phyid;
        phyinfo->pref = pref;
@@ -492,8 +495,10 @@ got_phyid:
 
        frml = cffrml_create(phyid, fcs);
 
-       if (!frml)
+       if (!frml) {
+               res = -ENOMEM;
                goto out_err;
+       }
        phyinfo->frm_layer = frml;
        layer_set_up(frml, cnfg->mux);
 
@@ -511,11 +516,12 @@ got_phyid:
        list_add_rcu(&phyinfo->node, &cnfg->phys);
 out:
        mutex_unlock(&cnfg->lock);
-       return;
+       return res;
 
 out_err:
        kfree(phyinfo);
        mutex_unlock(&cnfg->lock);
+       return res;
 }
 EXPORT_SYMBOL(cfcnfg_add_phy_layer);
 
index e11725a4bb0edb8c01d985accd885fe453f9b12d..40cd57ad0a0f4f51e2ab5d51d1629aee739f79a7 100644 (file)
@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                           int phyid);
 
+void cfserl_release(struct cflayer *layer)
+{
+       kfree(layer);
+}
+
 struct cflayer *cfserl_create(int instance, bool use_stx)
 {
        struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
index 9f94ad3caee92938a81e0fb22e850ec97d73b9c9..253b24417c8e5c05559c3e503aedfd6dcee7a13d 100644 (file)
@@ -1062,27 +1062,31 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        if (len < ISOTP_MIN_NAMELEN)
                return -EINVAL;
 
+       if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
+               return -EADDRNOTAVAIL;
+
+       if (!addr->can_ifindex)
+               return -ENODEV;
+
+       lock_sock(sk);
+
        /* do not register frame reception for functional addressing */
        if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
                do_rx_reg = 0;
 
        /* do not validate rx address for functional addressing */
        if (do_rx_reg) {
-               if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
-                       return -EADDRNOTAVAIL;
+               if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) {
+                       err = -EADDRNOTAVAIL;
+                       goto out;
+               }
 
-               if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
-                       return -EADDRNOTAVAIL;
+               if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) {
+                       err = -EADDRNOTAVAIL;
+                       goto out;
+               }
        }
 
-       if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG))
-               return -EADDRNOTAVAIL;
-
-       if (!addr->can_ifindex)
-               return -ENODEV;
-
-       lock_sock(sk);
-
        if (so->bound && addr->can_ifindex == so->ifindex &&
            addr->can_addr.tp.rx_id == so->rxid &&
            addr->can_addr.tp.tx_id == so->txid)
@@ -1164,16 +1168,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
        return ISOTP_MIN_NAMELEN;
 }
 
-static int isotp_setsockopt(struct socket *sock, int level, int optname,
+static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
                            sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct isotp_sock *so = isotp_sk(sk);
        int ret = 0;
 
-       if (level != SOL_CAN_ISOTP)
-               return -EINVAL;
-
        if (so->bound)
                return -EISCONN;
 
@@ -1248,6 +1249,22 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
        return ret;
 }
 
+static int isotp_setsockopt(struct socket *sock, int level, int optname,
+                           sockptr_t optval, unsigned int optlen)
+
+{
+       struct sock *sk = sock->sk;
+       int ret;
+
+       if (level != SOL_CAN_ISOTP)
+               return -EINVAL;
+
+       lock_sock(sk);
+       ret = isotp_setsockopt_locked(sock, level, optname, optval, optlen);
+       release_sock(sk);
+       return ret;
+}
+
 static int isotp_getsockopt(struct socket *sock, int level, int optname,
                            char __user *optval, int __user *optlen)
 {
index ddd15af3a2837b3b34261d66dfbab4b9e1b81972..210fc3b4d0d833a1360c76fb455effda93864320 100644 (file)
@@ -177,7 +177,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
        if (kcmlen > stackbuf_size)
                kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
        if (kcmsg == NULL)
-               return -ENOBUFS;
+               return -ENOMEM;
 
        /* Now copy them over neatly. */
        memset(kcmsg, 0, kcmlen);
index 222b1d322c9693e351ebc0c2548e3e3a50860e46..ef8cf7619bafa3a229ad9b18cc003792df7b3fac 100644 (file)
@@ -3853,7 +3853,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
        if (q->flags & TCQ_F_NOLOCK) {
                rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
-               qdisc_run(q);
+               if (likely(!netif_xmit_frozen_or_stopped(txq)))
+                       qdisc_run(q);
 
                if (unlikely(to_free))
                        kfree_skb_list(to_free);
@@ -5025,25 +5026,43 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
                sd->output_queue_tailp = &sd->output_queue;
                local_irq_enable();
 
+               rcu_read_lock();
+
                while (head) {
                        struct Qdisc *q = head;
                        spinlock_t *root_lock = NULL;
 
                        head = head->next_sched;
 
-                       if (!(q->flags & TCQ_F_NOLOCK)) {
-                               root_lock = qdisc_lock(q);
-                               spin_lock(root_lock);
-                       }
                        /* We need to make sure head->next_sched is read
                         * before clearing __QDISC_STATE_SCHED
                         */
                        smp_mb__before_atomic();
+
+                       if (!(q->flags & TCQ_F_NOLOCK)) {
+                               root_lock = qdisc_lock(q);
+                               spin_lock(root_lock);
+                       } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+                                                    &q->state))) {
+                               /* There is a synchronize_net() between
+                                * STATE_DEACTIVATED flag being set and
+                                * qdisc_reset()/some_qdisc_is_busy() in
+                                * dev_deactivate(), so we can safely bail out
+                                * early here to avoid data race between
+                                * qdisc_deactivate() and some_qdisc_is_busy()
+                                * for lockless qdisc.
+                                */
+                               clear_bit(__QDISC_STATE_SCHED, &q->state);
+                               continue;
+                       }
+
                        clear_bit(__QDISC_STATE_SCHED, &q->state);
                        qdisc_run(q);
                        if (root_lock)
                                spin_unlock(root_lock);
                }
+
+               rcu_read_unlock();
        }
 
        xfrm_dev_backlog(sd);
index 4eb969518ee078c5d689b4bcb685d3ec6b8a179d..051432ea4f69ed1e84f08f50feb62e0ff80f3af5 100644 (file)
@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
        case DEVLINK_PORT_FLAVOUR_CPU:
        case DEVLINK_PORT_FLAVOUR_DSA:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
                                attrs->phys.port_number))
                        return -EMSGSIZE;
@@ -8631,7 +8630,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 
        switch (attrs->flavour) {
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                if (!attrs->split)
                        n = snprintf(name, len, "p%u", attrs->phys.port_number);
                else
@@ -8679,6 +8677,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
                n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
                             attrs->pci_sf.sf);
                break;
+       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+               return -EOPNOTSUPP;
        }
 
        if (n >= len)
index cd80ffed6d267b58ffbece234f31bddc34a50d92..a9f9379750802b8003b404cc98476aa6d784bd86 100644 (file)
@@ -1168,7 +1168,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
 {
        struct net *net;
        struct sk_buff *skb;
-       int err = -ENOBUFS;
+       int err = -ENOMEM;
 
        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
index cae56d08a67079bb2fa0335eec6afbd4c91c6ced..65ab4e21c087f6854cd0af2f15571db6db47887f 100644 (file)
@@ -3784,6 +3784,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
                __skb_push(skb, head_room);
                memset(skb->data, 0, head_room);
                skb_reset_mac_header(skb);
+               skb_reset_mac_len(skb);
        }
 
        return ret;
index 9ec1aa9640adeb08ac8282b208cae2f4e0bfb263..3c4c4c7a04022625d219b7ab400072c7ae7446fb 100644 (file)
@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
                                          struct page *page,
                                          unsigned int dma_sync_size)
 {
+       dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
        dma_sync_size = min(dma_sync_size, pool->p.max_len);
-       dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+       dma_sync_single_range_for_device(pool->p.dev, dma_addr,
                                         pool->p.offset, dma_sync_size,
                                         pool->p.dma_dir);
 }
@@ -195,7 +197,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
        if (dma_mapping_error(pool->p.dev, dma))
                return false;
 
-       page->dma_addr = dma;
+       page_pool_set_dma_addr(page, dma);
 
        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -331,13 +333,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
                 */
                goto skip_dma_unmap;
 
-       dma = page->dma_addr;
+       dma = page_pool_get_dma_addr(page);
 
-       /* When page is unmapped, it cannot be returned our pool */
+       /* When page is unmapped, it cannot be returned to our pool */
        dma_unmap_page_attrs(pool->p.dev, dma,
                             PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
-       page->dma_addr = 0;
+       page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
index 714d5fa3854685845475fa38f83491e33a68e389..3e84279c412363f3bf1e8bec240aca6a6f93705e 100644 (file)
@@ -4842,8 +4842,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
        if (err < 0)
                goto errout;
 
-       if (!skb->len)
+       if (!skb->len) {
+               err = -EINVAL;
                goto errout;
+       }
 
        rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
        return 0;
index c761c4a0b66b1d34ed1671727daf86738bff6c92..946888afef880342cc08940e6bad1f295a985dd8 100644 (file)
@@ -815,10 +815,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
 }
 EXPORT_SYMBOL(sock_set_rcvbuf);
 
+static void __sock_set_mark(struct sock *sk, u32 val)
+{
+       if (val != sk->sk_mark) {
+               sk->sk_mark = val;
+               sk_dst_reset(sk);
+       }
+}
+
 void sock_set_mark(struct sock *sk, u32 val)
 {
        lock_sock(sk);
-       sk->sk_mark = val;
+       __sock_set_mark(sk, val);
        release_sock(sk);
 }
 EXPORT_SYMBOL(sock_set_mark);
@@ -1126,10 +1134,10 @@ set_sndbuf:
        case SO_MARK:
                if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
                        ret = -EPERM;
-               } else if (val != sk->sk_mark) {
-                       sk->sk_mark = val;
-                       sk_dst_reset(sk);
+                       break;
                }
+
+               __sock_set_mark(sk, val);
                break;
 
        case SO_RXQ_OVFL:
@@ -2132,10 +2140,10 @@ void skb_orphan_partial(struct sk_buff *skb)
        if (skb_is_tcp_pure_ack(skb))
                return;
 
-       if (can_skb_orphan_partial(skb))
-               skb_set_owner_sk_safe(skb, skb->sk);
-       else
-               skb_orphan(skb);
+       if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
+               return;
+
+       skb_orphan(skb);
 }
 EXPORT_SYMBOL(skb_orphan_partial);
 
index 052a977914a6ded18c2455631678392ea1c17c85..63adbc21a735a445ebf0408d188743b51821191d 100644 (file)
@@ -147,8 +147,7 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
        struct dsa_switch *ds = cpu_dp->ds;
        int port = cpu_dp->index;
        int len = ETH_GSTRING_LEN;
-       int mcount = 0, count;
-       unsigned int i;
+       int mcount = 0, count, i;
        uint8_t pfx[4];
        uint8_t *ndata;
 
@@ -178,6 +177,8 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
                 */
                ds->ops->get_strings(ds, port, stringset, ndata);
                count = ds->ops->get_sset_count(ds, port, stringset);
+               if (count < 0)
+                       return;
                for (i = 0; i < count; i++) {
                        memmove(ndata + (i * len + sizeof(pfx)),
                                ndata + i * len, len - sizeof(pfx));
index 8c0f3c6ab3654dd10f3ed1953fe2215afc539ac6..d4756b9201089292acfb05a7a0cb9557b7da5ce9 100644 (file)
@@ -776,13 +776,15 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
        struct dsa_switch *ds = dp->ds;
 
        if (sset == ETH_SS_STATS) {
-               int count;
+               int count = 0;
 
-               count = 4;
-               if (ds->ops->get_sset_count)
-                       count += ds->ops->get_sset_count(ds, dp->index, sset);
+               if (ds->ops->get_sset_count) {
+                       count = ds->ops->get_sset_count(ds, dp->index, sset);
+                       if (count < 0)
+                               return count;
+               }
 
-               return count;
+               return count + 4;
        } else if (sset ==  ETH_SS_TEST) {
                return net_selftest_get_count();
        }
index 008c1ec6e20c15c4b49302bb4433349f83629276..122ad5833fb1cafcbb702900e65fa1b19e1ff3de 100644 (file)
@@ -64,7 +64,7 @@
 #define DSA_8021Q_SUBVLAN_HI_SHIFT     9
 #define DSA_8021Q_SUBVLAN_HI_MASK      GENMASK(9, 9)
 #define DSA_8021Q_SUBVLAN_LO_SHIFT     4
-#define DSA_8021Q_SUBVLAN_LO_MASK      GENMASK(4, 3)
+#define DSA_8021Q_SUBVLAN_LO_MASK      GENMASK(5, 4)
 #define DSA_8021Q_SUBVLAN_HI(x)                (((x) & GENMASK(2, 2)) >> 2)
 #define DSA_8021Q_SUBVLAN_LO(x)                ((x) & GENMASK(1, 0))
 #define DSA_8021Q_SUBVLAN(x)           \
index b7642dc96d5072d66efb43b63516f258551ac70b..ec07f5765e030d5f4784db178d60e0ceb12cdc63 100644 (file)
@@ -119,7 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
         */
        memset(&data->phy_stats, 0xff, sizeof(data->phy_stats));
        memset(&data->mac_stats, 0xff, sizeof(data->mac_stats));
-       memset(&data->ctrl_stats, 0xff, sizeof(data->mac_stats));
+       memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats));
        memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats));
 
        if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
index bfcdc75fc01e65136a29370a79bac6ba181d5faa..26c32407f029096e9c3133e9da56c4f22e049e1f 100644 (file)
@@ -218,6 +218,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        if (master) {
                skb->dev = master->dev;
                skb_reset_mac_header(skb);
+               skb_reset_mac_len(skb);
                hsr_forward_skb(skb, master);
        } else {
                atomic_long_inc(&dev->tx_dropped);
@@ -259,6 +260,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
                goto out;
 
        skb_reset_mac_header(skb);
+       skb_reset_mac_len(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
 
index 6852e9bccf5b8c694004e6f94851bf528039ba6d..ceb8afb2a62f4bd34b6b081f682da3698285af07 100644 (file)
@@ -474,8 +474,8 @@ static void handle_std_frame(struct sk_buff *skb,
        }
 }
 
-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
-                        struct hsr_frame_info *frame)
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+                       struct hsr_frame_info *frame)
 {
        struct hsr_port *port = frame->port_rcv;
        struct hsr_priv *hsr = port->hsr;
@@ -483,20 +483,26 @@ void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
        /* HSRv0 supervisory frames double as a tag so treat them as tagged. */
        if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
            proto == htons(ETH_P_HSR)) {
+               /* Check if skb contains hsr_ethhdr */
+               if (skb->mac_len < sizeof(struct hsr_ethhdr))
+                       return -EINVAL;
+
                /* HSR tagged frame :- Data or Supervision */
                frame->skb_std = NULL;
                frame->skb_prp = NULL;
                frame->skb_hsr = skb;
                frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
-               return;
+               return 0;
        }
 
        /* Standard frame or PRP from master port */
        handle_std_frame(skb, frame);
+
+       return 0;
 }
 
-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
-                        struct hsr_frame_info *frame)
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+                       struct hsr_frame_info *frame)
 {
        /* Supervision frame */
        struct prp_rct *rct = skb_get_PRP_rct(skb);
@@ -507,9 +513,11 @@ void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
                frame->skb_std = NULL;
                frame->skb_prp = skb;
                frame->sequence_nr = prp_get_skb_sequence_nr(rct);
-               return;
+               return 0;
        }
        handle_std_frame(skb, frame);
+
+       return 0;
 }
 
 static int fill_frame_info(struct hsr_frame_info *frame,
@@ -519,9 +527,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
        struct hsr_vlan_ethhdr *vlan_hdr;
        struct ethhdr *ethhdr;
        __be16 proto;
+       int ret;
 
-       /* Check if skb contains hsr_ethhdr */
-       if (skb->mac_len < sizeof(struct hsr_ethhdr))
+       /* Check if skb contains ethhdr */
+       if (skb->mac_len < sizeof(struct ethhdr))
                return -EINVAL;
 
        memset(frame, 0, sizeof(*frame));
@@ -548,7 +557,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
 
        frame->is_from_san = false;
        frame->port_rcv = port;
-       hsr->proto_ops->fill_frame_info(proto, skb, frame);
+       ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
+       if (ret)
+               return ret;
+
        check_local_dest(port->hsr, skb, frame);
 
        return 0;
index b6acaafa83fc218506aa31031dc51941aa052ff8..206636750b3001d218fb52aacd9b5d822e75bd1f 100644 (file)
@@ -24,8 +24,8 @@ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
                                       struct hsr_port *port);
 bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
 bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
-                        struct hsr_frame_info *frame);
-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
-                        struct hsr_frame_info *frame);
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+                       struct hsr_frame_info *frame);
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+                       struct hsr_frame_info *frame);
 #endif /* __HSR_FORWARD_H */
index 8f264672b70bd6bd042232db415112fe4efa2c28..53d1f7a8246308e437b7b8a20a6f677e891b5e1c 100644 (file)
@@ -186,8 +186,8 @@ struct hsr_proto_ops {
                                               struct hsr_port *port);
        struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
                                                struct hsr_port *port);
-       void (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
-                               struct hsr_frame_info *frame);
+       int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+                              struct hsr_frame_info *frame);
        bool (*invalid_dan_ingress_frame)(__be16 protocol);
        void (*update_san_info)(struct hsr_node *node, bool is_sup);
 };
index c5227d42faf56243e89db4027bdd79718e45ff3b..b70e6bbf6021f5dcc50ced0d8811aa0280680376 100644 (file)
@@ -60,12 +60,11 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
                goto finish_pass;
 
        skb_push(skb, ETH_HLEN);
-
-       if (skb_mac_header(skb) != skb->data) {
-               WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n",
-                         __func__, __LINE__, port->dev->name);
-               goto finish_consume;
-       }
+       skb_reset_mac_header(skb);
+       if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
+           protocol == htons(ETH_P_HSR))
+               skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
+       skb_reset_mac_len(skb);
 
        hsr_forward_skb(skb, port);
 
index 0c1b0770c59ea303e4334156f1d203b1d2f16fc3..29bf97640166451a616231fcf90c4b88644d50ed 100644 (file)
@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
            nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
            nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
                        be32_to_cpu(params.frame_counter)) ||
-           ieee802154_llsec_fill_key_id(msg, &params.out_key))
+           ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
+               rc = -ENOBUFS;
                goto out_free;
+       }
 
        dev_put(dev);
 
@@ -1184,7 +1186,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
 {
        struct ieee802154_llsec_device *dpos;
        struct ieee802154_llsec_device_key *kpos;
-       int rc = 0, idx = 0, idx2;
+       int idx = 0, idx2;
 
        list_for_each_entry(dpos, &data->table->devices, list) {
                if (idx++ < data->s_idx)
@@ -1200,7 +1202,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
                                                      data->nlmsg_seq,
                                                      dpos->hwaddr, kpos,
                                                      data->dev)) {
-                               return rc = -EMSGSIZE;
+                               return -EMSGSIZE;
                        }
 
                        data->s_idx2++;
@@ -1209,7 +1211,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
                data->s_idx++;
        }
 
-       return rc;
+       return 0;
 }
 
 int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
index 2cdc7e63fe172b4598c1848606e9cdbba54ed018..88215b5c93aa429bc9421745de9fcfb09843aa18 100644 (file)
@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
-           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
+               rc = -EMSGSIZE;
                goto nla_put_failure;
+       }
        dev_put(dev);
 
        wpan_phy_put(phy);
index 05f6bd89a7dd8e526369db8247a8fd9661cf41f2..0cf2374c143bdd9f02fa076ddacb086f1a1510bf 100644 (file)
@@ -1298,19 +1298,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
        if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, nl802154_dev_addr_policy, NULL))
                return -EINVAL;
 
-       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
-           !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
-           !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
-             attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
                return -EINVAL;
 
        addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
        addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
        switch (addr->mode) {
        case NL802154_DEV_ADDR_SHORT:
+               if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
+                       return -EINVAL;
                addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
                break;
        case NL802154_DEV_ADDR_EXTENDED:
+               if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
+                       return -EINVAL;
                addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
                break;
        default:
index dff4f0eb96b0f22d076a1168c8225c3100b4cabd..9e41eff4a68588e0cf869415dccddf9c6fd48a0c 100644 (file)
@@ -185,6 +185,7 @@ BTF_ID(func, tcp_reno_cong_avoid)
 BTF_ID(func, tcp_reno_undo_cwnd)
 BTF_ID(func, tcp_slow_start)
 BTF_ID(func, tcp_cong_avoid_ai)
+#ifdef CONFIG_X86
 #ifdef CONFIG_DYNAMIC_FTRACE
 #if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
 BTF_ID(func, cubictcp_init)
@@ -213,6 +214,7 @@ BTF_ID(func, bbr_min_tso_segs)
 BTF_ID(func, bbr_set_state)
 #endif
 #endif  /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_X86 */
 BTF_SET_END(bpf_tcp_ca_kfunc_ids)
 
 static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
index bc2f6ca971520e39d3d18299b80454cc949fd892..816d8aad5a68438b33a3240a58f3d0f7f3b42d89 100644 (file)
@@ -886,7 +886,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
 
 
 /*
- *  Copy BOOTP-supplied string if not already set.
+ *  Copy BOOTP-supplied string
  */
 static int __init ic_bootp_string(char *dest, char *src, int len, int max)
 {
@@ -935,12 +935,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
                }
                break;
        case 12:        /* Host name */
-               ic_bootp_string(utsname()->nodename, ext+1, *ext,
-                               __NEW_UTS_LEN);
-               ic_host_name_set = 1;
+               if (!ic_host_name_set) {
+                       ic_bootp_string(utsname()->nodename, ext+1, *ext,
+                                       __NEW_UTS_LEN);
+                       ic_host_name_set = 1;
+               }
                break;
        case 15:        /* Domain name (DNS) */
-               ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
+               if (!ic_domain[0])
+                       ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
                break;
        case 17:        /* Root path */
                if (!root_server_path[0])
index 0d59efb6b49ecfe3d45cd59949ce93a6ea207e32..d36ef9d25e73cb14eed45701acafcaf78e08451e 100644 (file)
@@ -1745,10 +1745,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
                     IPV6_TLV_PADN, 0 };
 
        /* we assume size > sizeof(ra) here */
-       /* limit our allocations to order-0 page */
-       size = min_t(int, size, SKB_MAX_ORDER(0, 0));
        skb = sock_alloc_send_skb(sk, size, 1, &err);
-
        if (!skb)
                return NULL;
 
index 47a0dc46cbdb0a16e6fb7cee505fcbe28d5d01fc..28e44782c94d1e0485c9d7c685179716ab252c22 100644 (file)
@@ -343,7 +343,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
-       if (!(fhdr->frag_off & htons(0xFFF9))) {
+       if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
                __IP6_INC_STATS(net,
@@ -351,6 +351,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 
                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+               IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
+                                           sizeof(struct ipv6hdr);
                return 1;
        }
 
index a22822bdbf39ceda8ad57a04c4febc291c71e0b8..d417e514bd52c182ae1df93aa5cd225bf1c1c614 100644 (file)
@@ -3673,11 +3673,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
        if (nh) {
                if (rt->fib6_src.plen) {
                        NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
-                       goto out;
+                       goto out_free;
                }
                if (!nexthop_get(nh)) {
                        NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
-                       goto out;
+                       goto out_free;
                }
                rt->nh = nh;
                fib6_nh = nexthop_fib6_nh(rt->nh);
@@ -3714,6 +3714,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
 out:
        fib6_info_release(rt);
        return ERR_PTR(err);
+out_free:
+       ip_fib_metrics_put(rt->fib6_metrics);
+       kfree(rt);
+       return ERR_PTR(err);
 }
 
 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
index aa98294a3ad3177d69f39a773b08d77b1b950e6d..f7c8110ece5fbcfe40830c9ae07d55145bca33ee 100644 (file)
@@ -271,6 +271,9 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        if (ipip6_tunnel_create(dev) < 0)
                goto failed_free;
 
+       if (!parms->name[0])
+               strcpy(parms->name, dev->name);
+
        return nt;
 
 failed_free:
index 6201965bd822f0cef1f8716c26e28e7bc915701d..1c572c8daced03951491d5b2da667f6992d5f835 100644 (file)
@@ -1066,6 +1066,11 @@ out_error:
                goto partial_message;
        }
 
+       if (skb_has_frag_list(head)) {
+               kfree_skb_list(skb_shinfo(head)->frag_list);
+               skb_shinfo(head)->frag_list = NULL;
+       }
+
        if (head != kcm->seq_skb)
                kfree_skb(head);
 
index 8fcbaa1eedf3ed1809a46f99f8a5eaf39e8a07ae..214404a558fb6fea556c10cbb4886c175e34acf5 100644 (file)
@@ -50,12 +50,6 @@ struct ieee80211_local;
 #define IEEE80211_ENCRYPT_HEADROOM 8
 #define IEEE80211_ENCRYPT_TAILROOM 18
 
-/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
- * reception of at least three fragmented frames. This limit can be increased
- * by changing this define, at the cost of slower frame reassembly and
- * increased memory use (about 2 kB of RAM per entry). */
-#define IEEE80211_FRAGMENT_MAX 4
-
 /* power level hasn't been configured (or set to automatic) */
 #define IEEE80211_UNSET_POWER_LEVEL    INT_MIN
 
@@ -88,18 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
 
 #define IEEE80211_MAX_NAN_INSTANCE_ID 255
 
-struct ieee80211_fragment_entry {
-       struct sk_buff_head skb_list;
-       unsigned long first_frag_time;
-       u16 seq;
-       u16 extra_len;
-       u16 last_frag;
-       u8 rx_queue;
-       bool check_sequential_pn; /* needed for CCMP/GCMP */
-       u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
-};
-
-
 struct ieee80211_bss {
        u32 device_ts_beacon, device_ts_presp;
 
@@ -241,8 +223,15 @@ struct ieee80211_rx_data {
         */
        int security_idx;
 
-       u32 tkip_iv32;
-       u16 tkip_iv16;
+       union {
+               struct {
+                       u32 iv32;
+                       u16 iv16;
+               } tkip;
+               struct {
+                       u8 pn[IEEE80211_CCMP_PN_LEN];
+               } ccm_gcm;
+       };
 };
 
 struct ieee80211_csa_settings {
@@ -902,9 +891,7 @@ struct ieee80211_sub_if_data {
 
        char name[IFNAMSIZ];
 
-       /* Fragment table for host-based reassembly */
-       struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
-       unsigned int fragment_next;
+       struct ieee80211_fragment_cache frags;
 
        /* TID bitmap for NoAck policy */
        u16 noack_map;
@@ -2320,4 +2307,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
 #define debug_noinline
 #endif
 
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
+
 #endif /* IEEE80211_I_H */
index 7032a2b59249cb2d42c3c38aaeec0cc57a4741ba..2e2f73a4aa7340bf3aed51eb12d0313667f81807 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (c) 2016        Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/slab.h>
 #include <linux/kernel.h>
@@ -677,16 +677,12 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
  */
 static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
-       int i;
-
        /* free extra data */
        ieee80211_free_keys(sdata, false);
 
        ieee80211_debugfs_remove_netdev(sdata);
 
-       for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
-               __skb_queue_purge(&sdata->fragments[i].skb_list);
-       sdata->fragment_next = 0;
+       ieee80211_destroy_frag_cache(&sdata->frags);
 
        if (ieee80211_vif_is_mesh(&sdata->vif))
                ieee80211_mesh_teardown_sdata(sdata);
@@ -1930,8 +1926,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
        sdata->wdev.wiphy = local->hw.wiphy;
        sdata->local = local;
 
-       for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
-               skb_queue_head_init(&sdata->fragments[i].skb_list);
+       ieee80211_init_frag_cache(&sdata->frags);
 
        INIT_LIST_HEAD(&sdata->key_list);
 
index 56c068cb49c4ddaef3d4fb9355b4912f5f72fc58..f695fc80088bc98a8380785c8fbed89063ffcb47 100644 (file)
@@ -799,6 +799,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
                       struct ieee80211_sub_if_data *sdata,
                       struct sta_info *sta)
 {
+       static atomic_t key_color = ATOMIC_INIT(0);
        struct ieee80211_key *old_key;
        int idx = key->conf.keyidx;
        bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
@@ -850,6 +851,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
        key->sdata = sdata;
        key->sta = sta;
 
+       /*
+        * Assign a unique ID to every key so we can easily prevent mixed
+        * key and fragment cache attacks.
+        */
+       key->color = atomic_inc_return(&key_color);
+
        increment_tailroom_need_count(sdata);
 
        ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
index 7ad72e9b4991d2f4ca14df03a8ccc86fedf18b62..1e326c89d72173775dee1927b82e73b2ad8f817a 100644 (file)
@@ -128,6 +128,8 @@ struct ieee80211_key {
        } debugfs;
 #endif
 
+       unsigned int color;
+
        /*
         * key config, must be last because it contains key
         * material as variable length member
index 62047e93e217be086cada92237b978f614eaa29e..1bb43edd47b6cb7d5f62a22a8af6507bdb58d26a 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #include <linux/jiffies.h>
@@ -2123,19 +2123,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
        return result;
 }
 
+void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+               skb_queue_head_init(&cache->entries[i].skb_list);
+}
+
+void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
+               __skb_queue_purge(&cache->entries[i].skb_list);
+}
+
 static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
                         unsigned int frag, unsigned int seq, int rx_queue,
                         struct sk_buff **skb)
 {
        struct ieee80211_fragment_entry *entry;
 
-       entry = &sdata->fragments[sdata->fragment_next++];
-       if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
-               sdata->fragment_next = 0;
+       entry = &cache->entries[cache->next++];
+       if (cache->next >= IEEE80211_FRAGMENT_MAX)
+               cache->next = 0;
 
-       if (!skb_queue_empty(&entry->skb_list))
-               __skb_queue_purge(&entry->skb_list);
+       __skb_queue_purge(&entry->skb_list);
 
        __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
        *skb = NULL;
@@ -2150,14 +2165,14 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
 }
 
 static inline struct ieee80211_fragment_entry *
-ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
+ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
                          unsigned int frag, unsigned int seq,
                          int rx_queue, struct ieee80211_hdr *hdr)
 {
        struct ieee80211_fragment_entry *entry;
        int i, idx;
 
-       idx = sdata->fragment_next;
+       idx = cache->next;
        for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
                struct ieee80211_hdr *f_hdr;
                struct sk_buff *f_skb;
@@ -2166,7 +2181,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
                if (idx < 0)
                        idx = IEEE80211_FRAGMENT_MAX - 1;
 
-               entry = &sdata->fragments[idx];
+               entry = &cache->entries[idx];
                if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
                    entry->rx_queue != rx_queue ||
                    entry->last_frag + 1 != frag)
@@ -2194,15 +2209,27 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
        return NULL;
 }
 
+static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
+{
+       return rx->key &&
+               (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+                rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+                rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+                rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
+               ieee80211_has_protected(fc);
+}
+
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 {
+       struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
        struct ieee80211_hdr *hdr;
        u16 sc;
        __le16 fc;
        unsigned int frag, seq;
        struct ieee80211_fragment_entry *entry;
        struct sk_buff *skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 
        hdr = (struct ieee80211_hdr *)rx->skb->data;
        fc = hdr->frame_control;
@@ -2218,6 +2245,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                goto out_no_led;
        }
 
+       if (rx->sta)
+               cache = &rx->sta->frags;
+
        if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
                goto out;
 
@@ -2236,20 +2266,17 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 
        if (frag == 0) {
                /* This is the first fragment of a new frame. */
-               entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
+               entry = ieee80211_reassemble_add(cache, frag, seq,
                                                 rx->seqno_idx, &(rx->skb));
-               if (rx->key &&
-                   (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
-                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
-                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
-                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
-                   ieee80211_has_protected(fc)) {
+               if (requires_sequential_pn(rx, fc)) {
                        int queue = rx->security_idx;
 
                        /* Store CCMP/GCMP PN so that we can verify that the
                         * next fragment has a sequential PN value.
                         */
                        entry->check_sequential_pn = true;
+                       entry->is_protected = true;
+                       entry->key_color = rx->key->color;
                        memcpy(entry->last_pn,
                               rx->key->u.ccmp.rx_pn[queue],
                               IEEE80211_CCMP_PN_LEN);
@@ -2261,6 +2288,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                                     sizeof(rx->key->u.gcmp.rx_pn[queue]));
                        BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
                                     IEEE80211_GCMP_PN_LEN);
+               } else if (rx->key &&
+                          (ieee80211_has_protected(fc) ||
+                           (status->flag & RX_FLAG_DECRYPTED))) {
+                       entry->is_protected = true;
+                       entry->key_color = rx->key->color;
                }
                return RX_QUEUED;
        }
@@ -2268,7 +2300,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        /* This is a fragment for a frame that should already be pending in
         * fragment cache. Add this fragment to the end of the pending entry.
         */
-       entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
+       entry = ieee80211_reassemble_find(cache, frag, seq,
                                          rx->seqno_idx, hdr);
        if (!entry) {
                I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2283,25 +2315,39 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        if (entry->check_sequential_pn) {
                int i;
                u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
-               int queue;
 
-               if (!rx->key ||
-                   (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
-                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
-                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
-                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
+               if (!requires_sequential_pn(rx, fc))
+                       return RX_DROP_UNUSABLE;
+
+               /* Prevent mixed key and fragment cache attacks */
+               if (entry->key_color != rx->key->color)
                        return RX_DROP_UNUSABLE;
+
                memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
                for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
                        pn[i]++;
                        if (pn[i])
                                break;
                }
-               queue = rx->security_idx;
-               rpn = rx->key->u.ccmp.rx_pn[queue];
+
+               rpn = rx->ccm_gcm.pn;
                if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
                        return RX_DROP_UNUSABLE;
                memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
+       } else if (entry->is_protected &&
+                  (!rx->key ||
+                   (!ieee80211_has_protected(fc) &&
+                    !(status->flag & RX_FLAG_DECRYPTED)) ||
+                   rx->key->color != entry->key_color)) {
+               /* Drop this as a mixed key or fragment cache attack, even
+                * if for TKIP Michael MIC should protect us, and WEP is a
+                * lost cause anyway.
+                */
+               return RX_DROP_UNUSABLE;
+       } else if (entry->is_protected && rx->key &&
+                  entry->key_color != rx->key->color &&
+                  (status->flag & RX_FLAG_DECRYPTED)) {
+               return RX_DROP_UNUSABLE;
        }
 
        skb_pull(rx->skb, ieee80211_hdrlen(fc));
@@ -2494,13 +2540,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
        struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
 
        /*
-        * Allow EAPOL frames to us/the PAE group address regardless
-        * of whether the frame was encrypted or not.
+        * Allow EAPOL frames to us/the PAE group address regardless of
+        * whether the frame was encrypted or not, and always disallow
+        * all other destination addresses for them.
         */
-       if (ehdr->h_proto == rx->sdata->control_port_protocol &&
-           (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
-            ether_addr_equal(ehdr->h_dest, pae_group_addr)))
-               return true;
+       if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
+               return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+                      ether_addr_equal(ehdr->h_dest, pae_group_addr);
 
        if (ieee80211_802_1x_port_control(rx) ||
            ieee80211_drop_unencrypted(rx, fc))
@@ -2525,8 +2571,28 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
                cfg80211_rx_control_port(dev, skb, noencrypt);
                dev_kfree_skb(skb);
        } else {
+               struct ethhdr *ehdr = (void *)skb_mac_header(skb);
+
                memset(skb->cb, 0, sizeof(skb->cb));
 
+               /*
+                * 802.1X over 802.11 requires that the authenticator address
+                * be used for EAPOL frames. However, 802.1X allows the use of
+                * the PAE group address instead. If the interface is part of
+                * a bridge and we pass the frame with the PAE group address,
+                * then the bridge will forward it to the network (even if the
+                * client was not associated yet), which isn't supposed to
+                * happen.
+                * To avoid that, rewrite the destination address to our own
+                * address, so that the authenticator (e.g. hostapd) will see
+                * the frame, but bridge won't forward it anywhere else. Note
+                * that due to earlier filtering, the only other address can
+                * be the PAE group address.
+                */
+               if (unlikely(skb->protocol == sdata->control_port_protocol &&
+                            !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
+                       ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
+
                /* deliver to local stack */
                if (rx->list)
                        list_add_tail(&skb->list, rx->list);
@@ -2566,6 +2632,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
        if ((sdata->vif.type == NL80211_IFTYPE_AP ||
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
            !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
+           ehdr->h_proto != rx->sdata->control_port_protocol &&
            (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
                if (is_multicast_ether_addr(ehdr->h_dest) &&
                    ieee80211_vif_get_num_mcast_if(sdata) != 0) {
@@ -2675,7 +2742,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
        if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
                                          rx->sdata->vif.addr,
                                          rx->sdata->vif.type,
-                                         data_offset))
+                                         data_offset, true))
                return RX_DROP_UNUSABLE;
 
        ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
@@ -2732,6 +2799,23 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
        if (is_multicast_ether_addr(hdr->addr1))
                return RX_DROP_UNUSABLE;
 
+       if (rx->key) {
+               /*
+                * We should not receive A-MSDUs on pre-HT connections,
+                * and HT connections cannot use old ciphers. Thus drop
+                * them, as in those cases we couldn't even have SPP
+                * A-MSDUs or such.
+                */
+               switch (rx->key->conf.cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+               case WLAN_CIPHER_SUITE_TKIP:
+                       return RX_DROP_UNUSABLE;
+               default:
+                       break;
+               }
+       }
+
        return __ieee80211_rx_h_amsdu(rx, 0);
 }
 
index ec6973ee88ef49d9169ce9141642859166c6059f..f2fb69da9b6e1e945387253aa46154454c7d6ba1 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #include <linux/module.h>
@@ -392,6 +392,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        u64_stats_init(&sta->rx_stats.syncp);
 
+       ieee80211_init_frag_cache(&sta->frags);
+
        sta->sta_state = IEEE80211_STA_NONE;
 
        /* Mark TID as unreserved */
@@ -1102,6 +1104,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
 
        ieee80211_sta_debugfs_remove(sta);
 
+       ieee80211_destroy_frag_cache(&sta->frags);
+
        cleanup_single_sta(sta);
 }
 
index 78b9d0c7cc583dc1d9455583ff93797bcfdfb54f..0333072ebd98223bd31d54187644a7b0f6c9c2f2 100644 (file)
@@ -3,7 +3,7 @@
  * Copyright 2002-2005, Devicescape Software, Inc.
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
  */
 
 #ifndef STA_INFO_H
@@ -438,6 +438,34 @@ struct ieee80211_sta_rx_stats {
        u64 msdu[IEEE80211_NUM_TIDS + 1];
 };
 
+/*
+ * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent
+ * reception of at least one MSDU per access category per associated STA"
+ * on APs, or "at least one MSDU per access category" on other interface types.
+ *
+ * This limit can be increased by changing this define, at the cost of slower
+ * frame reassembly and increased memory use while fragments are pending.
+ */
+#define IEEE80211_FRAGMENT_MAX 4
+
+struct ieee80211_fragment_entry {
+       struct sk_buff_head skb_list;
+       unsigned long first_frag_time;
+       u16 seq;
+       u16 extra_len;
+       u16 last_frag;
+       u8 rx_queue;
+       u8 check_sequential_pn:1, /* needed for CCMP/GCMP */
+          is_protected:1;
+       u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+       unsigned int key_color;
+};
+
+struct ieee80211_fragment_cache {
+       struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX];
+       unsigned int next;
+};
+
 /*
  * The bandwidth threshold below which the per-station CoDel parameters will be
  * scaled to be more lenient (to prevent starvation of slow stations). This
@@ -531,6 +559,7 @@ struct ieee80211_sta_rx_stats {
  * @status_stats.last_ack_signal: last ACK signal
  * @status_stats.ack_signal_filled: last ACK signal validity
  * @status_stats.avg_ack_signal: average ACK signal
+ * @frags: fragment cache
  */
 struct sta_info {
        /* General information, mostly static */
@@ -639,6 +668,8 @@ struct sta_info {
 
        struct cfg80211_chan_def tdls_chandef;
 
+       struct ieee80211_fragment_cache frags;
+
        /* keep last! */
        struct ieee80211_sta sta;
 };
index 91bf32af55e9aab807ee350d4db06da2c9bdb161..bca47fad5a16280b808bef4c8832d6f0e0626b37 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright 2002-2004, Instant802 Networks, Inc.
  * Copyright 2008, Jouni Malinen <j@w1.fi>
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2020-2021 Intel Corporation
  */
 
 #include <linux/netdevice.h>
@@ -167,8 +168,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 
 update_iv:
        /* update IV in key information to be able to detect replays */
-       rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
-       rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
+       rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32;
+       rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16;
 
        return RX_CONTINUE;
 
@@ -294,8 +295,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
                                          key, skb->data + hdrlen,
                                          skb->len - hdrlen, rx->sta->sta.addr,
                                          hdr->addr1, hwaccel, rx->security_idx,
-                                         &rx->tkip_iv32,
-                                         &rx->tkip_iv16);
+                                         &rx->tkip.iv32,
+                                         &rx->tkip.iv16);
        if (res != TKIP_DECRYPT_OK)
                return RX_DROP_UNUSABLE;
 
@@ -553,6 +554,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
                }
 
                memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+               if (unlikely(ieee80211_is_frag(hdr)))
+                       memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
        }
 
        /* Remove CCMP header and MIC */
@@ -781,6 +784,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
                }
 
                memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+               if (unlikely(ieee80211_is_frag(hdr)))
+                       memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN);
        }
 
        /* Remove GCMP header and MIC */
index 99fc21406168bc94500aa3740bf49dd62e05c28b..6b825fb3fa8322ef4fd96a7a92d02c4ec2bb6628 100644 (file)
@@ -130,7 +130,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                        memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
                        pr_debug("MP_JOIN hmac");
                } else {
-                       pr_warn("MP_JOIN bad option size");
                        mp_opt->mp_join = 0;
                }
                break;
@@ -1024,7 +1023,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
                } else {
                        mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
-                       mptcp_pm_del_add_timer(msk, &mp_opt.addr);
+                       mptcp_pm_del_add_timer(msk, &mp_opt.addr, true);
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
                }
 
index 6ba040897738b8a9889406add01664c5093b7cf5..2469e06a3a9d6019973d5dd83c497f9e5ebf97c5 100644 (file)
@@ -346,18 +346,18 @@ out:
 
 struct mptcp_pm_add_entry *
 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
-                      struct mptcp_addr_info *addr)
+                      struct mptcp_addr_info *addr, bool check_id)
 {
        struct mptcp_pm_add_entry *entry;
        struct sock *sk = (struct sock *)msk;
 
        spin_lock_bh(&msk->pm.lock);
        entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
-       if (entry)
+       if (entry && (!check_id || entry->addr.id == addr->id))
                entry->retrans_times = ADD_ADDR_RETRANS_MAX;
        spin_unlock_bh(&msk->pm.lock);
 
-       if (entry)
+       if (entry && (!check_id || entry->addr.id == addr->id))
                sk_stop_timer_sync(sk, &entry->add_timer);
 
        return entry;
@@ -1064,7 +1064,7 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
 {
        struct mptcp_pm_add_entry *entry;
 
-       entry = mptcp_pm_del_add_timer(msk, addr);
+       entry = mptcp_pm_del_add_timer(msk, addr, false);
        if (entry) {
                list_del(&entry->list);
                kfree(entry);
index 29a2d690d8d5950cf4ff35189ae1e0d31392a7b4..5edc686faff154ed894f95f0fd1626d49c764dea 100644 (file)
@@ -879,12 +879,18 @@ static bool mptcp_skb_can_collapse_to(u64 write_seq,
               !mpext->frozen;
 }
 
+/* we can append data to the given data frag if:
+ * - there is space available in the backing page_frag
+ * - the data frag tail matches the current page_frag free offset
+ * - the data frag end sequence number matches the current write seq
+ */
 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
                                       const struct page_frag *pfrag,
                                       const struct mptcp_data_frag *df)
 {
        return df && pfrag->page == df->page &&
                pfrag->size - pfrag->offset > 0 &&
+               pfrag->offset == (df->offset + df->data_len) &&
                df->data_seq + df->data_len == msk->write_seq;
 }
 
@@ -941,6 +947,10 @@ static void __mptcp_update_wmem(struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
+#ifdef CONFIG_LOCKDEP
+       WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
+
        if (!msk->wmem_reserved)
                return;
 
@@ -1079,10 +1089,20 @@ out:
 
 static void __mptcp_clean_una_wakeup(struct sock *sk)
 {
+#ifdef CONFIG_LOCKDEP
+       WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
        __mptcp_clean_una(sk);
        mptcp_write_space(sk);
 }
 
+static void mptcp_clean_una_wakeup(struct sock *sk)
+{
+       mptcp_data_lock(sk);
+       __mptcp_clean_una_wakeup(sk);
+       mptcp_data_unlock(sk);
+}
+
 static void mptcp_enter_memory_pressure(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow;
@@ -2293,7 +2313,7 @@ static void __mptcp_retrans(struct sock *sk)
        struct sock *ssk;
        int ret;
 
-       __mptcp_clean_una_wakeup(sk);
+       mptcp_clean_una_wakeup(sk);
        dfrag = mptcp_rtx_head(sk);
        if (!dfrag) {
                if (mptcp_data_fin_enabled(msk)) {
@@ -2418,13 +2438,12 @@ static int __mptcp_init_sock(struct sock *sk)
        timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
        timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
 
-       tcp_assign_congestion_control(sk);
-
        return 0;
 }
 
 static int mptcp_init_sock(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct net *net = sock_net(sk);
        int ret;
 
@@ -2442,6 +2461,16 @@ static int mptcp_init_sock(struct sock *sk)
        if (ret)
                return ret;
 
+       /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
+        * propagate the correct value
+        */
+       tcp_assign_congestion_control(sk);
+       strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+
+       /* no need to keep a reference to the ops, the name will suffice */
+       tcp_cleanup_congestion_control(sk);
+       icsk->icsk_ca_ops = NULL;
+
        sk_sockets_allocated_inc(sk);
        sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
        sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
@@ -2616,7 +2645,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
        sk_stream_kill_queues(sk);
        xfrm_sk_free_policy(sk);
 
-       tcp_cleanup_congestion_control(sk);
        sk_refcnt_debug_release(sk);
        mptcp_dispose_initial_subflow(msk);
        sock_put(sk);
index edc0128730dfe0eb5d4be65c0e446530fa9cf8c9..0c6f99c6734575446d29ddbd839eebf9deefade2 100644 (file)
@@ -258,6 +258,7 @@ struct mptcp_sock {
        } rcvq_space;
 
        u32 setsockopt_seq;
+       char            ca_name[TCP_CA_NAME_MAX];
 };
 
 #define mptcp_lock_sock(___sk, cb) do {                                        \
@@ -671,7 +672,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
 struct mptcp_pm_add_entry *
 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
-                      struct mptcp_addr_info *addr);
+                      struct mptcp_addr_info *addr, bool check_id);
 struct mptcp_pm_add_entry *
 mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
                                struct mptcp_addr_info *addr);
index 00d941b66c1e5907906eb1efc1e7aef909b6152d..a797981895995edae37ba612fdd120ac07d20e01 100644 (file)
@@ -547,7 +547,7 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
        }
 
        if (ret == 0)
-               tcp_set_congestion_control(sk, name, false, cap_net_admin);
+               strcpy(msk->ca_name, name);
 
        release_sock(sk);
        return ret;
@@ -705,7 +705,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
        sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));
 
        if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
-               tcp_set_congestion_control(ssk, inet_csk(sk)->icsk_ca_ops->name, false, true);
+               tcp_set_congestion_control(ssk, msk->ca_name, false, true);
 }
 
 static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
index a5ede357cfbc54ec1fc8fb43b0d6929f5942b425..ef3d037f984a90abb450f47c1c6e334a1824592b 100644 (file)
@@ -630,21 +630,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 
        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        if (subflow_req->mp_capable) {
-               if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
-                       /* here we can receive and accept an in-window,
-                        * out-of-order pkt, which will not carry the MP_CAPABLE
-                        * opt even on mptcp enabled paths
-                        */
-                       goto create_msk;
-               }
-
+               /* we can receive and accept an in-window, out-of-order pkt,
+                * which may not carry the MP_CAPABLE opt even on mptcp enabled
+                * paths: always try to extract the peer key, and fallback
+                * for packets missing it.
+                * Even OoO DSS packets coming legitly after dropped or
+                * reordered MPC will cause fallback, but we don't have other
+                * options.
+                */
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
                }
 
-create_msk:
                new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
@@ -867,7 +866,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 
        data_len = mpext->data_len;
        if (data_len == 0) {
-               pr_err("Infinite mapping not handled");
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
                return MAPPING_INVALID;
        }
@@ -1013,21 +1011,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 
                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
-               if (status == MAPPING_INVALID) {
-                       ssk->sk_err = EBADMSG;
-                       goto fatal;
-               }
-               if (status == MAPPING_DUMMY) {
-                       __mptcp_do_fallback(msk);
-                       skb = skb_peek(&ssk->sk_receive_queue);
-                       subflow->map_valid = 1;
-                       subflow->map_seq = READ_ONCE(msk->ack_seq);
-                       subflow->map_data_len = skb->len;
-                       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
-                                                  subflow->ssn_offset;
-                       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-                       return true;
-               }
+               if (unlikely(status == MAPPING_INVALID))
+                       goto fallback;
+
+               if (unlikely(status == MAPPING_DUMMY))
+                       goto fallback;
 
                if (status != MAPPING_OK)
                        goto no_data;
@@ -1040,10 +1028,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
                 * MP_CAPABLE-based mapping
                 */
                if (unlikely(!READ_ONCE(msk->can_ack))) {
-                       if (!subflow->mpc_map) {
-                               ssk->sk_err = EBADMSG;
-                               goto fatal;
-                       }
+                       if (!subflow->mpc_map)
+                               goto fallback;
                        WRITE_ONCE(msk->remote_key, subflow->remote_key);
                        WRITE_ONCE(msk->ack_seq, subflow->map_seq);
                        WRITE_ONCE(msk->can_ack, true);
@@ -1071,17 +1057,31 @@ static bool subflow_check_data_avail(struct sock *ssk)
 no_data:
        subflow_sched_work_if_closed(msk, ssk);
        return false;
-fatal:
-       /* fatal protocol error, close the socket */
-       /* This barrier is coupled with smp_rmb() in tcp_poll() */
-       smp_wmb();
-       ssk->sk_error_report(ssk);
-       tcp_set_state(ssk, TCP_CLOSE);
-       subflow->reset_transient = 0;
-       subflow->reset_reason = MPTCP_RST_EMPTCP;
-       tcp_send_active_reset(ssk, GFP_ATOMIC);
-       subflow->data_avail = 0;
-       return false;
+
+fallback:
+       /* RFC 8684 section 3.7. */
+       if (subflow->mp_join || subflow->fully_established) {
+               /* fatal protocol error, close the socket.
+                * subflow_error_report() will introduce the appropriate barriers
+                */
+               ssk->sk_err = EBADMSG;
+               ssk->sk_error_report(ssk);
+               tcp_set_state(ssk, TCP_CLOSE);
+               subflow->reset_transient = 0;
+               subflow->reset_reason = MPTCP_RST_EMPTCP;
+               tcp_send_active_reset(ssk, GFP_ATOMIC);
+               subflow->data_avail = 0;
+               return false;
+       }
+
+       __mptcp_do_fallback(msk);
+       skb = skb_peek(&ssk->sk_receive_queue);
+       subflow->map_valid = 1;
+       subflow->map_seq = READ_ONCE(msk->ack_seq);
+       subflow->map_data_len = skb->len;
+       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+       return true;
 }
 
 bool mptcp_subflow_data_available(struct sock *sk)
index d45dbcba8b49c58d6872a13457373b6789573609..c25097092a060bf579edec4ac9638edb4f6fd569 100644 (file)
@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
        svc->port = u->port;
        svc->fwmark = u->fwmark;
-       svc->flags = u->flags;
+       svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
        svc->timeout = u->timeout * HZ;
        svc->netmask = u->netmask;
        svc->ipvs = ipvs;
index 89e5bac384d70162fca42975a8818b911f321c78..dc9ca12b0489c16e600cba9b9a5f9a2cef01e930 100644 (file)
@@ -664,7 +664,7 @@ int nf_conntrack_proto_init(void)
 
 #if IS_ENABLED(CONFIG_IPV6)
 cleanup_sockopt:
-       nf_unregister_sockopt(&so_getorigdst6);
+       nf_unregister_sockopt(&so_getorigdst);
 #endif
        return ret;
 }
index 39c02d1aeedfa11308e43fac67f28e890dcb36ad..1d02650dd715ab8b6e502f9d49f736ab73d9b1ba 100644 (file)
@@ -306,8 +306,7 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
 {
        flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
 
-       if (likely(!nf_flowtable_hw_offload(flow_table) ||
-                  !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
+       if (likely(!nf_flowtable_hw_offload(flow_table)))
                return;
 
        nf_flow_offload_add(flow_table, flow);
index 2af7bdb384077a692fa6a743b0ffb25e1fb3c86c..528b2f1726844564fcce9054030fb4cd2a2f4419 100644 (file)
@@ -902,10 +902,11 @@ static void flow_offload_work_add(struct flow_offload_work *offload)
 
        err = flow_offload_rule_add(offload, flow_rule);
        if (err < 0)
-               set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);
-       else
-               set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+               goto out;
+
+       set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
 
+out:
        nf_flow_offload_destroy(flow_rule);
 }
 
index d63d2d8f769c3a5d4bfc83912a5ef6bc0d6eb34c..72bc759179efca7f4c378b763b4550a898e81eb2 100644 (file)
@@ -736,7 +736,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
-           nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
+           nla_put_be32(skb, NFTA_TABLE_FLAGS,
+                        htonl(table->flags & NFT_TABLE_F_MASK)) ||
            nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
            nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
                         NFTA_TABLE_PAD))
@@ -947,20 +948,22 @@ err_register_hooks:
 
 static void nf_tables_table_disable(struct net *net, struct nft_table *table)
 {
+       table->flags &= ~NFT_TABLE_F_DORMANT;
        nft_table_disable(net, table, 0);
+       table->flags |= NFT_TABLE_F_DORMANT;
 }
 
-enum {
-       NFT_TABLE_STATE_UNCHANGED       = 0,
-       NFT_TABLE_STATE_DORMANT,
-       NFT_TABLE_STATE_WAKEUP
-};
+#define __NFT_TABLE_F_INTERNAL         (NFT_TABLE_F_MASK + 1)
+#define __NFT_TABLE_F_WAS_DORMANT      (__NFT_TABLE_F_INTERNAL << 0)
+#define __NFT_TABLE_F_WAS_AWAKEN       (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_UPDATE           (__NFT_TABLE_F_WAS_DORMANT | \
+                                        __NFT_TABLE_F_WAS_AWAKEN)
 
 static int nf_tables_updtable(struct nft_ctx *ctx)
 {
        struct nft_trans *trans;
        u32 flags;
-       int ret = 0;
+       int ret;
 
        if (!ctx->nla[NFTA_TABLE_FLAGS])
                return 0;
@@ -985,21 +988,27 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
 
        if ((flags & NFT_TABLE_F_DORMANT) &&
            !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
-               nft_trans_table_state(trans) = NFT_TABLE_STATE_DORMANT;
+               ctx->table->flags |= NFT_TABLE_F_DORMANT;
+               if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
+                       ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
        } else if (!(flags & NFT_TABLE_F_DORMANT) &&
                   ctx->table->flags & NFT_TABLE_F_DORMANT) {
-               ret = nf_tables_table_enable(ctx->net, ctx->table);
-               if (ret >= 0)
-                       nft_trans_table_state(trans) = NFT_TABLE_STATE_WAKEUP;
+               ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+               if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
+                       ret = nf_tables_table_enable(ctx->net, ctx->table);
+                       if (ret < 0)
+                               goto err_register_hooks;
+
+                       ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
+               }
        }
-       if (ret < 0)
-               goto err;
 
-       nft_trans_table_flags(trans) = flags;
        nft_trans_table_update(trans) = true;
        nft_trans_commit_list_add_tail(ctx->net, trans);
+
        return 0;
-err:
+
+err_register_hooks:
        nft_trans_destroy(trans);
        return ret;
 }
@@ -1905,7 +1914,7 @@ static int nft_chain_parse_netdev(struct net *net,
 static int nft_chain_parse_hook(struct net *net,
                                const struct nlattr * const nla[],
                                struct nft_chain_hook *hook, u8 family,
-                               bool autoload)
+                               struct netlink_ext_ack *extack, bool autoload)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
        struct nlattr *ha[NFTA_HOOK_MAX + 1];
@@ -1935,8 +1944,10 @@ static int nft_chain_parse_hook(struct net *net,
        if (nla[NFTA_CHAIN_TYPE]) {
                type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
                                                   family, autoload);
-               if (IS_ERR(type))
+               if (IS_ERR(type)) {
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
                        return PTR_ERR(type);
+               }
        }
        if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
                return -EOPNOTSUPP;
@@ -1945,8 +1956,11 @@ static int nft_chain_parse_hook(struct net *net,
            hook->priority <= NF_IP_PRI_CONNTRACK)
                return -EOPNOTSUPP;
 
-       if (!try_module_get(type->owner))
+       if (!try_module_get(type->owner)) {
+               if (nla[NFTA_CHAIN_TYPE])
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
                return -ENOENT;
+       }
 
        hook->type = type;
 
@@ -2057,7 +2071,8 @@ static int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
 static u64 chain_id;
 
 static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
-                             u8 policy, u32 flags)
+                             u8 policy, u32 flags,
+                             struct netlink_ext_ack *extack)
 {
        const struct nlattr * const *nla = ctx->nla;
        struct nft_table *table = ctx->table;
@@ -2079,7 +2094,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                if (flags & NFT_CHAIN_BINDING)
                        return -EOPNOTSUPP;
 
-               err = nft_chain_parse_hook(net, nla, &hook, family, true);
+               err = nft_chain_parse_hook(net, nla, &hook, family, extack,
+                                          true);
                if (err < 0)
                        return err;
 
@@ -2234,7 +2250,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
                        return -EEXIST;
                }
                err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
-                                          false);
+                                          extack, false);
                if (err < 0)
                        return err;
 
@@ -2447,7 +2463,7 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
                                          extack);
        }
 
-       return nf_tables_addchain(&ctx, family, genmask, policy, flags);
+       return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack);
 }
 
 static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
@@ -3328,8 +3344,10 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
                        if (n == NFT_RULE_MAXEXPRS)
                                goto err1;
                        err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
-                       if (err < 0)
+                       if (err < 0) {
+                               NL_SET_BAD_ATTR(extack, tmp);
                                goto err1;
+                       }
                        size += expr_info[n].ops->size;
                        n++;
                }
@@ -8547,10 +8565,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                switch (trans->msg_type) {
                case NFT_MSG_NEWTABLE:
                        if (nft_trans_table_update(trans)) {
-                               if (nft_trans_table_state(trans) == NFT_TABLE_STATE_DORMANT)
+                               if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+                                       nft_trans_destroy(trans);
+                                       break;
+                               }
+                               if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
                                        nf_tables_table_disable(net, trans->ctx.table);
 
-                               trans->ctx.table->flags = nft_trans_table_flags(trans);
+                               trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
                        } else {
                                nft_clear(net, trans->ctx.table);
                        }
@@ -8768,9 +8790,17 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                switch (trans->msg_type) {
                case NFT_MSG_NEWTABLE:
                        if (nft_trans_table_update(trans)) {
-                               if (nft_trans_table_state(trans) == NFT_TABLE_STATE_WAKEUP)
+                               if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+                                       nft_trans_destroy(trans);
+                                       break;
+                               }
+                               if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
                                        nf_tables_table_disable(net, trans->ctx.table);
-
+                                       trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+                               } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+                                       trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
+                               }
+                               trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
                                nft_trans_destroy(trans);
                        } else {
                                list_del_rcu(&trans->ctx.table->list);
index 322ac5dd540222d23b078da1e95f2530b0416161..752b10cae524283ede959947168a0464d8cad93d 100644 (file)
@@ -380,10 +380,14 @@ static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
                     struct nf_conntrack_helper *helper)
 {
+       u32 size;
        int ret;
 
-       if (tb[NFCTH_PRIV_DATA_LEN])
-               return -EBUSY;
+       if (tb[NFCTH_PRIV_DATA_LEN]) {
+               size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+               if (size != helper->data_len)
+                       return -EBUSY;
+       }
 
        if (tb[NFCTH_POLICY]) {
                ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
index 0592a94560843322364275e7fd1f5a896dd940dc..337e22d8b40b1e060e6e4bd57e0982fc87d6262b 100644 (file)
@@ -1217,7 +1217,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
        struct nf_conn *ct;
 
        ct = nf_ct_get(pkt->skb, &ctinfo);
-       if (!ct || ctinfo == IP_CT_UNTRACKED) {
+       if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
                regs->verdict.code = NFT_BREAK;
                return;
        }
index 528a2d7ca9918372b072b51962b0a4be97ca3db8..dce866d93feed1bf60493d7ff89a4892096f6e30 100644 (file)
@@ -408,8 +408,8 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
  *
  * Return: true on match, false otherwise.
  */
-static bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
-                             const u32 *key, const struct nft_set_ext **ext)
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+                      const u32 *key, const struct nft_set_ext **ext)
 {
        struct nft_pipapo *priv = nft_set_priv(set);
        unsigned long *res_map, *fill_map;
index 25a75591583ebec982a53a67135721c7f4902047..d84afb8fa79a13b563ef175cddcf12ee4cb1ef24 100644 (file)
@@ -178,6 +178,8 @@ struct nft_pipapo_elem {
 
 int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
                  union nft_pipapo_map_bucket *mt, bool match_only);
+bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+                      const u32 *key, const struct nft_set_ext **ext);
 
 /**
  * pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
index d65ae0e23028d10fbca9109cb378251f7b53edc2..eabdb8d552eef95329fbcc792d20f42ca01449ba 100644 (file)
@@ -1131,6 +1131,9 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
        bool map_index;
        int i, ret = 0;
 
+       if (unlikely(!irq_fpu_usable()))
+               return nft_pipapo_lookup(net, set, key, ext);
+
        m = rcu_dereference(priv->match);
 
        /* This also protects access to all data related to scratch maps */
index 3a62f97acf39d11a18547308a2f375eb5f37abaa..6133e412b948c4cd3373d249bd5c29f917ba4322 100644 (file)
@@ -461,11 +461,13 @@ void netlink_table_ungrab(void)
 static inline void
 netlink_lock_table(void)
 {
+       unsigned long flags;
+
        /* read_lock() synchronizes us to netlink_table_grab */
 
-       read_lock(&nl_table_lock);
+       read_lock_irqsave(&nl_table_lock, flags);
        atomic_inc(&nl_table_users);
-       read_unlock(&nl_table_lock);
+       read_unlock_irqrestore(&nl_table_lock, flags);
 }
 
 static inline void
index 53dbe733f998164c59796161b089be7663382fdf..6cfd30fc07985e69fb68e84b71f3c9fa64aa62b2 100644 (file)
@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        if (!llcp_sock->service_name) {
                nfc_llcp_local_put(llcp_sock->local);
                llcp_sock->local = NULL;
+               llcp_sock->dev = NULL;
                ret = -ENOMEM;
                goto put_dev;
        }
@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                llcp_sock->local = NULL;
                kfree(llcp_sock->service_name);
                llcp_sock->service_name = NULL;
+               llcp_sock->dev = NULL;
                ret = -EADDRINUSE;
                goto put_dev;
        }
index 9a585332ea84a488f6186058900e2ada8a3864fe..da7fe9db1b00fdfed75f26d5181c2d80bfbd2e81 100644 (file)
@@ -1191,6 +1191,7 @@ EXPORT_SYMBOL(nci_allocate_device);
 void nci_free_device(struct nci_dev *ndev)
 {
        nfc_free_device(ndev->nfc_dev);
+       nci_hci_deallocate(ndev);
        kfree(ndev);
 }
 EXPORT_SYMBOL(nci_free_device);
index 6b275a387a92acf9f046c22b3136062239bb2586..96865142104f48f71e291f42cee8297bc1471e1c 100644 (file)
@@ -792,3 +792,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
 
        return hdev;
 }
+
+void nci_hci_deallocate(struct nci_dev *ndev)
+{
+       kfree(ndev->hci_dev);
+}
index 9c7eb8455ba8eb275133f0d0bfc0b8d5a6fbdd70..5f1d438a0a23f15946a6055edad795e5000576b9 100644 (file)
@@ -329,7 +329,7 @@ static int rawsock_create(struct net *net, struct socket *sock,
                return -ESOCKTNOSUPPORT;
 
        if (sock->type == SOCK_RAW) {
-               if (!capable(CAP_NET_RAW))
+               if (!ns_capable(net->user_ns, CAP_NET_RAW))
                        return -EPERM;
                sock->ops = &rawsock_raw_ops;
        } else {
index 96b524ceabca4f53b07411ae4d5d5c49e7d24656..896b8f5bc885350980a9547fc921583515dff7ec 100644 (file)
@@ -611,6 +611,14 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
        spin_lock(&meter->lock);
 
        long_delta_ms = (now_ms - meter->used); /* ms */
+       if (long_delta_ms < 0) {
+               /* This condition means that we have several threads fighting
+                * for a meter lock, and the one who received the packets a
+                * bit later wins. Assuming that all racing threads received
+                * packets at the same time to avoid overflow.
+                */
+               long_delta_ms = 0;
+       }
 
        /* Make sure delta_ms will not be too large, so that bucket will not
         * wrap around below.
index ba96db1880eae89febf77ba6ff943b054cd268d7..ae906eb4b269e858434828a383491e8d4c33c422 100644 (file)
@@ -422,7 +422,8 @@ static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
            ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
                return TP_STATUS_TS_RAW_HARDWARE;
 
-       if (ktime_to_timespec64_cond(skb->tstamp, ts))
+       if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
+           ktime_to_timespec64_cond(skb->tstamp, ts))
                return TP_STATUS_TS_SOFTWARE;
 
        return 0;
@@ -2340,7 +2341,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 
        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
-       if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
+       /* Always timestamp; prefer an existing software timestamp taken
+        * closer to the time of capture.
+        */
+       ts_status = tpacket_get_timestamp(skb, &ts,
+                                         po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
+       if (!ts_status)
                ktime_get_real_ts64(&ts);
 
        status |= ts_status;
index f2fcab182095cbc39f991a4d2886aa2237aec1c8..a3bc4b54d4910567c915f41cd6bc44f279eaadf3 100644 (file)
@@ -240,12 +240,23 @@ static struct rds_connection *__rds_conn_create(struct net *net,
        if (loop_trans) {
                rds_trans_put(loop_trans);
                conn->c_loopback = 1;
-               if (is_outgoing && trans->t_prefer_loopback) {
-                       /* "outgoing" connection - and the transport
-                        * says it wants the connection handled by the
-                        * loopback transport. This is what TCP does.
-                        */
-                       trans = &rds_loop_transport;
+               if (trans->t_prefer_loopback) {
+                       if (likely(is_outgoing)) {
+                               /* "outgoing" connection to local address.
+                                * Protocol says it wants the connection
+                                * handled by the loopback transport.
+                                * This is what TCP does.
+                                */
+                               trans = &rds_loop_transport;
+                       } else {
+                               /* No transport currently in use
+                                * should end up here, but if it
+                                * does, reset/destroy the connection.
+                                */
+                               kmem_cache_free(rds_conn_slab, conn);
+                               conn = ERR_PTR(-EOPNOTSUPP);
+                               goto out;
+                       }
                }
        }
 
index 43db0eca911fad13c21a4e4091580e0c9f3901cc..abf19c0e3ba0bfcf0396df2d78b937e288b84ab0 100644 (file)
@@ -313,8 +313,8 @@ out:
 }
 #endif
 
-static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
-                              __u32 scope_id)
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+                       __u32 scope_id)
 {
        struct net_device *dev = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
index bad9cf49d56572cd163019c2beb6ea5e65b17d7f..dc8d745d68575f019ca96c706efc77125552a5d2 100644 (file)
@@ -59,7 +59,8 @@ u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
 void rds_tcp_accept_work(struct sock *sk);
-
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+                       __u32 scope_id);
 /* tcp_connect.c */
 int rds_tcp_conn_path_connect(struct rds_conn_path *cp);
 void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
index 101cf14215a0b30f5395fd3d4317655d3167f0f9..09cadd556d1e188fde086f356718ab149d567632 100644 (file)
@@ -167,6 +167,12 @@ int rds_tcp_accept_one(struct socket *sock)
        }
 #endif
 
+       if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
+               /* local address connection is only allowed via loopback */
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
        conn = rds_conn_create(sock_net(sock->sk),
                               my_addr, peer_addr,
                               &rds_tcp_transport, 0, GFP_KERNEL, dev_if);
index ec7a1c438df94254943eebf654563161496a1f74..18edd9ad1410947c0464341cf601b87bf7a7a6ff 100644 (file)
@@ -984,7 +984,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
         */
        cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
        if (!cached) {
-               if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+               if (tcf_ct_flow_table_lookup(p, skb, family)) {
                        skip_add = true;
                        goto do_nat;
                }
@@ -1022,10 +1022,11 @@ do_nat:
                 * even if the connection is already confirmed.
                 */
                nf_conntrack_confirm(skb);
-       } else if (!skip_add) {
-               tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
        }
 
+       if (!skip_add)
+               tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+
 out_push:
        skb_push_rcsum(skb, nh_ofs);
 
@@ -1202,9 +1203,6 @@ static int tcf_ct_fill_params(struct net *net,
                                   sizeof(p->zone));
        }
 
-       if (p->zone == NF_CT_DEFAULT_ZONE_ID)
-               return 0;
-
        nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
        tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
        if (!tmpl) {
index 40fbea626dfd2a4ea9f36abfe55210f99c04c2db..279f9e2a2319ad6f026b7cfa4c56be856b4b96a8 100644 (file)
@@ -1624,7 +1624,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
 
        /* If we missed on some chain */
        if (ret == TC_ACT_UNSPEC && last_executed_chain) {
-               ext = skb_ext_add(skb, TC_SKB_EXT);
+               ext = tc_skb_ext_alloc(skb);
                if (WARN_ON_ONCE(!ext))
                        return TC_ACT_SHOT;
                ext->chain = last_executed_chain;
index cd2748e2d4a2057e327e61478618a1ca8b1bf5b7..d320bcfb2da2c9aac72c546750dac1586860df25 100644 (file)
@@ -407,7 +407,8 @@ static void dsmark_reset(struct Qdisc *sch)
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-       qdisc_reset(p->q);
+       if (p->q)
+               qdisc_reset(p->q);
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
index 949163fe68afdeb95638998b8b28499d930ae023..cac684952edc55712197b307536fdc9ad3c7e064 100644 (file)
@@ -138,8 +138,15 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
        /* Classifies packet into corresponding flow */
        idx = fq_pie_classify(skb, sch, &ret);
-       sel_flow = &q->flows[idx];
+       if (idx == 0) {
+               if (ret & __NET_XMIT_BYPASS)
+                       qdisc_qstats_drop(sch);
+               __qdisc_drop(skb, to_free);
+               return ret;
+       }
+       idx--;
 
+       sel_flow = &q->flows[idx];
        /* Checks whether adding a new packet would exceed memory limit */
        get_pie_cb(skb)->mem_usage = skb->truesize;
        memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
@@ -297,9 +304,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
                        goto flow_error;
                }
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
-               if (!q->flows_cnt || q->flows_cnt >= 65536) {
+               if (!q->flows_cnt || q->flows_cnt > 65536) {
                        NL_SET_ERR_MSG_MOD(extack,
-                                          "Number of flows must range in [1..65535]");
+                                          "Number of flows must range in [1..65536]");
                        goto flow_error;
                }
        }
@@ -367,7 +374,7 @@ static void fq_pie_timer(struct timer_list *t)
        struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
        struct Qdisc *sch = q->sch;
        spinlock_t *root_lock; /* to lock qdisc for probability calculations */
-       u16 idx;
+       u32 idx;
 
        root_lock = qdisc_lock(qdisc_root_sleeping(sch));
        spin_lock(root_lock);
@@ -388,7 +395,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
 {
        struct fq_pie_sched_data *q = qdisc_priv(sch);
        int err;
-       u16 idx;
+       u32 idx;
 
        pie_params_init(&q->p_params);
        sch->limit = 10 * 1024;
@@ -500,7 +507,7 @@ static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 static void fq_pie_reset(struct Qdisc *sch)
 {
        struct fq_pie_sched_data *q = qdisc_priv(sch);
-       u16 idx;
+       u32 idx;
 
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
index 44991ea726fc730596ade2c8814961f016656938..fc8b56bcabf39f01053894781ddf040df42e0c1e 100644 (file)
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
 EXPORT_SYMBOL(default_qdisc_ops);
 
+static void qdisc_maybe_clear_missed(struct Qdisc *q,
+                                    const struct netdev_queue *txq)
+{
+       clear_bit(__QDISC_STATE_MISSED, &q->state);
+
+       /* Make sure the below netif_xmit_frozen_or_stopped()
+        * checking happens after clearing STATE_MISSED.
+        */
+       smp_mb__after_atomic();
+
+       /* Checking netif_xmit_frozen_or_stopped() again to
+        * make sure STATE_MISSED is set if the STATE_MISSED
+        * set by netif_tx_wake_queue()'s rescheduling of
+        * net_tx_action() is cleared by the above clear_bit().
+        */
+       if (!netif_xmit_frozen_or_stopped(txq))
+               set_bit(__QDISC_STATE_MISSED, &q->state);
+}
+
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
@@ -74,6 +93,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
                        }
                } else {
                        skb = SKB_XOFF_MAGIC;
+                       qdisc_maybe_clear_missed(q, txq);
                }
        }
 
@@ -242,6 +262,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                        }
                } else {
                        skb = NULL;
+                       qdisc_maybe_clear_missed(q, txq);
                }
                if (lock)
                        spin_unlock(lock);
@@ -251,8 +272,10 @@ validate:
        *validate = true;
 
        if ((q->flags & TCQ_F_ONETXQUEUE) &&
-           netif_xmit_frozen_or_stopped(txq))
+           netif_xmit_frozen_or_stopped(txq)) {
+               qdisc_maybe_clear_missed(q, txq);
                return skb;
+       }
 
        skb = qdisc_dequeue_skb_bad_txq(q);
        if (unlikely(skb)) {
@@ -311,6 +334,8 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+               else
+                       qdisc_maybe_clear_missed(q, txq);
 
                HARD_TX_UNLOCK(dev, txq);
        } else {
@@ -640,8 +665,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        struct sk_buff *skb = NULL;
+       bool need_retry = true;
        int band;
 
+retry:
        for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
                struct skb_array *q = band2list(priv, band);
 
@@ -652,6 +679,23 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
        }
        if (likely(skb)) {
                qdisc_update_stats_at_dequeue(qdisc, skb);
+       } else if (need_retry &&
+                  test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+               /* Delay clearing the STATE_MISSED here to reduce
+                * the overhead of the second spin_trylock() in
+                * qdisc_run_begin() and __netif_schedule() calling
+                * in qdisc_run_end().
+                */
+               clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+               /* Make sure dequeuing happens after clearing
+                * STATE_MISSED.
+                */
+               smp_mb__after_atomic();
+
+               need_retry = false;
+
+               goto retry;
        } else {
                WRITE_ONCE(qdisc->empty, true);
        }
@@ -1158,8 +1202,10 @@ static void dev_reset_queue(struct net_device *dev,
        qdisc_reset(qdisc);
 
        spin_unlock_bh(qdisc_lock(qdisc));
-       if (nolock)
+       if (nolock) {
+               clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
                spin_unlock_bh(&qdisc->seqlock);
+       }
 }
 
 static bool some_qdisc_is_busy(struct net_device *dev)
index 081c11d5717c4a7ad2d76a45f1fd45c47100a5bf..8827987ba903451e0f7c5e38bd2efb4cb1262924 100644 (file)
@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch,
        struct Qdisc *old_q;
 
        /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
-       qdisc_refcount_inc(new_q);
+       if (new_q)
+               qdisc_refcount_inc(new_q);
        old_q = htb_graft_helper(dev_queue, new_q);
        WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
 }
@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
                                          cl->parent->common.classid,
                                          NULL);
                if (q->offload) {
-                       if (new_q) {
+                       if (new_q)
                                htb_set_lockdep_class_child(new_q);
-                               htb_parent_to_leaf_offload(sch, dev_queue, new_q);
-                       }
+                       htb_parent_to_leaf_offload(sch, dev_queue, new_q);
                }
        }
 
index 40f9f6c4a0a1d4592e5eb3e6b28911fde7131f59..a79d193ff872012a913bccad583a77a0d824d65a 100644 (file)
@@ -4473,6 +4473,7 @@ static int sctp_setsockopt_encap_port(struct sock *sk,
                                    transports)
                        t->encap_port = encap_port;
 
+               asoc->encap_port = encap_port;
                return 0;
        }
 
index e92df779af73381351b6521910bcd8e79e6df715..55871b277f475e8068692015f4a67ce1ce111c00 100644 (file)
@@ -307,7 +307,7 @@ static struct ctl_table sctp_net_table[] = {
                .data           = &init_net.sctp.encap_port,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = &udp_port_max,
        },
index 9c6e95882553eb97374e3d080b581308bba91a89..967712ba52a0d36d525ce808ad1d0ffb62bf5837 100644 (file)
@@ -402,6 +402,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
                return NULL;
        }
 
+       smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
+                                                WQ_MEM_RECLAIM, name);
+       if (!smcd->event_wq) {
+               kfree(smcd->conn);
+               kfree(smcd);
+               return NULL;
+       }
+
        smcd->dev.parent = parent;
        smcd->dev.release = smcd_release;
        device_initialize(&smcd->dev);
@@ -415,19 +423,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
        INIT_LIST_HEAD(&smcd->vlan);
        INIT_LIST_HEAD(&smcd->lgr_list);
        init_waitqueue_head(&smcd->lgrs_deleted);
-       smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
-                                                WQ_MEM_RECLAIM, name);
-       if (!smcd->event_wq) {
-               kfree(smcd->conn);
-               kfree(smcd);
-               return NULL;
-       }
        return smcd;
 }
 EXPORT_SYMBOL_GPL(smcd_alloc_dev);
 
 int smcd_register_dev(struct smcd_dev *smcd)
 {
+       int rc;
+
        mutex_lock(&smcd_dev_list.mutex);
        if (list_empty(&smcd_dev_list.list)) {
                u8 *system_eid = NULL;
@@ -447,7 +450,14 @@ int smcd_register_dev(struct smcd_dev *smcd)
                            dev_name(&smcd->dev), smcd->pnetid,
                            smcd->pnetid_by_user ? " (user defined)" : "");
 
-       return device_add(&smcd->dev);
+       rc = device_add(&smcd->dev);
+       if (rc) {
+               mutex_lock(&smcd_dev_list.mutex);
+               list_del(&smcd->list);
+               mutex_unlock(&smcd_dev_list.mutex);
+       }
+
+       return rc;
 }
 EXPORT_SYMBOL_GPL(smcd_register_dev);
 
index f555d335e910d878d85d956510f08e622e367a58..42623d6b8f0ecb9eb3f78c11eca0a7cf87ac2daa 100644 (file)
@@ -1677,13 +1677,6 @@ call_reserveresult(struct rpc_task *task)
                return;
        }
 
-       /*
-        * Even though there was an error, we may have acquired
-        * a request slot somehow.  Make sure not to leak it.
-        */
-       if (task->tk_rqstp)
-               xprt_release(task);
-
        switch (status) {
        case -ENOMEM:
                rpc_delay(task, HZ >> 2);
index e5b5a960a69b874a409e7fa7dae9cdc9dcfc1ddd..3509a7f139b98c6c11f91cdafd0d9d0c1c0a5dfe 100644 (file)
@@ -70,6 +70,7 @@
 static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32  xprt_alloc_xid(struct rpc_xprt *xprt);
 static void     xprt_destroy(struct rpc_xprt *xprt);
+static void     xprt_request_init(struct rpc_task *task);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -1606,17 +1607,40 @@ xprt_transmit(struct rpc_task *task)
        spin_unlock(&xprt->queue_lock);
 }
 
-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+static void xprt_complete_request_init(struct rpc_task *task)
+{
+       if (task->tk_rqstp)
+               xprt_request_init(task);
+}
+
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        set_bit(XPRT_CONGESTED, &xprt->state);
-       rpc_sleep_on(&xprt->backlog, task, NULL);
+       rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
+}
+EXPORT_SYMBOL_GPL(xprt_add_backlog);
+
+static bool __xprt_set_rq(struct rpc_task *task, void *data)
+{
+       struct rpc_rqst *req = data;
+
+       if (task->tk_rqstp == NULL) {
+               memset(req, 0, sizeof(*req));   /* mark unused */
+               task->tk_rqstp = req;
+               return true;
+       }
+       return false;
 }
 
-static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-       if (rpc_wake_up_next(&xprt->backlog) == NULL)
+       if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
                clear_bit(XPRT_CONGESTED, &xprt->state);
+               return false;
+       }
+       return true;
 }
+EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
 
 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
 {
@@ -1626,7 +1650,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task
                goto out;
        spin_lock(&xprt->reserve_lock);
        if (test_bit(XPRT_CONGESTED, &xprt->state)) {
-               rpc_sleep_on(&xprt->backlog, task, NULL);
+               xprt_add_backlog(xprt, task);
                ret = true;
        }
        spin_unlock(&xprt->reserve_lock);
@@ -1703,11 +1727,11 @@ EXPORT_SYMBOL_GPL(xprt_alloc_slot);
 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
        spin_lock(&xprt->reserve_lock);
-       if (!xprt_dynamic_free_slot(xprt, req)) {
+       if (!xprt_wake_up_backlog(xprt, req) &&
+           !xprt_dynamic_free_slot(xprt, req)) {
                memset(req, 0, sizeof(*req));   /* mark unused */
                list_add(&req->rq_list, &xprt->free);
        }
-       xprt_wake_up_backlog(xprt);
        spin_unlock(&xprt->reserve_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_free_slot);
@@ -1894,10 +1918,10 @@ void xprt_release(struct rpc_task *task)
        xdr_free_bvec(&req->rq_snd_buf);
        if (req->rq_cred != NULL)
                put_rpccred(req->rq_cred);
-       task->tk_rqstp = NULL;
        if (req->rq_release_snd_buf)
                req->rq_release_snd_buf(req);
 
+       task->tk_rqstp = NULL;
        if (likely(!bc_prealloc(req)))
                xprt->ops->free_slot(xprt, req);
        else
index 649f7d8b973313dbebc8434f32ea07d920d4e48c..c335c13615645cc73478f84b3ac72ec5d3b19893 100644 (file)
@@ -628,8 +628,9 @@ out_mapping_err:
        return false;
 }
 
-/* The tail iovec might not reside in the same page as the
- * head iovec.
+/* The tail iovec may include an XDR pad for the page list,
+ * as well as additional content, and may not reside in the
+ * same page as the head iovec.
  */
 static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
                                     struct xdr_buf *xdr,
@@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
                                   struct rpcrdma_req *req,
                                   struct xdr_buf *xdr)
 {
-       struct kvec *tail = &xdr->tail[0];
-
        if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
                return false;
 
-       /* If there is a Read chunk, the page list is handled
+       /* If there is a Read chunk, the page list is being handled
         * via explicit RDMA, and thus is skipped here.
         */
 
-       if (tail->iov_len) {
-               if (!rpcrdma_prepare_tail_iov(req, xdr,
-                                             offset_in_page(tail->iov_base),
-                                             tail->iov_len))
+       /* Do not include the tail if it is only an XDR pad */
+       if (xdr->tail[0].iov_len > 3) {
+               unsigned int page_base, len;
+
+               /* If the content in the page list is an odd length,
+                * xdr_write_pages() adds a pad at the beginning of
+                * the tail iovec. Force the tail's non-pad content to
+                * land at the next XDR position in the Send message.
+                */
+               page_base = offset_in_page(xdr->tail[0].iov_base);
+               len = xdr->tail[0].iov_len;
+               page_base += len & 3;
+               len -= len & 3;
+               if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
                        return false;
                kref_get(&req->rl_kref);
        }
index 09953597d055a01ae52f54eb04b17189733f1930..19a49d26b1e41063e85fde128f1c555325cb382b 100644 (file)
@@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
        return;
 
 out_sleep:
-       set_bit(XPRT_CONGESTED, &xprt->state);
-       rpc_sleep_on(&xprt->backlog, task, NULL);
        task->tk_status = -EAGAIN;
+       xprt_add_backlog(xprt, task);
 }
 
 /**
@@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
        struct rpcrdma_xprt *r_xprt =
                container_of(xprt, struct rpcrdma_xprt, rx_xprt);
 
-       memset(rqst, 0, sizeof(*rqst));
-       rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
-       if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
-               clear_bit(XPRT_CONGESTED, &xprt->state);
+       rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+       if (!xprt_wake_up_backlog(xprt, rqst)) {
+               memset(rqst, 0, sizeof(*rqst));
+               rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+       }
 }
 
 static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
index 1e965a38089666a04485e50d2fefcd8f781d2d02..649c23518ec042285a12159b498c06359ffc49f2 100644 (file)
@@ -1200,6 +1200,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
        return mr;
 }
 
+/**
+ * rpcrdma_reply_put - Put reply buffers back into pool
+ * @buffers: buffer pool
+ * @req: object to return
+ *
+ */
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+       if (req->rl_reply) {
+               rpcrdma_rep_put(buffers, req->rl_reply);
+               req->rl_reply = NULL;
+       }
+}
+
 /**
  * rpcrdma_buffer_get - Get a request buffer
  * @buffers: Buffer pool from which to obtain a buffer
@@ -1228,9 +1242,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
  */
 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
 {
-       if (req->rl_reply)
-               rpcrdma_rep_put(buffers, req->rl_reply);
-       req->rl_reply = NULL;
+       rpcrdma_reply_put(buffers, req);
 
        spin_lock(&buffers->rb_lock);
        list_add(&req->rl_list, &buffers->rb_send_bufs);
index 436ad731261410b46180401cac45d4c71fe45746..5d231d94e94402bf6718701152719b4e994ab77d 100644 (file)
@@ -479,6 +479,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
                        struct rpcrdma_req *req);
 void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
 
 bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
                            gfp_t flags);
index 47aa47a2b07c50f93abcfb3661829c1e67f6f784..316d049455876a95b7281ac4fb69db825a0f2408 100644 (file)
@@ -1010,6 +1010,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
                        kernel_sock_shutdown(transport->sock, SHUT_RDWR);
                return -ENOTCONN;
        }
+       if (!transport->inet)
+               return -ENOTCONN;
 
        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
index 5cc1f03072150f31b05b7f5117e11c9f85d75b63..3f4542e0f0650ba81935cb57d9d362738a55eb8f 100644 (file)
@@ -60,7 +60,7 @@ static int __net_init tipc_init_net(struct net *net)
        tn->trial_addr = 0;
        tn->addr_trial_end = 0;
        tn->capabilities = TIPC_NODE_CAPABILITIES;
-       INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
+       INIT_WORK(&tn->work, tipc_net_finalize_work);
        memset(tn->node_id, 0, sizeof(tn->node_id));
        memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
        tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
@@ -110,7 +110,7 @@ static void __net_exit tipc_exit_net(struct net *net)
 
        tipc_detach_loopback(net);
        /* Make sure the tipc_net_finalize_work() finished */
-       cancel_work_sync(&tn->final_work.work);
+       cancel_work_sync(&tn->work);
        tipc_net_stop(net);
 
        tipc_bcast_stop(net);
@@ -119,6 +119,8 @@ static void __net_exit tipc_exit_net(struct net *net)
 #ifdef CONFIG_TIPC_CRYPTO
        tipc_crypto_stop(&tipc_net(net)->crypto_tx);
 #endif
+       while (atomic_read(&tn->wq_count))
+               cond_resched();
 }
 
 static void __net_exit tipc_pernet_pre_exit(struct net *net)
index 03de7b213f55346409f1f472713c80d8cec80d16..0a3f7a70a50a17b2785da8010e7a7d7814ac305b 100644 (file)
@@ -91,12 +91,6 @@ extern unsigned int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 extern int sysctl_tipc_named_timeout __read_mostly;
 
-struct tipc_net_work {
-       struct work_struct work;
-       struct net *net;
-       u32 addr;
-};
-
 struct tipc_net {
        u8  node_id[NODE_ID_LEN];
        u32 node_addr;
@@ -148,7 +142,9 @@ struct tipc_net {
        struct tipc_crypto *crypto_tx;
 #endif
        /* Work item for net finalize */
-       struct tipc_net_work final_work;
+       struct work_struct work;
+       /* Number of scheduled work items still pending */
+       atomic_t wq_count;
 };
 
 static inline struct tipc_net *tipc_net(struct net *net)
index 5380f605b8514409befa7e88dc13fd97358b6a94..da69e1abf68ff7359c9b9918014eafd58ab73da1 100644 (file)
@@ -168,7 +168,7 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
 
        /* Apply trial address if we just left trial period */
        if (!trial && !self) {
-               tipc_sched_net_finalize(net, tn->trial_addr);
+               schedule_work(&tn->work);
                msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
        }
@@ -308,7 +308,7 @@ static void tipc_disc_timeout(struct timer_list *t)
        if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
                mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
                spin_unlock_bh(&d->lock);
-               tipc_sched_net_finalize(net, tn->trial_addr);
+               schedule_work(&tn->work);
                return;
        }
 
index 11510925943020b96436ab3a371f7e9e2a100dfe..c44b4bfaaee6a9e6534debab968bc611db9e6515 100644 (file)
@@ -372,6 +372,11 @@ char tipc_link_plane(struct tipc_link *l)
        return l->net_plane;
 }
 
+struct net *tipc_link_net(struct tipc_link *l)
+{
+       return l->net;
+}
+
 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
 {
        l->peer_caps = capabilities;
index fc07232c9a12782ccbd628c87ac59a4abaa7ca1c..a16f401fdabda346e71ff4ef964c98287ba1d7b0 100644 (file)
@@ -156,4 +156,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l,   struct tipc_msg *hdr,
 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
                          struct sk_buff_head *xmitq);
 bool tipc_link_too_silent(struct tipc_link *l);
+struct net *tipc_link_net(struct tipc_link *l);
 #endif
index 3f0a25345a7c089ca4e328aa4a668148a2311767..ce6ab54822d8db184ac6b41dc380467668408547 100644 (file)
@@ -149,18 +149,13 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
                if (unlikely(head))
                        goto err;
                *buf = NULL;
+               if (skb_has_frag_list(frag) && __skb_linearize(frag))
+                       goto err;
                frag = skb_unshare(frag, GFP_ATOMIC);
                if (unlikely(!frag))
                        goto err;
                head = *headbuf = frag;
                TIPC_SKB_CB(head)->tail = NULL;
-               if (skb_is_nonlinear(head)) {
-                       skb_walk_frags(head, tail) {
-                               TIPC_SKB_CB(head)->tail = tail;
-                       }
-               } else {
-                       skb_frag_list_init(head);
-               }
                return 0;
        }
 
index a130195af188302b3aa306d1e6b94c3d643e0914..0e95572e56b41eb24ffd8e016cbf771fd174f725 100644 (file)
@@ -41,6 +41,7 @@
 #include "socket.h"
 #include "node.h"
 #include "bcast.h"
+#include "link.h"
 #include "netlink.h"
 #include "monitor.h"
 
@@ -142,19 +143,9 @@ static void tipc_net_finalize(struct net *net, u32 addr)
 
 void tipc_net_finalize_work(struct work_struct *work)
 {
-       struct tipc_net_work *fwork;
+       struct tipc_net *tn = container_of(work, struct tipc_net, work);
 
-       fwork = container_of(work, struct tipc_net_work, work);
-       tipc_net_finalize(fwork->net, fwork->addr);
-}
-
-void tipc_sched_net_finalize(struct net *net, u32 addr)
-{
-       struct tipc_net *tn = tipc_net(net);
-
-       tn->final_work.net = net;
-       tn->final_work.addr = addr;
-       schedule_work(&tn->final_work.work);
+       tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
 }
 
 void tipc_net_stop(struct net *net)
index 8217905348f48a454444eb943c0432135e991b30..81af92954c6c250045570641f14362a2ebd6a539 100644 (file)
@@ -423,18 +423,18 @@ static void tipc_node_write_unlock(struct tipc_node *n)
        write_unlock_bh(&n->lock);
 
        if (flags & TIPC_NOTIFY_NODE_DOWN)
-               tipc_publ_notify(net, publ_list, n->addr, n->capabilities);
+               tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
 
        if (flags & TIPC_NOTIFY_NODE_UP)
-               tipc_named_node_up(net, n->addr, n->capabilities);
+               tipc_named_node_up(net, sk.node, n->capabilities);
 
        if (flags & TIPC_NOTIFY_LINK_UP) {
-               tipc_mon_peer_up(net, n->addr, bearer_id);
-               tipc_nametbl_publish(net, &ua, &sk, n->link_id);
+               tipc_mon_peer_up(net, sk.node, bearer_id);
+               tipc_nametbl_publish(net, &ua, &sk, sk.ref);
        }
        if (flags & TIPC_NOTIFY_LINK_DOWN) {
-               tipc_mon_peer_down(net, n->addr, bearer_id);
-               tipc_nametbl_withdraw(net, &ua, &sk, n->link_id);
+               tipc_mon_peer_down(net, sk.node, bearer_id);
+               tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
        }
 }
 
index 58935cd0d068a155fc11d0a59344b211ba33b761..53af72824c9cebc9aee0a392045e51fd8acaa010 100644 (file)
@@ -1262,7 +1262,10 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                spin_lock_bh(&inputq->lock);
                if (skb_peek(arrvq) == skb) {
                        skb_queue_splice_tail_init(&tmpq, inputq);
-                       __skb_dequeue(arrvq);
+                       /* Drop the extra reference taken on the skb by
+                        * tipc_skb_peek()
+                        */
+                       kfree_skb(__skb_dequeue(arrvq));
                }
                spin_unlock_bh(&inputq->lock);
                __skb_queue_purge(&tmpq);
index e556d2cdc06447eb6cbc5088b5317c1648a9f485..c2bb818704c8ff19c03030816857e4eb06a020bf 100644 (file)
@@ -814,6 +814,7 @@ static void cleanup_bearer(struct work_struct *work)
                kfree_rcu(rcast, rcu);
        }
 
+       atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
        dst_cache_destroy(&ub->rcast.dst_cache);
        udp_tunnel_sock_release(ub->ubsock);
        synchronize_net();
@@ -834,6 +835,7 @@ static void tipc_udp_disable(struct tipc_bearer *b)
        RCU_INIT_POINTER(ub->bearer, NULL);
 
        /* sock_release need to be done outside of rtnl lock */
+       atomic_inc(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
        INIT_WORK(&ub->work, cleanup_bearer);
        schedule_work(&ub->work);
 }
index 76a6f8c2eec4bf2a71a4e02d9b598f702c488819..bd9f1567aa3929171e84a5b64b46bd38f949ca4e 100644 (file)
@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
 static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
 static LIST_HEAD(tls_device_gc_list);
 static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
 static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
        struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
        struct net_device *netdev;
 
-       if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
-               return;
-
        trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+       rcu_read_lock();
        netdev = READ_ONCE(tls_ctx->netdev);
        if (netdev)
                netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
                                                   TLS_OFFLOAD_CTX_DIR_RX);
-       clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+       rcu_read_unlock();
        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 
        if (tls_ctx->rx_conf != TLS_HW)
                return;
+       if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+               return;
 
        prot = &tls_ctx->prot_info;
        rx_ctx = tls_offload_ctx_rx(tls_ctx);
@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
 
        ctx->sw.decrypted |= is_decrypted;
 
+       if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+               if (likely(is_encrypted || is_decrypted))
+                       return 0;
+
+               /* After tls_device_down disables the offload, the next SKB will
+                * likely have initial fragments decrypted, and final ones not
+                * decrypted. We need to reencrypt that single SKB.
+                */
+               return tls_device_reencrypt(sk, skb);
+       }
+
        /* Return immediately if the record is either entirely plaintext or
         * entirely ciphertext. Otherwise handle reencrypt partially decrypted
         * record.
@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev)
        spin_unlock_irqrestore(&tls_device_lock, flags);
 
        list_for_each_entry_safe(ctx, tmp, &list, list) {
+               /* Stop offloaded TX and switch to the fallback.
+                * tls_is_sk_tx_device_offloaded will return false.
+                */
+               WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+               /* Stop the RX and TX resync.
+                * tls_dev_resync must not be called after tls_dev_del.
+                */
+               WRITE_ONCE(ctx->netdev, NULL);
+
+               /* Start skipping the RX resync logic completely. */
+               set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+               /* Sync with inflight packets. After this point:
+                * TX: no non-encrypted packets will be passed to the driver.
+                * RX: resync requests from the driver will be ignored.
+                */
+               synchronize_net();
+
+               /* Release the offload context on the driver side. */
                if (ctx->tx_conf == TLS_HW)
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev)
                    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_RX);
-               WRITE_ONCE(ctx->netdev, NULL);
-               smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
-               while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
-                       usleep_range(10, 200);
+
                dev_put(netdev);
-               list_del_init(&ctx->list);
 
-               if (refcount_dec_and_test(&ctx->refcount))
-                       tls_device_free_ctx(ctx);
+               /* Move the context to a separate list for two reasons:
+                * 1. When the context is deallocated, list_del is called.
+                * 2. It's no longer an offloaded context, so we don't want to
+                *    run offload-specific code on this context.
+                */
+               spin_lock_irqsave(&tls_device_lock, flags);
+               list_move_tail(&ctx->list, &tls_device_down_list);
+               spin_unlock_irqrestore(&tls_device_lock, flags);
+
+               /* Device contexts for RX and TX will be freed on sk_destruct
+                * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+                */
        }
 
        up_write(&device_offload_lock);
index cacf040872c74c59002e8dcdfe98a5449bed9b94..e40bedd112b68573ab8bfd845002e17253028c16 100644 (file)
@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
 
+struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
+                                        struct net_device *dev,
+                                        struct sk_buff *skb)
+{
+       return tls_sw_fallback(sk, skb);
+}
+
 struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
 {
        return tls_sw_fallback(skb->sk, skb);
index 47b7c5334c346f2130a87682bb9040a6fd0fe4ce..fde56ff491637b6e2a67596d39094edbd032105c 100644 (file)
@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
        mutex_init(&ctx->tx_lock);
        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        ctx->sk_proto = READ_ONCE(sk->sk_prot);
+       ctx->sk = sk;
        return ctx;
 }
 
index 1dcb34dfd56b35a8cd4e29f5ab3dcbb8a91db42e..694de024d0ee6cc1722b4cd7420b5a191bbb4b63 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/sched/signal.h>
 #include <linux/module.h>
+#include <linux/splice.h>
 #include <crypto/aead.h>
 
 #include <net/strparser.h>
@@ -1281,7 +1282,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 }
 
 static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
-                                    int flags, long timeo, int *err)
+                                    bool nonblock, long timeo, int *err)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -1306,7 +1307,7 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
                if (sock_flag(sk, SOCK_DONE))
                        return NULL;
 
-               if ((flags & MSG_DONTWAIT) || !timeo) {
+               if (nonblock || !timeo) {
                        *err = -EAGAIN;
                        return NULL;
                }
@@ -1786,7 +1787,7 @@ int tls_sw_recvmsg(struct sock *sk,
                bool async_capable;
                bool async = false;
 
-               skb = tls_wait_data(sk, psock, flags, timeo, &err);
+               skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
                if (!skb) {
                        if (psock) {
                                int ret = sk_msg_recvmsg(sk, psock, msg, len,
@@ -1990,9 +1991,9 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
 
        lock_sock(sk);
 
-       timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+       timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
 
-       skb = tls_wait_data(sk, NULL, flags, timeo, &err);
+       skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
        if (!skb)
                goto splice_read_end;
 
index 382c5262d997dd7c439b11382ab3846e941c9400..7ec021a610aebd69edda19f2ca25ff9ef681d5b0 100644 (file)
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
 
 int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
                                  const u8 *addr, enum nl80211_iftype iftype,
-                                 u8 data_offset)
+                                 u8 data_offset, bool is_amsdu)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct {
@@ -629,7 +629,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
        skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
        tmp.h_proto = payload.proto;
 
-       if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
+       if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
                    tmp.h_proto != htons(ETH_P_AARP) &&
                    tmp.h_proto != htons(ETH_P_IPX)) ||
                   ether_addr_equal(payload.hdr, bridge_tunnel_header)))
@@ -771,6 +771,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
                remaining = skb->len - offset;
                if (subframe_len > remaining)
                        goto purge;
+               /* mitigate A-MSDU aggregation injection attacks */
+               if (ether_addr_equal(eth.h_dest, rfc1042_header))
+                       goto purge;
 
                offset += sizeof(struct ethhdr);
                last = remaining <= subframe_len + padding;
index 44d6566dd23e43f3c5de3c140037ccbb68e0cd38..1816899499ce89931da0904f227425cef4672964 100644 (file)
@@ -536,7 +536,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
        if (protocol)
                goto out;
 
-       rc = -ENOBUFS;
+       rc = -ENOMEM;
        if ((sk = x25_alloc_socket(net, kern)) == NULL)
                goto out;
 
index aa696854be787e117a878fd9b8f4ab38af99fa8e..53e300f860bb4f4290307e8443fbf68f4d8cf7e0 100644 (file)
@@ -1255,7 +1255,7 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
        for (i = 0; i < batch_size; i++) {
                struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
                                                                  idx + i);
-               tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
+               tx_desc->addr = (*frame_nb + i) * opt_xsk_frame_size;
                tx_desc->len = PKT_SIZE;
        }
 
index 21dbf63d6e415f19a0c795d51d62bd17e7a1f344..9ec93d90e8a5a647f1e8ddce1ed2f005abdfeeee 100644 (file)
@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
        if (format != DRM_FORMAT_XRGB8888) {
                pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
                        format, DRM_FORMAT_XRGB8888);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (width < 100  || width > 10000) {
                pci_err(pdev, "width (%d) out of range\n", width);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (height < 100 || height > 10000) {
                pci_err(pdev, "height (%d) out of range\n", height);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
                 width, height);
 
        info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
-       if (!info)
+       if (!info) {
+               ret = -ENOMEM;
                goto err_release_regions;
+       }
        pci_set_drvdata(pdev, info);
        par = info->par;
 
index dd87cea9fba7852e7e8b9d3f729205f1dd5945c4..a7883e455290eeb695f350ac0a4f85add6810a5f 100644 (file)
@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M]  $@
 quiet_cmd_btf_ko = BTF [M] $@
       cmd_btf_ko =                                                     \
        if [ -f vmlinux ]; then                                         \
-               LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; \
+               LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
        else                                                            \
                printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
        fi;
index f6d543725f1ed685870ae14d3528522ac133f716..b2483149bbe55028527b8bbf439e663d6d032560 100755 (executable)
@@ -76,7 +76,11 @@ fi
 if arg_contain -S "$@"; then
        # For scripts/gcc-x86-*-has-stack-protector.sh
        if arg_contain -fstack-protector "$@"; then
-               echo "%gs"
+               if arg_contain -mstack-protector-guard-reg=fs "$@"; then
+                       echo "%fs"
+               else
+                       echo "%gs"
+               fi
                exit 0
        fi
 
index 48d141e3ec56f280045a996f926a8213ace36dac..8762887a970ce2479eb6a43ae654c36136369f6c 100755 (executable)
@@ -10,7 +10,7 @@ from __future__ import print_function
 import os, sys, errno
 import subprocess
 
-# Extract and prepare jobserver file descriptors from envirnoment.
+# Extract and prepare jobserver file descriptors from environment.
 claim = 0
 jobs = b""
 try:
index f4de4c97015bcd2458e052bb2f8d0fc0742eb267..0e0f6466b18d6019335e2db19ad6439c9e9c19c3 100755 (executable)
@@ -240,7 +240,7 @@ gen_btf()
        fi
 
        info "BTF" ${2}
-       LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${extra_paholeopt} ${1}
+       LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${extra_paholeopt} ${1}
 
        # Create ${2} which contains just .BTF section but no symbols. Add
        # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
index 4693945508019c277b6a9cf2628b7bdeff25581f..aa108bea6739b302e9ef4f274f47a05d44014dce 100644 (file)
@@ -493,10 +493,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
 
        ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
        if (ret < 0)
-               return ret;
+               goto out;
 
-       if (ret != TPM_NONCE_SIZE)
-               return -EIO;
+       if (ret != TPM_NONCE_SIZE) {
+               ret = -EIO;
+               goto out;
+       }
 
        ordinal = htonl(TPM_ORD_SEAL);
        datsize = htonl(datalen);
index 617fabd4d913b81caa5e0537e12477671dbe3a77..0165da386289c315cc581aa45fbd4ecfa5b88693 100644 (file)
@@ -336,9 +336,9 @@ out:
                        rc = -EPERM;
        }
        if (blob_len < 0)
-               return blob_len;
-
-       payload->blob_len = blob_len;
+               rc = blob_len;
+       else
+               payload->blob_len = blob_len;
 
        tpm_put_ops(chip);
        return rc;
index 25f57c14f294f7ffa7e1fbe045b714ef470aa343..a90e31dbde61f4bfd918f223c116fb93aa5c2942 100644 (file)
@@ -17,6 +17,9 @@ MODULE_LICENSE("GPL");
 #define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \
                        >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1)
 
+#define to_led_card_dev(_dev) \
+       container_of(_dev, struct snd_ctl_led_card, dev)
+
 enum snd_ctl_led_mode {
         MODE_FOLLOW_MUTE = 0,
         MODE_FOLLOW_ROUTE,
@@ -371,6 +374,21 @@ static void snd_ctl_led_disconnect(struct snd_card *card)
        snd_ctl_led_refresh();
 }
 
+static void snd_ctl_led_card_release(struct device *dev)
+{
+       struct snd_ctl_led_card *led_card = to_led_card_dev(dev);
+
+       kfree(led_card);
+}
+
+static void snd_ctl_led_release(struct device *dev)
+{
+}
+
+static void snd_ctl_led_dev_release(struct device *dev)
+{
+}
+
 /*
  * sysfs
  */
@@ -663,6 +681,7 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card)
                led_card->number = card->number;
                led_card->led = led;
                device_initialize(&led_card->dev);
+               led_card->dev.release = snd_ctl_led_card_release;
                if (dev_set_name(&led_card->dev, "card%d", card->number) < 0)
                        goto cerr;
                led_card->dev.parent = &led->dev;
@@ -681,7 +700,6 @@ cerr:
                put_device(&led_card->dev);
 cerr2:
                printk(KERN_ERR "snd_ctl_led: unable to add card%d", card->number);
-               kfree(led_card);
        }
 }
 
@@ -700,8 +718,7 @@ static void snd_ctl_led_sysfs_remove(struct snd_card *card)
                snprintf(link_name, sizeof(link_name), "led-%s", led->name);
                sysfs_remove_link(&card->ctl_dev.kobj, link_name);
                sysfs_remove_link(&led_card->dev.kobj, "card");
-               device_del(&led_card->dev);
-               kfree(led_card);
+               device_unregister(&led_card->dev);
                led->cards[card->number] = NULL;
        }
 }
@@ -723,6 +740,7 @@ static int __init snd_ctl_led_init(void)
 
        device_initialize(&snd_ctl_led_dev);
        snd_ctl_led_dev.class = sound_class;
+       snd_ctl_led_dev.release = snd_ctl_led_dev_release;
        dev_set_name(&snd_ctl_led_dev, "ctl-led");
        if (device_add(&snd_ctl_led_dev)) {
                put_device(&snd_ctl_led_dev);
@@ -733,15 +751,16 @@ static int __init snd_ctl_led_init(void)
                INIT_LIST_HEAD(&led->controls);
                device_initialize(&led->dev);
                led->dev.parent = &snd_ctl_led_dev;
+               led->dev.release = snd_ctl_led_release;
                led->dev.groups = snd_ctl_led_dev_attr_groups;
                dev_set_name(&led->dev, led->name);
                if (device_add(&led->dev)) {
                        put_device(&led->dev);
                        for (; group > 0; group--) {
                                led = &snd_ctl_leds[group - 1];
-                               device_del(&led->dev);
+                               device_unregister(&led->dev);
                        }
-                       device_del(&snd_ctl_led_dev);
+                       device_unregister(&snd_ctl_led_dev);
                        return -ENOMEM;
                }
        }
@@ -767,9 +786,9 @@ static void __exit snd_ctl_led_exit(void)
        }
        for (group = 0; group < MAX_LED; group++) {
                led = &snd_ctl_leds[group];
-               device_del(&led->dev);
+               device_unregister(&led->dev);
        }
-       device_del(&snd_ctl_led_dev);
+       device_unregister(&snd_ctl_led_dev);
        snd_ctl_led_clean(NULL);
 }
 
index 1645e4142e30246dc30af69857d1dc8e9f736219..9863be6fd43e1a34aece5ffe258ffc627eb05bc2 100644 (file)
@@ -297,8 +297,16 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
                return err;
        }
        spin_lock_irq(&tmr->lock);
-       tmr->timeri = t;
+       if (tmr->timeri)
+               err = -EBUSY;
+       else
+               tmr->timeri = t;
        spin_unlock_irq(&tmr->lock);
+       if (err < 0) {
+               snd_timer_close(t);
+               snd_timer_instance_free(t);
+               return err;
+       }
        return 0;
 }
 
index 6898b1ac0d7f4572e36958834d6c602f4c937c70..92b7008fcdb86a57b91fd90d02474303943db9c2 100644 (file)
@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
                return;
        if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
                return;
+       event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
        list_for_each_entry(ts, &ti->slave_active_head, active_list)
                if (ts->ccallback)
-                       ts->ccallback(ts, event + 100, &tstamp, resolution);
+                       ts->ccallback(ts, event, &tstamp, resolution);
 }
 
 /* start/continue a master timer */
index 25778765cbfe932f4b702737441a5141bd1cab62..9897bd26a438852603d28b6f96a6f4f801754451 100644 (file)
@@ -38,7 +38,7 @@ config SND_OXFW
           * Mackie(Loud) Onyx 1640i (former model)
           * Mackie(Loud) Onyx Satellite
           * Mackie(Loud) Tapco Link.Firewire
-          * Mackie(Loud) d.2 pro/d.4 pro
+          * Mackie(Loud) d.4 pro
           * Mackie(Loud) U.420/U.420d
           * TASCAM FireOne
           * Stanton Controllers & Systems 1 Deck/Mixer
@@ -84,7 +84,7 @@ config SND_BEBOB
          * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
          * BridgeCo RDAudio1/Audio5
          * Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
-         * Mackie d.2 (FireWire Option)
+         * Mackie d.2 (FireWire Option) and d.2 Pro
          * Stanton FinalScratch 2 (ScratchAmp)
          * Tascam IF-FW/DM
          * Behringer XENIX UFX 1204/1604
index 26e7cb555d3c593d81c2653267f95469190e6cbe..aa53c13b89d34ec0ba77338e839429106689095f 100644 (file)
@@ -14,8 +14,8 @@
 #include <linux/tracepoint.h>
 
 TRACE_EVENT(amdtp_packet,
-       TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int index),
-       TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, index),
+       TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int packet_index, unsigned int index),
+       TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, packet_index, index),
        TP_STRUCT__entry(
                __field(unsigned int, second)
                __field(unsigned int, cycle)
@@ -48,7 +48,7 @@ TRACE_EVENT(amdtp_packet,
                __entry->payload_quadlets = payload_length / sizeof(__be32);
                __entry->data_blocks = data_blocks;
                __entry->data_block_counter = data_block_counter,
-               __entry->packet_index = s->packet_index;
+               __entry->packet_index = packet_index;
                __entry->irq = !!in_interrupt();
                __entry->index = index;
        ),
index 4e2f2bb7879fb2fcdbb1fcc5868542ea836a2a7b..5805c5de39fbf7fc8c4011aa2e9433e4ab367ae1 100644 (file)
@@ -526,7 +526,7 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
        }
 
        trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
-                          data_block_counter, index);
+                          data_block_counter, s->packet_index, index);
 }
 
 static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
@@ -630,21 +630,27 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
                               unsigned int *payload_length,
                               unsigned int *data_blocks,
                               unsigned int *data_block_counter,
-                              unsigned int *syt, unsigned int index)
+                              unsigned int *syt, unsigned int packet_index, unsigned int index)
 {
        const __be32 *cip_header;
+       unsigned int cip_header_size;
        int err;
 
        *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
-       if (*payload_length > s->ctx_data.tx.ctx_header_size +
-                                       s->ctx_data.tx.max_ctx_payload_length) {
+
+       if (!(s->flags & CIP_NO_HEADER))
+               cip_header_size = 8;
+       else
+               cip_header_size = 0;
+
+       if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
                dev_err(&s->unit->device,
                        "Detect jumbo payload: %04x %04x\n",
-                       *payload_length, s->ctx_data.tx.max_ctx_payload_length);
+                       *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
                return -EIO;
        }
 
-       if (!(s->flags & CIP_NO_HEADER)) {
+       if (cip_header_size > 0) {
                cip_header = ctx_header + 2;
                err = check_cip_header(s, cip_header, *payload_length,
                                       data_blocks, data_block_counter, syt);
@@ -662,7 +668,7 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
        }
 
        trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
-                          *data_block_counter, index);
+                          *data_block_counter, packet_index, index);
 
        return err;
 }
@@ -701,12 +707,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
                                     unsigned int packets)
 {
        unsigned int dbc = s->data_block_counter;
+       unsigned int packet_index = s->packet_index;
+       unsigned int queue_size = s->queue_size;
        int i;
        int err;
 
        for (i = 0; i < packets; ++i) {
                struct pkt_desc *desc = descs + i;
-               unsigned int index = (s->packet_index + i) % s->queue_size;
                unsigned int cycle;
                unsigned int payload_length;
                unsigned int data_blocks;
@@ -715,7 +722,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
                cycle = compute_cycle_count(ctx_header[1]);
 
                err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
-                                         &data_blocks, &dbc, &syt, i);
+                                         &data_blocks, &dbc, &syt, packet_index, i);
                if (err < 0)
                        return err;
 
@@ -723,13 +730,15 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
                desc->syt = syt;
                desc->data_blocks = data_blocks;
                desc->data_block_counter = dbc;
-               desc->ctx_payload = s->buffer.packets[index].buffer;
+               desc->ctx_payload = s->buffer.packets[packet_index].buffer;
 
                if (!(s->flags & CIP_DBC_IS_END_EVENT))
                        dbc = (dbc + desc->data_blocks) & 0xff;
 
                ctx_header +=
                        s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
+
+               packet_index = (packet_index + 1) % queue_size;
        }
 
        s->data_block_counter = dbc;
@@ -795,7 +804,7 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
 static inline void cancel_stream(struct amdtp_stream *s)
 {
        s->packet_index = -1;
-       if (current_work() == &s->period_work)
+       if (in_interrupt())
                amdtp_stream_pcm_abort(s);
        WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 }
@@ -1065,23 +1074,22 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
                s->data_block_counter = 0;
        }
 
-       /* initialize packet buffer */
+       // initialize packet buffer.
+       max_ctx_payload_size = amdtp_stream_get_max_payload(s);
        if (s->direction == AMDTP_IN_STREAM) {
                dir = DMA_FROM_DEVICE;
                type = FW_ISO_CONTEXT_RECEIVE;
-               if (!(s->flags & CIP_NO_HEADER))
+               if (!(s->flags & CIP_NO_HEADER)) {
+                       max_ctx_payload_size -= 8;
                        ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
-               else
+               } else {
                        ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
-
-               max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
-                                      ctx_header_size;
+               }
        } else {
                dir = DMA_TO_DEVICE;
                type = FW_ISO_CONTEXT_TRANSMIT;
                ctx_header_size = 0;    // No effect for IT context.
 
-               max_ctx_payload_size = amdtp_stream_get_max_payload(s);
                if (!(s->flags & CIP_NO_HEADER))
                        max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
        }
index 2c8e3392a4903a129acc28ecb9d21ffc698eae18..daeecfa8b9aac3e6666de77fcf4d31cb91264dbb 100644 (file)
@@ -387,7 +387,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
        SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
        /* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
        SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
-       /* Mackie, d.2 (Firewire Option) */
+       // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
        SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
        /* Stanton, ScratchAmp */
        SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
index 0916864511d50607e9326b498a77f2587778fef9..27c13b9cc9efda96346088cdc4dd0ab7ce6a5fdc 100644 (file)
@@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
 static const unsigned int
 alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
        {10, 10, 4},    /* Tx0 = Analog + S/PDIF. */
-       {16, 8, 0},     /* Tx1 = ADAT1 + ADAT2. */
+       {16, 4, 0},     /* Tx1 = ADAT1 + ADAT2 (available at low rate). */
 };
 
 int snd_dice_detect_alesis_formats(struct snd_dice *dice)
index af8a90ee40f395adb5e7c2dbefae9684d2110e80..a69ca1111b0332f5386f154d6a2d4998d23c55b6 100644 (file)
@@ -218,7 +218,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
 
                if (frames_per_period > 0) {
                        // For double_pcm_frame quirk.
-                       if (rate > 96000) {
+                       if (rate > 96000 && !dice->disable_double_pcm_frames) {
                                frames_per_period *= 2;
                                frames_per_buffer *= 2;
                        }
@@ -273,7 +273,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
 
                mutex_lock(&dice->mutex);
                // For double_pcm_frame quirk.
-               if (rate > 96000) {
+               if (rate > 96000 && !dice->disable_double_pcm_frames) {
                        events_per_period /= 2;
                        events_per_buffer /= 2;
                }
index 1a14c083e8ceaac63488d37f367878323d5a7459..c4dfe76500c29038a8cbcbddd12ae7694c4d23d7 100644 (file)
@@ -181,7 +181,7 @@ static int keep_resources(struct snd_dice *dice, struct amdtp_stream *stream,
        // as 'Dual Wire'.
        // For this quirk, blocking mode is required and PCM buffer size should
        // be aligned to SYT_INTERVAL.
-       double_pcm_frames = rate > 96000;
+       double_pcm_frames = (rate > 96000 && !dice->disable_double_pcm_frames);
        if (double_pcm_frames) {
                rate /= 2;
                pcm_chs *= 2;
index a8875d24ba2aa00aa1743dbde59c310af8ec310d..43a3bcb15b3d17e5ee4d18bbe0b4c9f70487ea82 100644 (file)
@@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
 };
 
 static const struct dice_tc_spec konnekt_live = {
-       .tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
-       .rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
+       .tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
+       .rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
        .has_midi = true,
 };
 
index 107a81691f0e819fad9bb79c858c844c58723cc9..239d164b0eea85e5d0667c3d8e45d60716a70e9b 100644 (file)
@@ -21,6 +21,7 @@ MODULE_LICENSE("GPL v2");
 #define OUI_SSL                        0x0050c2        // Actually ID reserved by IEEE.
 #define OUI_PRESONUS           0x000a92
 #define OUI_HARMAN             0x000fd7
+#define OUI_AVID               0x00a07e
 
 #define DICE_CATEGORY_ID       0x04
 #define WEISS_CATEGORY_ID      0x00
@@ -222,6 +223,14 @@ static int dice_probe(struct fw_unit *unit,
                                (snd_dice_detect_formats_t)entry->driver_data;
        }
 
+       // Below models are compliant to IEC 61883-1/6 and have no quirk at high sampling transfer
+       // frequency.
+       // * Avid M-Box 3 Pro
+       // * M-Audio Profire 610
+       // * M-Audio Profire 2626
+       if (entry->vendor_id == OUI_MAUDIO || entry->vendor_id == OUI_AVID)
+               dice->disable_double_pcm_frames = true;
+
        spin_lock_init(&dice->lock);
        mutex_init(&dice->mutex);
        init_completion(&dice->clock_accepted);
@@ -278,7 +287,22 @@ static void dice_bus_reset(struct fw_unit *unit)
 
 #define DICE_INTERFACE 0x000001
 
+#define DICE_DEV_ENTRY_TYPICAL(vendor, model, data) \
+       { \
+               .match_flags    = IEEE1394_MATCH_VENDOR_ID | \
+                                 IEEE1394_MATCH_MODEL_ID | \
+                                 IEEE1394_MATCH_SPECIFIER_ID | \
+                                 IEEE1394_MATCH_VERSION, \
+               .vendor_id      = (vendor), \
+               .model_id       = (model), \
+               .specifier_id   = (vendor), \
+               .version        = DICE_INTERFACE, \
+               .driver_data = (kernel_ulong_t)(data), \
+       }
+
 static const struct ieee1394_device_id dice_id_table[] = {
+       // Avid M-Box 3 Pro. To match in probe function.
+       DICE_DEV_ENTRY_TYPICAL(OUI_AVID, 0x000004, snd_dice_detect_extension_formats),
        /* M-Audio Profire 2626 has a different value in version field. */
        {
                .match_flags    = IEEE1394_MATCH_VENDOR_ID |
index adc6f7c8446091f0b2ef5370ff0765dee879ae68..3c967d1b3605d83f763ae8e163ffdd5f411c79bc 100644 (file)
@@ -109,7 +109,8 @@ struct snd_dice {
        struct fw_iso_resources rx_resources[MAX_STREAMS];
        struct amdtp_stream tx_stream[MAX_STREAMS];
        struct amdtp_stream rx_stream[MAX_STREAMS];
-       bool global_enabled;
+       bool global_enabled:1;
+       bool disable_double_pcm_frames:1;
        struct completion clock_accepted;
        unsigned int substreams_counter;
 
index 1f1e3236efb8e045300d4cc074f95bda46fd6a8c..9eea25c46dc7e8b43305fb22effa0482a7910707 100644 (file)
@@ -355,7 +355,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
         *  Onyx-i series (former models):      0x081216
         *  Mackie Onyx Satellite:              0x00200f
         *  Tapco LINK.firewire 4x6:            0x000460
-        *  d.2 pro:                            Unknown
         *  d.4 pro:                            Unknown
         *  U.420:                              Unknown
         *  U.420d:                             Unknown
index ab5ff7867eb996cc73178a888c2caaaa697ab9c4..d8be146793eee2d513a53539a0c1197904bdedd0 100644 (file)
@@ -331,6 +331,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51c8,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cc,
+       },
 #endif
 
 };
index afc088f0377ce4f36136a8fbcda49b34ee45eeaf..b7518122a10d6ff3bdf17d1dcfd6090896c5869c 100644 (file)
@@ -77,17 +77,8 @@ static const struct snd_kcontrol_new snd_gus_joystick_control = {
 
 static void snd_gus_init_control(struct snd_gus_card *gus)
 {
-       int ret;
-
-       if (!gus->ace_flag) {
-               ret =
-                       snd_ctl_add(gus->card,
-                                       snd_ctl_new1(&snd_gus_joystick_control,
-                                               gus));
-               if (ret)
-                       snd_printk(KERN_ERR "gus: snd_ctl_add failed: %d\n",
-                                       ret);
-       }
+       if (!gus->ace_flag)
+               snd_ctl_add(gus->card, snd_ctl_new1(&snd_gus_joystick_control, gus));
 }
 
 /*
index 38dc1fde25f3cbbe53c754ad87d51cebbe3223cf..aa48705310231c274ce5d8c8d332a1aca25723d2 100644 (file)
@@ -846,14 +846,10 @@ int snd_sb16dsp_pcm(struct snd_sb *chip, int device)
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops);
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops);
 
-       if (chip->dma16 >= 0 && chip->dma8 != chip->dma16) {
-               err = snd_ctl_add(card, snd_ctl_new1(
-                                       &snd_sb16_dma_control, chip));
-               if (err)
-                       return err;
-       } else {
+       if (chip->dma16 >= 0 && chip->dma8 != chip->dma16)
+               snd_ctl_add(card, snd_ctl_new1(&snd_sb16_dma_control, chip));
+       else
                pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
-       }
 
        snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
                                       card->dev, 64*1024, 128*1024);
index 6c9d534ce8b6133f57cd0f47dfd61b7a2945e83e..ed3a87ebe3f4119cc0698dc3852471891826c03a 100644 (file)
@@ -93,12 +93,12 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
        acard = card->private_data;
        card->private_free = snd_sb8_free;
 
-       /* block the 0x388 port to avoid PnP conflicts */
+       /*
+        * Block the 0x388 port to avoid PnP conflicts.
+        * No need to check this value after request_region,
+        * as we never do anything with it.
+        */
        acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
-       if (!acard->fm_res) {
-               err = -EBUSY;
-               goto _err;
-       }
 
        if (port[dev] != SNDRV_AUTO_PORT) {
                if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
index a31009afc025f673741983ed7de7fc24ba58de6c..5462f771c2f90664df3eac9e9c3b5a2205210129 100644 (file)
@@ -2917,6 +2917,7 @@ static int hda_codec_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int hda_codec_pm_prepare(struct device *dev)
 {
+       dev->power.power_state = PMSG_SUSPEND;
        return pm_runtime_suspended(dev);
 }
 
@@ -2924,6 +2925,10 @@ static void hda_codec_pm_complete(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
+       /* If no other pm-functions are called between prepare() and complete() */
+       if (dev->power.power_state.event == PM_EVENT_SUSPEND)
+               dev->power.power_state = PMSG_RESUME;
+
        if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
            hda_codec_need_resume(codec) || codec->forced_resume))
                pm_request_resume(dev);
index b638fc2ef6f72173ccf60eaa06f52c631d6f9ea9..1f8018f9ce57addc6da04adaa8dc8b1edac5aa51 100644 (file)
@@ -3520,6 +3520,7 @@ static int cap_sw_put(struct snd_kcontrol *kcontrol,
 static const struct snd_kcontrol_new cap_sw_temp = {
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name = "Capture Switch",
+       .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
        .info = cap_sw_info,
        .get = cap_sw_get,
        .put = cap_sw_put,
index 79ade335c8a09494c1b7b5698c83ad674b8ec604..470753b36c8a1e2b96e90ad0b7579521d0ca87f7 100644 (file)
@@ -2485,6 +2485,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Alderlake-P */
        { PCI_DEVICE(0x8086, 0x51c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Alderlake-M */
+       { PCI_DEVICE(0x8086, 0x51cc),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 726507d0b04ceffd9cc0407124cd7eb015a01a83..8629e84fef23df7d739cfa30ddb9f01d90593b9f 100644 (file)
@@ -2206,10 +2206,9 @@ static void cs8409_cs42l42_fixups(struct hda_codec *codec,
                break;
        case HDA_FIXUP_ACT_PROBE:
 
-               /* Set initial volume on Bullseye to -26 dB */
-               if (codec->fixup_id == CS8409_BULLSEYE)
-                       snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
-                                       HDA_INPUT, 0, 0xff, 0x19);
+               /* Set initial DMIC volume to -26 dB */
+               snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
+                               HDA_INPUT, 0, 0xff, 0x19);
                snd_hda_gen_add_kctl(&spec->gen,
                        NULL, &cs8409_cs42l42_hp_volume_mixer);
                snd_hda_gen_add_kctl(&spec->gen,
index 6d58f24c9702f963ad60b5d9c4af4de9a970ece0..ab5113cccffae4e16062f2a3e8b21203f5633f5c 100644 (file)
@@ -395,7 +395,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0282:
        case 0x10ec0283:
        case 0x10ec0286:
-       case 0x10ec0287:
        case 0x10ec0288:
        case 0x10ec0285:
        case 0x10ec0298:
@@ -406,6 +405,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0275:
                alc_update_coef_idx(codec, 0xe, 0, 1<<0);
                break;
+       case 0x10ec0287:
+               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               alc_write_coef_idx(codec, 0x8, 0x4ab7);
+               break;
        case 0x10ec0293:
                alc_update_coef_idx(codec, 0xa, 1<<13, 0);
                break;
@@ -2600,6 +2603,28 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
        {}
 };
 
+static const struct snd_hda_pin_quirk alc882_pin_fixup_tbl[] = {
+       SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950,
+               {0x14, 0x01014010},
+               {0x15, 0x01011012},
+               {0x16, 0x01016011},
+               {0x18, 0x01a19040},
+               {0x19, 0x02a19050},
+               {0x1a, 0x0181304f},
+               {0x1b, 0x0221401f},
+               {0x1e, 0x01456130}),
+       SND_HDA_PIN_QUIRK(0x10ec1220, 0x1462, "MS-7C35", ALC1220_FIXUP_CLEVO_P950,
+               {0x14, 0x01015010},
+               {0x15, 0x01011012},
+               {0x16, 0x01011011},
+               {0x18, 0x01a11040},
+               {0x19, 0x02a19050},
+               {0x1a, 0x0181104f},
+               {0x1b, 0x0221401f},
+               {0x1e, 0x01451130}),
+       {}
+};
+
 /*
  * BIOS auto configuration
  */
@@ -2641,6 +2666,7 @@ static int patch_alc882(struct hda_codec *codec)
 
        snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
                       alc882_fixups);
+       snd_hda_pick_pin_fixup(codec, alc882_pin_fixup_tbl, alc882_fixups, true);
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
        alc_auto_parse_customize_define(codec);
@@ -6251,6 +6277,35 @@ static void alc294_fixup_gx502_hp(struct hda_codec *codec,
        }
 }
 
+static void alc294_gu502_toggle_output(struct hda_codec *codec,
+                                      struct hda_jack_callback *cb)
+{
+       /* Windows sets 0x10 to 0x8420 for Node 0x20 which is
+        * responsible from changes between speakers and headphones
+        */
+       if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
+               alc_write_coef_idx(codec, 0x10, 0x8420);
+       else
+               alc_write_coef_idx(codec, 0x10, 0x0a20);
+}
+
+static void alc294_fixup_gu502_hp(struct hda_codec *codec,
+                                 const struct hda_fixup *fix, int action)
+{
+       if (!is_jack_detectable(codec, 0x21))
+               return;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               snd_hda_jack_detect_enable_callback(codec, 0x21,
+                               alc294_gu502_toggle_output);
+               break;
+       case HDA_FIXUP_ACT_INIT:
+               alc294_gu502_toggle_output(codec, NULL);
+               break;
+       }
+}
+
 static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
                              const struct hda_fixup *fix, int action)
 {
@@ -6468,6 +6523,9 @@ enum {
        ALC294_FIXUP_ASUS_GX502_HP,
        ALC294_FIXUP_ASUS_GX502_PINS,
        ALC294_FIXUP_ASUS_GX502_VERBS,
+       ALC294_FIXUP_ASUS_GU502_HP,
+       ALC294_FIXUP_ASUS_GU502_PINS,
+       ALC294_FIXUP_ASUS_GU502_VERBS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_GPIO_LED,
@@ -6507,6 +6565,10 @@ enum {
        ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST,
        ALC295_FIXUP_ASUS_DACS,
        ALC295_FIXUP_HP_OMEN,
+       ALC285_FIXUP_HP_SPECTRE_X360,
+       ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
+       ALC623_FIXUP_LENOVO_THINKSTATION_P340,
+       ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7709,6 +7771,35 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc294_fixup_gx502_hp,
        },
+       [ALC294_FIXUP_ASUS_GU502_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a11050 }, /* rear HP mic */
+                       { 0x1a, 0x01a11830 }, /* rear external mic */
+                       { 0x21, 0x012110f0 }, /* rear HP out */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
+       },
+       [ALC294_FIXUP_ASUS_GU502_VERBS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* set 0x15 to HP-OUT ctrl */
+                       { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
+                       /* unmute the 0x15 amp */
+                       { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
+                       /* set 0x1b to HP-OUT */
+                       { 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_GU502_HP
+       },
+       [ALC294_FIXUP_ASUS_GU502_HP] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc294_fixup_gu502_hp,
+       },
        [ALC294_FIXUP_ASUS_COEF_1B] = {
                .type = HDA_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -8035,6 +8126,36 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
        },
+       [ALC285_FIXUP_HP_SPECTRE_X360] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x90170110 }, /* enable top speaker */
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
+       },
+       [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_ideapad_s740_coef,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+       },
+       [ALC623_FIXUP_LENOVO_THINKSTATION_P340] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_no_shutup,
+               .chained = true,
+               .chain_id = ALC283_FIXUP_HEADSET_MIC,
+       },
+       [ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x21, 0x03211030 }, /* Change the Headphone location to Left */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8071,6 +8192,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -8192,11 +8314,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -8215,7 +8341,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -8253,6 +8385,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+       SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
        SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
@@ -8309,12 +8442,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8332,11 +8472,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
-       SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -8386,6 +8534,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+       SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8600,6 +8749,10 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
        {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
        {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
+       {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+       {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+       {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+       {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
        {}
 };
 #define ALC225_STANDARD_PINS \
index 35903d1a1cbd27696e28cac536d5cfc03ea629cf..5b124c4ad5725b108a8aec464cbe5581419bef20 100644 (file)
@@ -331,6 +331,7 @@ struct ichdev {
        unsigned int ali_slot;                  /* ALI DMA slot */
        struct ac97_pcm *pcm;
        int pcm_open_flag;
+       unsigned int prepared:1;
        unsigned int suspended: 1;
 };
 
@@ -691,6 +692,9 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
        int status, civ, i, step;
        int ack = 0;
 
+       if (!ichdev->prepared || ichdev->suspended)
+               return;
+
        spin_lock_irqsave(&chip->reg_lock, flags);
        status = igetbyte(chip, port + ichdev->roff_sr);
        civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
@@ -881,6 +885,7 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
        if (ichdev->pcm_open_flag) {
                snd_ac97_pcm_close(ichdev->pcm);
                ichdev->pcm_open_flag = 0;
+               ichdev->prepared = 0;
        }
        err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
                                params_channels(hw_params),
@@ -902,6 +907,7 @@ static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
        if (ichdev->pcm_open_flag) {
                snd_ac97_pcm_close(ichdev->pcm);
                ichdev->pcm_open_flag = 0;
+               ichdev->prepared = 0;
        }
        return 0;
 }
@@ -976,6 +982,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
                        ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
        }
        snd_intel8x0_setup_periods(chip, ichdev);
+       ichdev->prepared = 1;
        return 0;
 }
 
index f22bb2bdf527a94364e78a2892fbf0722abdb94d..8148b0d22e880398fbaed3f9e0fbd5a1bffbb661 100644 (file)
@@ -235,10 +235,6 @@ static int acp3x_dma_open(struct snd_soc_component *component,
                return ret;
        }
 
-       if (!adata->play_stream && !adata->capture_stream &&
-           !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
-               rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
-
        i2s_data->acp3x_base = adata->acp3x_base;
        runtime->private_data = i2s_data;
        return ret;
@@ -365,12 +361,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
                }
        }
 
-       /* Disable ACP irq, when the current stream is being closed and
-        * another stream is also not active.
-        */
-       if (!adata->play_stream && !adata->capture_stream &&
-               !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
-               rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
        return 0;
 }
 
index 03fe93913e12e39ce506d5218e2ec06c4f481734..c3f0c8b7545db11760e1c20167cc7c9270e9fb9e 100644 (file)
@@ -77,6 +77,7 @@
 #define ACP_POWER_OFF_IN_PROGRESS      0x03
 
 #define ACP3x_ITER_IRER_SAMP_LEN_MASK  0x38
+#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
 
 struct acp3x_platform_info {
        u16 play_i2s_instance;
index d3536fd6a12400644a4cfa5b26795956fd2cedb3..a013a607b3d4705c1b9ea6e44dfcf5900849dc5d 100644 (file)
@@ -76,6 +76,19 @@ static int acp3x_reset(void __iomem *acp3x_base)
        return -ETIMEDOUT;
 }
 
+static void acp3x_enable_interrupts(void __iomem *acp_base)
+{
+       rv_writel(0x01, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
+static void acp3x_disable_interrupts(void __iomem *acp_base)
+{
+       rv_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
+                 mmACP_EXTERNAL_INTR_STAT);
+       rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_CNTL);
+       rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
 static int acp3x_init(struct acp3x_dev_data *adata)
 {
        void __iomem *acp3x_base = adata->acp3x_base;
@@ -93,6 +106,7 @@ static int acp3x_init(struct acp3x_dev_data *adata)
                pr_err("ACP3x reset failed\n");
                return ret;
        }
+       acp3x_enable_interrupts(acp3x_base);
        return 0;
 }
 
@@ -100,6 +114,7 @@ static int acp3x_deinit(void __iomem *acp3x_base)
 {
        int ret;
 
+       acp3x_disable_interrupts(acp3x_base);
        /* Reset */
        ret = acp3x_reset(acp3x_base);
        if (ret) {
index 34aed80db0eb046b26a70f2110b31cedc5fd419d..37d4600b6f2c27c737830f1cfaeda1386497ba7a 100644 (file)
@@ -307,7 +307,7 @@ static struct snd_soc_dai_driver ak5558_dai = {
 };
 
 static struct snd_soc_dai_driver ak5552_dai = {
-       .name = "ak5558-aif",
+       .name = "ak5552-aif",
        .capture = {
                .stream_name = "Capture",
                .channels_min = 1,
index f4067230ac425a5388771343bb50cc527759436a..88e79b9f52edca0893eb98e8375f87f47fd90d43 100644 (file)
@@ -261,6 +261,9 @@ static const struct regmap_config cs35l32_regmap = {
        .readable_reg = cs35l32_readable_register,
        .precious_reg = cs35l32_precious_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
index 7ad7b733af9b6731c4778660b27af8278302af5a..e8f3dcfd144dadb61b3ebcdbc69adc677ce7f47e 100644 (file)
@@ -1201,6 +1201,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
                dev_err(&i2c_client->dev,
                        "CS35L33 Device ID (%X). Expected ID %X\n",
                        devid, CS35L33_CHIP_ID);
+               ret = -EINVAL;
                goto err_enable;
        }
 
index 110ee2d063581c0879c9792565bf3d1c974c6ad7..3d3c3c34dfe273ee3cc79c89996a2a469fe8bdbb 100644 (file)
@@ -800,6 +800,9 @@ static struct regmap_config cs35l34_regmap = {
        .readable_reg = cs35l34_readable_register,
        .precious_reg = cs35l34_precious_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs35l34_handle_of_data(struct i2c_client *i2c_client,
index bf982e145e945f4774d5c306522426f381433c14..77473c226f9ec022e8b73b1edeb7ece1fb1ef8e2 100644 (file)
@@ -399,6 +399,9 @@ static const struct regmap_config cs42l42_regmap = {
        .reg_defaults = cs42l42_reg_defaults,
        .num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
index c44a5cdb796ecf8e0350ff5f488af0028408440b..7cdffdf6b8cf05381a2a74bbdaffc1470f94a4cd 100644 (file)
@@ -1175,7 +1175,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
        struct cs42l56_platform_data *pdata =
                dev_get_platdata(&i2c_client->dev);
        int ret, i;
-       unsigned int devid = 0;
+       unsigned int devid;
        unsigned int alpha_rev, metal_rev;
        unsigned int reg;
 
@@ -1245,6 +1245,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
        }
 
        ret = regmap_read(cs42l56->regmap, CS42L56_CHIP_ID_1, &reg);
+       if (ret) {
+               dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret);
+               return ret;
+       }
+
        devid = reg & CS42L56_CHIP_ID_MASK;
        if (devid != CS42L56_DEVID) {
                dev_err(&i2c_client->dev,
index c3f974ec78e58b31d9c88f9c394ccc5c02027ef0..e92bacaab53fccdc81e6e332852f9658236d6691 100644 (file)
@@ -1268,6 +1268,9 @@ static const struct regmap_config cs42l73_regmap = {
        .volatile_reg = cs42l73_volatile_register,
        .readable_reg = cs42l73_readable_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
index 80bc7c10ed757fc693c4a9f57d64bbaf8ae3e495..80cd3ea0c1577261b8df0ecee488ad8b198dc025 100644 (file)
@@ -1735,6 +1735,14 @@ static DEVICE_ATTR(hpload_dc_r, 0444, cs43130_show_dc_r, NULL);
 static DEVICE_ATTR(hpload_ac_l, 0444, cs43130_show_ac_l, NULL);
 static DEVICE_ATTR(hpload_ac_r, 0444, cs43130_show_ac_r, NULL);
 
+static struct attribute *hpload_attrs[] = {
+       &dev_attr_hpload_dc_l.attr,
+       &dev_attr_hpload_dc_r.attr,
+       &dev_attr_hpload_ac_l.attr,
+       &dev_attr_hpload_ac_r.attr,
+};
+ATTRIBUTE_GROUPS(hpload);
+
 static struct reg_sequence hp_en_cal_seq[] = {
        {CS43130_INT_MASK_4, CS43130_INT_MASK_ALL},
        {CS43130_HP_MEAS_LOAD_1, 0},
@@ -2302,25 +2310,15 @@ static int cs43130_probe(struct snd_soc_component *component)
 
        cs43130->hpload_done = false;
        if (cs43130->dc_meas) {
-               ret = device_create_file(component->dev, &dev_attr_hpload_dc_l);
-               if (ret < 0)
-                       return ret;
-
-               ret = device_create_file(component->dev, &dev_attr_hpload_dc_r);
-               if (ret < 0)
-                       return ret;
-
-               ret = device_create_file(component->dev, &dev_attr_hpload_ac_l);
-               if (ret < 0)
-                       return ret;
-
-               ret = device_create_file(component->dev, &dev_attr_hpload_ac_r);
-               if (ret < 0)
+               ret = sysfs_create_groups(&component->dev->kobj, hpload_groups);
+               if (ret)
                        return ret;
 
                cs43130->wq = create_singlethread_workqueue("cs43130_hp");
-               if (!cs43130->wq)
+               if (!cs43130->wq) {
+                       sysfs_remove_groups(&component->dev->kobj, hpload_groups);
                        return -ENOMEM;
+               }
                INIT_WORK(&cs43130->work, cs43130_imp_meas);
        }
 
index 3d67cbf9eaaa2b58e78860cfbbec37020900089d..abe0cc0bc03a94bf6d4838eb56e6d0235a55e7e0 100644 (file)
@@ -912,6 +912,9 @@ static struct regmap_config cs53l30_regmap = {
        .writeable_reg = cs53l30_writeable_register,
        .readable_reg = cs53l30_readable_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs53l30_i2c_probe(struct i2c_client *client,
index bd3c523a86171e31ddf9e887c796f0d8abc53f27..13009d08b09ac5ed3271ffdc66b60d7d57cd1b41 100644 (file)
@@ -2181,10 +2181,7 @@ static int da7219_register_dai_clks(struct snd_soc_component *component)
                                 ret);
                        goto err;
                }
-
-               da7219->dai_clks[i] = devm_clk_hw_get_clk(dev, dai_clk_hw, NULL);
-               if (IS_ERR(da7219->dai_clks[i]))
-                       return PTR_ERR(da7219->dai_clks[i]);
+               da7219->dai_clks[i] = dai_clk_hw->clk;
 
                /* For DT setup onecell data, otherwise create lookup */
                if (np) {
index b0ebfc8d180ca231d0d65bee41f9fc348de6266e..171ab7f519c0461ce61a1431ca745fee4ffa60b7 100644 (file)
@@ -3579,6 +3579,7 @@ static const struct of_device_id rx_macro_dt_match[] = {
        { .compatible = "qcom,sm8250-lpass-rx-macro" },
        { }
 };
+MODULE_DEVICE_TABLE(of, rx_macro_dt_match);
 
 static struct platform_driver rx_macro_driver = {
        .driver = {
index acd2fbc0ca7c69b9d19cf69c59e21045a2da4a26..27a0d5defd273da7dd4f5e8316ab2617b118d1e7 100644 (file)
@@ -1846,6 +1846,7 @@ static const struct of_device_id tx_macro_dt_match[] = {
        { .compatible = "qcom,sm8250-lpass-tx-macro" },
        { }
 };
+MODULE_DEVICE_TABLE(of, tx_macro_dt_match);
 static struct platform_driver tx_macro_driver = {
        .driver = {
                .name = "tx_macro",
index 4be24e7f51c89e41a979a9ee14c4f23fc1ba1ab3..f8e49e45ce33f112a707a80ed520ff9327c0e261 100644 (file)
@@ -41,6 +41,7 @@ struct max98088_priv {
        enum max98088_type devtype;
        struct max98088_pdata *pdata;
        struct clk *mclk;
+       unsigned char mclk_prescaler;
        unsigned int sysclk;
        struct max98088_cdata dai[2];
        int eq_textcnt;
@@ -998,13 +999,16 @@ static int max98088_dai1_hw_params(struct snd_pcm_substream *substream,
        /* Configure NI when operating as master */
        if (snd_soc_component_read(component, M98088_REG_14_DAI1_FORMAT)
                & M98088_DAI_MAS) {
+               unsigned long pclk;
+
                if (max98088->sysclk == 0) {
                        dev_err(component->dev, "Invalid system clock frequency\n");
                        return -EINVAL;
                }
                ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
                                * (unsigned long long int)rate;
-               do_div(ni, (unsigned long long int)max98088->sysclk);
+               pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+               ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
                snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI,
                        (ni >> 8) & 0x7F);
                snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO,
@@ -1065,13 +1069,16 @@ static int max98088_dai2_hw_params(struct snd_pcm_substream *substream,
        /* Configure NI when operating as master */
        if (snd_soc_component_read(component, M98088_REG_1C_DAI2_FORMAT)
                & M98088_DAI_MAS) {
+               unsigned long pclk;
+
                if (max98088->sysclk == 0) {
                        dev_err(component->dev, "Invalid system clock frequency\n");
                        return -EINVAL;
                }
                ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
                                * (unsigned long long int)rate;
-               do_div(ni, (unsigned long long int)max98088->sysclk);
+               pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+               ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
                snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI,
                        (ni >> 8) & 0x7F);
                snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO,
@@ -1113,8 +1120,10 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
         */
        if ((freq >= 10000000) && (freq < 20000000)) {
                snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x10);
+               max98088->mclk_prescaler = 1;
        } else if ((freq >= 20000000) && (freq < 30000000)) {
                snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x20);
+               max98088->mclk_prescaler = 2;
        } else {
                dev_err(component->dev, "Invalid master clock frequency\n");
                return -EINVAL;
index 9408ee63cb2688127d150acc99480cab60ac49d9..438fa18bcb55d6a67a18dbd86265d63aaf1f18ea 100644 (file)
@@ -3388,30 +3388,44 @@ static int rt5645_probe(struct snd_soc_component *component)
 {
        struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
        struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
+       int ret = 0;
 
        rt5645->component = component;
 
        switch (rt5645->codec_type) {
        case CODEC_TYPE_RT5645:
-               snd_soc_dapm_new_controls(dapm,
+               ret = snd_soc_dapm_new_controls(dapm,
                        rt5645_specific_dapm_widgets,
                        ARRAY_SIZE(rt5645_specific_dapm_widgets));
-               snd_soc_dapm_add_routes(dapm,
+               if (ret < 0)
+                       goto exit;
+
+               ret = snd_soc_dapm_add_routes(dapm,
                        rt5645_specific_dapm_routes,
                        ARRAY_SIZE(rt5645_specific_dapm_routes));
+               if (ret < 0)
+                       goto exit;
+
                if (rt5645->v_id < 3) {
-                       snd_soc_dapm_add_routes(dapm,
+                       ret = snd_soc_dapm_add_routes(dapm,
                                rt5645_old_dapm_routes,
                                ARRAY_SIZE(rt5645_old_dapm_routes));
+                       if (ret < 0)
+                               goto exit;
                }
                break;
        case CODEC_TYPE_RT5650:
-               snd_soc_dapm_new_controls(dapm,
+               ret = snd_soc_dapm_new_controls(dapm,
                        rt5650_specific_dapm_widgets,
                        ARRAY_SIZE(rt5650_specific_dapm_widgets));
-               snd_soc_dapm_add_routes(dapm,
+               if (ret < 0)
+                       goto exit;
+
+               ret = snd_soc_dapm_add_routes(dapm,
                        rt5650_specific_dapm_routes,
                        ARRAY_SIZE(rt5650_specific_dapm_routes));
+               if (ret < 0)
+                       goto exit;
                break;
        }
 
@@ -3419,9 +3433,17 @@ static int rt5645_probe(struct snd_soc_component *component)
 
        /* for JD function */
        if (rt5645->pdata.jd_mode) {
-               snd_soc_dapm_force_enable_pin(dapm, "JD Power");
-               snd_soc_dapm_force_enable_pin(dapm, "LDO2");
-               snd_soc_dapm_sync(dapm);
+               ret = snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+               if (ret < 0)
+                       goto exit;
+
+               ret = snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+               if (ret < 0)
+                       goto exit;
+
+               ret = snd_soc_dapm_sync(dapm);
+               if (ret < 0)
+                       goto exit;
        }
 
        if (rt5645->pdata.long_name)
@@ -3432,9 +3454,14 @@ static int rt5645_probe(struct snd_soc_component *component)
                GFP_KERNEL);
 
        if (!rt5645->eq_param)
-               return -ENOMEM;
-
-       return 0;
+               ret = -ENOMEM;
+exit:
+       /*
+        * If there was an error above, everything will be cleaned up by the
+        * caller if we return an error here.  This will be done with a later
+        * call to rt5645_remove().
+        */
+       return ret;
 }
 
 static void rt5645_remove(struct snd_soc_component *component)
index 87f5709fe2cca3207abdf381483ca6584e78f76e..4a50b169fe032085f69d34627c806a94aa1e20f5 100644 (file)
@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
                NULL, 0),
-       SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
-               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+               0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
                RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+       SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+               NULL, 0),
        SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
                RT5659_PWR_VREF3_BIT, 0, NULL, 0),
 
@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
                RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
 
        /* Input Side */
-       SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
-               0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
                0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
 
 static int rt5659_probe(struct snd_soc_component *component)
 {
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
        struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
 
        rt5659->component = component;
 
+       switch (rt5659->pdata.jd_src) {
+       case RT5659_JD_HDA_HEADER:
+               break;
+
+       default:
+               snd_soc_dapm_new_controls(dapm,
+                       rt5659_particular_dapm_widgets,
+                       ARRAY_SIZE(rt5659_particular_dapm_widgets));
+               break;
+       }
+
        return 0;
 }
 
index fed80c8f994fdd8b7fa16e67097d6fe03ad192a8..e78ba3b064c4f0dcbfb63a825ac1ae6061eba943 100644 (file)
@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
 
        regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
                RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
-       regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+       regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
+       regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
        regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
                RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
        regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
index cc36739f7fcfb8223ca532b67b596fe1f604229f..24a084e0b48a1b71e5044dc4f36d78baa417c31f 100644 (file)
@@ -683,13 +683,13 @@ static int rt711_sdca_set_fu1e_capture_ctl(struct rt711_sdca_priv *rt711)
        ch_r = (rt711->fu1e_dapm_mute || rt711->fu1e_mixer_r_mute) ? 0x01 : 0x00;
 
        err = regmap_write(rt711->regmap,
-                       SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+                       SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
                        RT711_SDCA_CTL_FU_MUTE, CH_L), ch_l);
        if (err < 0)
                return err;
 
        err = regmap_write(rt711->regmap,
-                       SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+                       SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
                        RT711_SDCA_CTL_FU_MUTE, CH_R), ch_r);
        if (err < 0)
                return err;
index ffdf7e55951537609c21fdd9c714f6716568ee76..82a24e330065f882d1243d3cb2126a991fe2241f 100644 (file)
@@ -408,6 +408,7 @@ static const struct of_device_id sti_sas_dev_match[] = {
        },
        {},
 };
+MODULE_DEVICE_TABLE(of, sti_sas_dev_match);
 
 static int sti_sas_driver_probe(struct platform_device *pdev)
 {
index 81866aeb3fbfadc321dc1500778990485b4e3d4c..55b2a1f52ca379e2d57f16d95be88180d2de9ee6 100644 (file)
 #define TAS2562_TDM_CFG0_RAMPRATE_MASK         BIT(5)
 #define TAS2562_TDM_CFG0_RAMPRATE_44_1         BIT(5)
 #define TAS2562_TDM_CFG0_SAMPRATE_MASK         GENMASK(3, 1)
-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ    0x0
-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ   0x1
-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ  0x2
-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ   0x3
-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ   0x4
-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ   0x5
-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ    (0x0 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ   (0x1 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ  (0x2 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ   (0x3 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ   (0x4 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ   (0x5 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
 
 #define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
 
index 0917d65d69213f57f380e523fda6e509ee74901e..556c284f49dd3531789b3eb109189b54f2574dc2 100644 (file)
@@ -119,6 +119,7 @@ config SND_SOC_FSL_RPMSG
        tristate "NXP Audio Base On RPMSG support"
        depends on COMMON_CLK
        depends on RPMSG
+       depends on SND_IMX_SOC || SND_IMX_SOC = n
        select SND_SOC_IMX_RPMSG if SND_IMX_SOC != n
        help
          Say Y if you want to add rpmsg audio support for the Freescale CPUs.
index c62bfd1c3ac7ca415fa314ff6c8ae43cf4e839dd..4f55b316cf0fb8567142412cb3c4e666c9c40c1f 100644 (file)
@@ -744,6 +744,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
        /* Initialize sound card */
        priv->pdev = pdev;
        priv->card.dev = &pdev->dev;
+       priv->card.owner = THIS_MODULE;
        ret = snd_soc_of_parse_card_name(&priv->card, "model");
        if (ret) {
                snprintf(priv->name, sizeof(priv->name), "%s-audio",
index 2c8a2fcb7922a24841507136e7aec24587a4581f..5e71382467e88614200853e069ae757e85196a9f 100644 (file)
@@ -209,7 +209,7 @@ static void graph_parse_mclk_fs(struct device_node *top,
 static int graph_parse_node(struct asoc_simple_priv *priv,
                            struct device_node *ep,
                            struct link_info *li,
-                           int is_cpu)
+                           int *cpu)
 {
        struct device *dev = simple_priv_to_dev(priv);
        struct device_node *top = dev->of_node;
@@ -217,9 +217,9 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
        struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
        struct snd_soc_dai_link_component *dlc;
        struct asoc_simple_dai *dai;
-       int ret, single = 0;
+       int ret;
 
-       if (is_cpu) {
+       if (cpu) {
                dlc = asoc_link_to_cpu(dai_link, 0);
                dai = simple_props_to_dai_cpu(dai_props, 0);
        } else {
@@ -229,7 +229,7 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
 
        graph_parse_mclk_fs(top, ep, dai_props);
 
-       ret = asoc_simple_parse_dai(ep, dlc, &single);
+       ret = asoc_simple_parse_dai(ep, dlc, cpu);
        if (ret < 0)
                return ret;
 
@@ -241,9 +241,6 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
        if (ret < 0)
                return ret;
 
-       if (is_cpu)
-               asoc_simple_canonicalize_cpu(dlc, single);
-
        return 0;
 }
 
@@ -276,33 +273,29 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                                  struct link_info *li)
 {
        struct device *dev = simple_priv_to_dev(priv);
-       struct snd_soc_card *card = simple_priv_to_card(priv);
        struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
        struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
        struct device_node *top = dev->of_node;
        struct device_node *ep = li->cpu ? cpu_ep : codec_ep;
-       struct device_node *port;
-       struct device_node *ports;
-       struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
-       struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
        char dai_name[64];
        int ret;
 
-       port    = of_get_parent(ep);
-       ports   = of_get_parent(port);
-
        dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
 
        if (li->cpu) {
+               struct snd_soc_card *card = simple_priv_to_card(priv);
+               struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+               int is_single_links = 0;
+
                /* Codec is dummy */
 
                /* FE settings */
                dai_link->dynamic               = 1;
                dai_link->dpcm_merged_format    = 1;
 
-               ret = graph_parse_node(priv, cpu_ep, li, 1);
+               ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
                if (ret)
-                       goto out_put_node;
+                       return ret;
 
                snprintf(dai_name, sizeof(dai_name),
                         "fe.%pOFP.%s", cpus->of_node, cpus->dai_name);
@@ -318,8 +311,13 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                 */
                if (card->component_chaining && !soc_component_is_pcm(cpus))
                        dai_link->no_pcm = 1;
+
+               asoc_simple_canonicalize_cpu(cpus, is_single_links);
        } else {
-               struct snd_soc_codec_conf *cconf;
+               struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
+               struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
+               struct device_node *port;
+               struct device_node *ports;
 
                /* CPU is dummy */
 
@@ -327,22 +325,25 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                dai_link->no_pcm                = 1;
                dai_link->be_hw_params_fixup    = asoc_simple_be_hw_params_fixup;
 
-               cconf   = simple_props_to_codec_conf(dai_props, 0);
-
-               ret = graph_parse_node(priv, codec_ep, li, 0);
+               ret = graph_parse_node(priv, codec_ep, li, NULL);
                if (ret < 0)
-                       goto out_put_node;
+                       return ret;
 
                snprintf(dai_name, sizeof(dai_name),
                         "be.%pOFP.%s", codecs->of_node, codecs->dai_name);
 
                /* check "prefix" from top node */
+               port = of_get_parent(ep);
+               ports = of_get_parent(port);
                snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
                                              "prefix");
                if (of_node_name_eq(ports, "ports"))
                        snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node, "prefix");
                snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node,
                                             "prefix");
+
+               of_node_put(ports);
+               of_node_put(port);
        }
 
        graph_parse_convert(dev, ep, &dai_props->adata);
@@ -351,11 +352,8 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
 
        ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
 
-out_put_node:
        li->link++;
 
-       of_node_put(ports);
-       of_node_put(port);
        return ret;
 }
 
@@ -369,20 +367,23 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
        struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
        struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
        char dai_name[64];
-       int ret;
+       int ret, is_single_links = 0;
 
        dev_dbg(dev, "link_of (%pOF)\n", cpu_ep);
 
-       ret = graph_parse_node(priv, cpu_ep, li, 1);
+       ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
        if (ret < 0)
                return ret;
 
-       ret = graph_parse_node(priv, codec_ep, li, 0);
+       ret = graph_parse_node(priv, codec_ep, li, NULL);
        if (ret < 0)
                return ret;
 
        snprintf(dai_name, sizeof(dai_name),
                 "%s-%s", cpus->dai_name, codecs->dai_name);
+
+       asoc_simple_canonicalize_cpu(cpus, is_single_links);
+
        ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
        if (ret < 0)
                return ret;
index a1373be4558f3f40faf84a37b4b69d6249737453..0015f534d42d9fe511cef9fce1150b7c0b9ac1eb 100644 (file)
@@ -93,12 +93,11 @@ static void simple_parse_convert(struct device *dev,
 }
 
 static void simple_parse_mclk_fs(struct device_node *top,
-                                struct device_node *cpu,
-                                struct device_node *codec,
+                                struct device_node *np,
                                 struct simple_dai_props *props,
                                 char *prefix)
 {
-       struct device_node *node = of_get_parent(cpu);
+       struct device_node *node = of_get_parent(np);
        char prop[128];
 
        snprintf(prop, sizeof(prop), "%smclk-fs", PREFIX);
@@ -106,12 +105,71 @@ static void simple_parse_mclk_fs(struct device_node *top,
 
        snprintf(prop, sizeof(prop), "%smclk-fs", prefix);
        of_property_read_u32(node,      prop, &props->mclk_fs);
-       of_property_read_u32(cpu,       prop, &props->mclk_fs);
-       of_property_read_u32(codec,     prop, &props->mclk_fs);
+       of_property_read_u32(np,        prop, &props->mclk_fs);
 
        of_node_put(node);
 }
 
+static int simple_parse_node(struct asoc_simple_priv *priv,
+                            struct device_node *np,
+                            struct link_info *li,
+                            char *prefix,
+                            int *cpu)
+{
+       struct device *dev = simple_priv_to_dev(priv);
+       struct device_node *top = dev->of_node;
+       struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+       struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
+       struct snd_soc_dai_link_component *dlc;
+       struct asoc_simple_dai *dai;
+       int ret;
+
+       if (cpu) {
+               dlc = asoc_link_to_cpu(dai_link, 0);
+               dai = simple_props_to_dai_cpu(dai_props, 0);
+       } else {
+               dlc = asoc_link_to_codec(dai_link, 0);
+               dai = simple_props_to_dai_codec(dai_props, 0);
+       }
+
+       simple_parse_mclk_fs(top, np, dai_props, prefix);
+
+       ret = asoc_simple_parse_dai(np, dlc, cpu);
+       if (ret)
+               return ret;
+
+       ret = asoc_simple_parse_clk(dev, np, dai, dlc);
+       if (ret)
+               return ret;
+
+       ret = asoc_simple_parse_tdm(np, dai);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int simple_link_init(struct asoc_simple_priv *priv,
+                           struct device_node *node,
+                           struct device_node *codec,
+                           struct link_info *li,
+                           char *prefix, char *name)
+{
+       struct device *dev = simple_priv_to_dev(priv);
+       struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+       int ret;
+
+       ret = asoc_simple_parse_daifmt(dev, node, codec,
+                                      prefix, &dai_link->dai_fmt);
+       if (ret < 0)
+               return 0;
+
+       dai_link->init                  = asoc_simple_dai_init;
+       dai_link->ops                   = &simple_ops;
+
+       return asoc_simple_set_dailink_name(dev, dai_link, name);
+}
+
 static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                                   struct device_node *np,
                                   struct device_node *codec,
@@ -121,24 +179,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
        struct device *dev = simple_priv_to_dev(priv);
        struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
        struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
-       struct asoc_simple_dai *dai;
-       struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
-       struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
-       struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
        struct device_node *top = dev->of_node;
        struct device_node *node = of_get_parent(np);
        char *prefix = "";
+       char dai_name[64];
        int ret;
 
        dev_dbg(dev, "link_of DPCM (%pOF)\n", np);
 
-       li->link++;
-
        /* For single DAI link & old style of DT node */
        if (is_top)
                prefix = PREFIX;
 
        if (li->cpu) {
+               struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+               struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
                int is_single_links = 0;
 
                /* Codec is dummy */
@@ -147,25 +202,16 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                dai_link->dynamic               = 1;
                dai_link->dpcm_merged_format    = 1;
 
-               dai = simple_props_to_dai_cpu(dai_props, 0);
-
-               ret = asoc_simple_parse_dai(np, cpus, &is_single_links);
-               if (ret)
-                       goto out_put_node;
-
-               ret = asoc_simple_parse_clk(dev, np, dai, cpus);
+               ret = simple_parse_node(priv, np, li, prefix, &is_single_links);
                if (ret < 0)
                        goto out_put_node;
 
-               ret = asoc_simple_set_dailink_name(dev, dai_link,
-                                                  "fe.%s",
-                                                  cpus->dai_name);
-               if (ret < 0)
-                       goto out_put_node;
+               snprintf(dai_name, sizeof(dai_name), "fe.%s", cpus->dai_name);
 
                asoc_simple_canonicalize_cpu(cpus, is_single_links);
                asoc_simple_canonicalize_platform(platforms, cpus);
        } else {
+               struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
                struct snd_soc_codec_conf *cconf;
 
                /* CPU is dummy */
@@ -174,22 +220,13 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                dai_link->no_pcm                = 1;
                dai_link->be_hw_params_fixup    = asoc_simple_be_hw_params_fixup;
 
-               dai     = simple_props_to_dai_codec(dai_props, 0);
                cconf   = simple_props_to_codec_conf(dai_props, 0);
 
-               ret = asoc_simple_parse_dai(np, codecs, NULL);
+               ret = simple_parse_node(priv, np, li, prefix, NULL);
                if (ret < 0)
                        goto out_put_node;
 
-               ret = asoc_simple_parse_clk(dev, np, dai, codecs);
-               if (ret < 0)
-                       goto out_put_node;
-
-               ret = asoc_simple_set_dailink_name(dev, dai_link,
-                                                  "be.%s",
-                                                  codecs->dai_name);
-               if (ret < 0)
-                       goto out_put_node;
+               snprintf(dai_name, sizeof(dai_name), "be.%s", codecs->dai_name);
 
                /* check "prefix" from top node */
                snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
@@ -201,23 +238,14 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
        }
 
        simple_parse_convert(dev, np, &dai_props->adata);
-       simple_parse_mclk_fs(top, np, codec, dai_props, prefix);
-
-       ret = asoc_simple_parse_tdm(np, dai);
-       if (ret)
-               goto out_put_node;
-
-       ret = asoc_simple_parse_daifmt(dev, node, codec,
-                                      prefix, &dai_link->dai_fmt);
-       if (ret < 0)
-               goto out_put_node;
 
        snd_soc_dai_link_set_capabilities(dai_link);
 
-       dai_link->ops                   = &simple_ops;
-       dai_link->init                  = asoc_simple_dai_init;
+       ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
 
 out_put_node:
+       li->link++;
+
        of_node_put(node);
        return ret;
 }
@@ -230,23 +258,19 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
 {
        struct device *dev = simple_priv_to_dev(priv);
        struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
-       struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
-       struct asoc_simple_dai *cpu_dai = simple_props_to_dai_cpu(dai_props, 0);
-       struct asoc_simple_dai *codec_dai = simple_props_to_dai_codec(dai_props, 0);
        struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
        struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
        struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
-       struct device_node *top = dev->of_node;
        struct device_node *cpu = NULL;
        struct device_node *node = NULL;
        struct device_node *plat = NULL;
+       char dai_name[64];
        char prop[128];
        char *prefix = "";
        int ret, single_cpu = 0;
 
        cpu  = np;
        node = of_get_parent(np);
-       li->link++;
 
        dev_dbg(dev, "link_of (%pOF)\n", node);
 
@@ -257,18 +281,11 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
        snprintf(prop, sizeof(prop), "%splat", prefix);
        plat = of_get_child_by_name(node, prop);
 
-       ret = asoc_simple_parse_daifmt(dev, node, codec,
-                                      prefix, &dai_link->dai_fmt);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       simple_parse_mclk_fs(top, cpu, codec, dai_props, prefix);
-
-       ret = asoc_simple_parse_dai(cpu, cpus, &single_cpu);
+       ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu);
        if (ret < 0)
                goto dai_link_of_err;
 
-       ret = asoc_simple_parse_dai(codec, codecs, NULL);
+       ret = simple_parse_node(priv, codec, li, prefix, NULL);
        if (ret < 0)
                goto dai_link_of_err;
 
@@ -276,39 +293,20 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
        if (ret < 0)
                goto dai_link_of_err;
 
-       ret = asoc_simple_parse_tdm(cpu, cpu_dai);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_parse_tdm(codec, codec_dai);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_parse_clk(dev, cpu, cpu_dai, cpus);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_parse_clk(dev, codec, codec_dai, codecs);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_set_dailink_name(dev, dai_link,
-                                          "%s-%s",
-                                          cpus->dai_name,
-                                          codecs->dai_name);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       dai_link->ops = &simple_ops;
-       dai_link->init = asoc_simple_dai_init;
+       snprintf(dai_name, sizeof(dai_name),
+                "%s-%s", cpus->dai_name, codecs->dai_name);
 
        asoc_simple_canonicalize_cpu(cpus, single_cpu);
        asoc_simple_canonicalize_platform(platforms, cpus);
 
+       ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
+
 dai_link_of_err:
        of_node_put(plat);
        of_node_put(node);
 
+       li->link++;
+
        return ret;
 }
 
index df2f5d55e8ffec9d1501bdb4ff8ea97bcadd115e..22dbd9d93c1ef5bece4b63fffeb57e1851e5bf39 100644 (file)
@@ -574,6 +574,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_SSP0_AIF1 |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Glavey TM800A550L */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+                       /* Above strings are too generic, also match on BIOS version */
+                       DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+               },
+               .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -652,6 +663,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_MONO_SPEAKER |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Lenovo Miix 3-830 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 3-830"),
+               },
+               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+                                       BYT_RT5640_JD_SRC_JD2_IN4N |
+                                       BYT_RT5640_OVCD_TH_2000UA |
+                                       BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_MONO_SPEAKER |
+                                       BYT_RT5640_DIFF_MIC |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* Linx Linx7 tablet */
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
index c62d2612e8f5e281cc8482250a78d130bad1fe9e..a6e95db6b3fbc0680464b458134d2cebd05a5128 100644 (file)
@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
                struct snd_soc_dai *dai)
 {
        struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+       struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+       unsigned int id = dai->driver->id;
 
        clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
+       /*
+        * Ensure LRCLK is disabled even in device node validation.
+        * Will not impact if disabled in lpass_cpu_daiops_trigger()
+        * suspend.
+        */
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
+       else
+               regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
+
+       /*
+        * BCLK may not be enabled if lpass_cpu_daiops_prepare is called before
+        * lpass_cpu_daiops_shutdown. It's paired with the clk_enable in
+        * lpass_cpu_daiops_prepare.
+        */
+       if (drvdata->mi2s_was_prepared[dai->driver->id]) {
+               drvdata->mi2s_was_prepared[dai->driver->id] = false;
+               clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+       }
+
        clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
 }
 
@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               /*
+                * Ensure lpass BCLK/LRCLK is enabled during
+                * device resume as lpass_cpu_daiops_prepare() is not called
+                * after the device resumes. We don't check mi2s_was_prepared before
+                * enable/disable BCLK in trigger events because:
+                *  1. These trigger events are paired, so the BCLK
+                *     enable_count is balanced.
+                *  2. the BCLK can be shared (ex: headset and headset mic),
+                *     we need to increase the enable_count so that we don't
+                *     turn off the shared BCLK while other devices are using
+                *     it.
+                */
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        ret = regmap_fields_write(i2sctl->spken, id,
                                                 LPAIF_I2SCTL_SPKEN_ENABLE);
@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               /*
+                * To ensure lpass BCLK/LRCLK is disabled during
+                * device suspend.
+                */
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        ret = regmap_fields_write(i2sctl->spken, id,
                                                 LPAIF_I2SCTL_SPKEN_DISABLE);
@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        return ret;
 }
 
+static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+               struct snd_soc_dai *dai)
+{
+       struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+       struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+       unsigned int id = dai->driver->id;
+       int ret;
+
+       /*
+        * Ensure lpass BCLK/LRCLK is enabled bit before playback/capture
+        * data flow starts. This allows other codec to have some delay before
+        * the data flow.
+        * (ex: to drop start up pop noise before capture starts).
+        */
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
+       else
+               ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
+
+       if (ret) {
+               dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+               return ret;
+       }
+
+       /*
+        * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
+        * be called multiple times. It's paired with the clk_disable in
+        * lpass_cpu_daiops_shutdown.
+        */
+       if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
+               ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+               if (ret) {
+                       dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+                       return ret;
+               }
+               drvdata->mi2s_was_prepared[dai->driver->id] = true;
+       }
+       return 0;
+}
+
 const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
        .set_sysclk     = lpass_cpu_daiops_set_sysclk,
        .startup        = lpass_cpu_daiops_startup,
        .shutdown       = lpass_cpu_daiops_shutdown,
        .hw_params      = lpass_cpu_daiops_hw_params,
        .trigger        = lpass_cpu_daiops_trigger,
+       .prepare        = lpass_cpu_daiops_prepare,
 };
 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
 
@@ -835,18 +914,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
                if (dai_id == LPASS_DP_RX)
                        continue;
 
-               drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev,
+               drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
                                             variant->dai_osr_clk_names[i]);
-               if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
-                       dev_warn(dev,
-                               "%s() error getting optional %s: %ld\n",
-                               __func__,
-                               variant->dai_osr_clk_names[i],
-                               PTR_ERR(drvdata->mi2s_osr_clk[dai_id]));
-
-                       drvdata->mi2s_osr_clk[dai_id] = NULL;
-               }
-
                drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
                                                variant->dai_bit_clk_names[i]);
                if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
index 83b2e08ade0603b9b83deae24f441dd0e02da15f..7f72214404baf26c0a5176c26f534fd01e10e686 100644 (file)
@@ -67,6 +67,10 @@ struct lpass_data {
        /* MI2S SD lines to use for playback/capture */
        unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
        unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+
+       /* The state of MI2S prepare dai_ops was called */
+       bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
+
        int hdmi_port_enable;
 
        /* low-power audio interface (LPAIF) registers */
index 1c0904acb93564ded5d409b6d027aabc4d5d575f..a76974ccfce10663194a2ee9b9a3676f35c7a430 100644 (file)
@@ -2225,6 +2225,8 @@ static char *fmt_single_name(struct device *dev, int *id)
                return NULL;
 
        name = devm_kstrdup(dev, devname, GFP_KERNEL);
+       if (!name)
+               return NULL;
 
        /* are we a "%s.%d" name (platform and SPI components) */
        found = strstr(name, dev->driver->name);
index 73076d425efb37904284b24b63fa5ff259e5f290..4893a56208e08a38f9b362929ff0d543f7ac22cd 100644 (file)
@@ -1901,7 +1901,7 @@ static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
  * @src: older version of pcm as a source
  * @pcm: latest version of pcm created from the source
  *
- * Support from vesion 4. User should free the returned pcm manually.
+ * Support from version 4. User should free the returned pcm manually.
  */
 static int pcm_new_ver(struct soc_tplg *tplg,
                       struct snd_soc_tplg_pcm *src,
@@ -2089,7 +2089,7 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
  * @src: old version of phyical link config as a source
  * @link: latest version of physical link config created from the source
  *
- * Support from vesion 4. User need free the returned link config manually.
+ * Support from version 4. User need free the returned link config manually.
  */
 static int link_new_ver(struct soc_tplg *tplg,
                        struct snd_soc_tplg_link_config *src,
@@ -2400,7 +2400,7 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
  * @src: old version of manifest as a source
  * @manifest: latest version of manifest created from the source
  *
- * Support from vesion 4. Users need free the returned manifest manually.
+ * Support from version 4. Users need free the returned manifest manually.
  */
 static int manifest_new_ver(struct soc_tplg *tplg,
                            struct snd_soc_tplg_manifest *src,
index 8d7bab433fb33b4ee63f084cb406e07d0723574c..c1f9f0f5846476a3641f849d10006390dda7796d 100644 (file)
@@ -421,11 +421,16 @@ static int ssp_dai_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
        struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
        struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+       struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
        struct sof_ipc_dai_config *config;
        struct snd_sof_dai *sof_dai;
        struct sof_ipc_reply reply;
        int ret;
 
+       /* DAI_CONFIG IPC during hw_params is not supported in older firmware */
+       if (v->abi_version < SOF_ABI_VER(3, 18, 0))
+               return 0;
+
        list_for_each_entry(sof_dai, &sdev->dai_list, list) {
                if (!sof_dai->cpu_dai_name || !sof_dai->dai_config)
                        continue;
index fd265803f7bc79454ee605c171ef724fea507596..c83fb62559616cddb4f786e98e2c8c8ef333f54f 100644 (file)
@@ -256,6 +256,7 @@ suspend:
 
        /* reset FW state */
        sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+       sdev->enabled_cores_mask = 0;
 
        return ret;
 }
index c1561237ee24b292a80d872115c7bd65701a7d47..3aa1cf262402034f0706ec8bb923e19fe4b52d1f 100644 (file)
@@ -484,10 +484,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
                dev_err(dev, "mclk register returned %d\n", ret);
                return ret;
        }
-
-       sai->sai_mclk = devm_clk_hw_get_clk(dev, hw, NULL);
-       if (IS_ERR(sai->sai_mclk))
-               return PTR_ERR(sai->sai_mclk);
+       sai->sai_mclk = hw->clk;
 
        /* register mclk provider */
        return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
index e6ff317a678520a20ff5f6109886d5018fab9043..2287f8c653150a4aa6ed555287b6a80e2b8c32e2 100644 (file)
@@ -436,7 +436,7 @@ static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface,
        if (snd_BUG_ON(altsetting >= 64 - 8))
                return false;
 
-       err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
+       err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
                              USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
                              UAC2_AS_VAL_ALT_SETTINGS << 8,
                              iface, &raw_data, sizeof(raw_data));
index a030dd65eb2805993a03ef49cd78b4b438bc9fec..9602929b7de9001df963e137c7f4c77a50b05ed8 100644 (file)
@@ -699,6 +699,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
                line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
                if (!line6->buffer_message)
                        return -ENOMEM;
+
+               ret = line6_init_midi(line6);
+               if (ret < 0)
+                       return ret;
        } else {
                ret = line6_hwdep_init(line6);
                if (ret < 0)
index cd44cb5f1310c639f5772705d4660a0ec52425bc..16e644330c4d63c9a0ff40548bac7e975b82a59f 100644 (file)
@@ -376,11 +376,6 @@ static int pod_init(struct usb_line6 *line6,
        if (err < 0)
                return err;
 
-       /* initialize MIDI subsystem: */
-       err = line6_init_midi(line6);
-       if (err < 0)
-               return err;
-
        /* initialize PCM subsystem: */
        err = line6_init_pcm(line6, &pod_pcm_properties);
        if (err < 0)
index ed158f04de80f9c0a0b4c1bfbde24b1895bce543..c2245aa93b08ff3a50cfe23691c88ce2fe40f83b 100644 (file)
@@ -159,7 +159,6 @@ static int variax_init(struct usb_line6 *line6,
                       const struct usb_device_id *id)
 {
        struct usb_line6_variax *variax = line6_to_variax(line6);
-       int err;
 
        line6->process_message = line6_variax_process_message;
        line6->disconnect = line6_variax_disconnect;
@@ -172,11 +171,6 @@ static int variax_init(struct usb_line6 *line6,
        if (variax->buffer_activate == NULL)
                return -ENOMEM;
 
-       /* initialize MIDI subsystem: */
-       err = line6_init_midi(&variax->line6);
-       if (err < 0)
-               return err;
-
        /* initiate startup procedure: */
        schedule_delayed_work(&line6->startup_work,
                              msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
index a10ac75969a8f05486b491ffe11569e3e9f60864..2c01649c70f619d6e9994e81bdc16c171bb6e471 100644 (file)
@@ -1750,7 +1750,7 @@ static struct usb_midi_in_jack_descriptor *find_usb_in_jack_descriptor(
                struct usb_midi_in_jack_descriptor *injd =
                                (struct usb_midi_in_jack_descriptor *)extra;
 
-               if (injd->bLength > 4 &&
+               if (injd->bLength >= sizeof(*injd) &&
                    injd->bDescriptorType == USB_DT_CS_INTERFACE &&
                    injd->bDescriptorSubtype == UAC_MIDI_IN_JACK &&
                                injd->bJackID == jack_id)
@@ -1773,7 +1773,7 @@ static struct usb_midi_out_jack_descriptor *find_usb_out_jack_descriptor(
                struct usb_midi_out_jack_descriptor *outjd =
                                (struct usb_midi_out_jack_descriptor *)extra;
 
-               if (outjd->bLength > 4 &&
+               if (outjd->bLength >= sizeof(*outjd) &&
                    outjd->bDescriptorType == USB_DT_CS_INTERFACE &&
                    outjd->bDescriptorSubtype == UAC_MIDI_OUT_JACK &&
                                outjd->bJackID == jack_id)
@@ -1820,7 +1820,8 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
                        outjd = find_usb_out_jack_descriptor(hostif, jack_id);
                        if (outjd) {
                                sz = USB_DT_MIDI_OUT_SIZE(outjd->bNrInputPins);
-                               iJack = *(((uint8_t *) outjd) + sz - sizeof(uint8_t));
+                               if (outjd->bLength >= sz)
+                                       iJack = *(((uint8_t *) outjd) + sz - sizeof(uint8_t));
                        }
                } else {
                        /* and out jacks connect to ins */
@@ -1956,8 +1957,12 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
                ms_ep = find_usb_ms_endpoint_descriptor(hostep);
                if (!ms_ep)
                        continue;
+               if (ms_ep->bLength <= sizeof(*ms_ep))
+                       continue;
                if (ms_ep->bNumEmbMIDIJack > 0x10)
                        continue;
+               if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
+                       continue;
                if (usb_endpoint_dir_out(ep)) {
                        if (endpoints[epidx].out_ep) {
                                if (++epidx >= MIDI_MAX_ENDPOINTS) {
index fda66b2dbb017b80bee78a793f0150dda7724534..37ad77524c0b3c40fee5d8114463b1d24d26bb84 100644 (file)
@@ -3060,7 +3060,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
        case USB_ID(0x1235, 0x8203): /* Focusrite Scarlett 6i6 2nd Gen */
        case USB_ID(0x1235, 0x8204): /* Focusrite Scarlett 18i8 2nd Gen */
        case USB_ID(0x1235, 0x8201): /* Focusrite Scarlett 18i20 2nd Gen */
-               err = snd_scarlett_gen2_controls_create(mixer);
+               err = snd_scarlett_gen2_init(mixer);
                break;
 
        case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
index 560c2ade829d08b0c501a963e35a1b4e47692fb0..4caf379d5b9912a71ae23296505cbd1b6297fc5a 100644 (file)
@@ -635,7 +635,7 @@ static int scarlett2_usb(
        /* send a second message to get the response */
 
        err = snd_usb_ctl_msg(mixer->chip->dev,
-                       usb_sndctrlpipe(mixer->chip->dev, 0),
+                       usb_rcvctrlpipe(mixer->chip->dev, 0),
                        SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP,
                        USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
                        0,
@@ -1997,38 +1997,11 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
        return usb_submit_urb(mixer->urb, GFP_KERNEL);
 }
 
-/* Entry point */
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
+                                            const struct scarlett2_device_info *info)
 {
-       const struct scarlett2_device_info *info;
        int err;
 
-       /* only use UAC_VERSION_2 */
-       if (!mixer->protocol)
-               return 0;
-
-       switch (mixer->chip->usb_id) {
-       case USB_ID(0x1235, 0x8203):
-               info = &s6i6_gen2_info;
-               break;
-       case USB_ID(0x1235, 0x8204):
-               info = &s18i8_gen2_info;
-               break;
-       case USB_ID(0x1235, 0x8201):
-               info = &s18i20_gen2_info;
-               break;
-       default: /* device not (yet) supported */
-               return -EINVAL;
-       }
-
-       if (!(mixer->chip->setup & SCARLETT2_ENABLE)) {
-               usb_audio_err(mixer->chip,
-                       "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
-                       "use options snd_usb_audio device_setup=1 "
-                       "to enable and report any issues to g@b4.vu");
-               return 0;
-       }
-
        /* Initialise private data, routing, sequence number */
        err = scarlett2_init_private(mixer, info);
        if (err < 0)
@@ -2073,3 +2046,51 @@ int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
 
        return 0;
 }
+
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
+{
+       struct snd_usb_audio *chip = mixer->chip;
+       const struct scarlett2_device_info *info;
+       int err;
+
+       /* only use UAC_VERSION_2 */
+       if (!mixer->protocol)
+               return 0;
+
+       switch (chip->usb_id) {
+       case USB_ID(0x1235, 0x8203):
+               info = &s6i6_gen2_info;
+               break;
+       case USB_ID(0x1235, 0x8204):
+               info = &s18i8_gen2_info;
+               break;
+       case USB_ID(0x1235, 0x8201):
+               info = &s18i20_gen2_info;
+               break;
+       default: /* device not (yet) supported */
+               return -EINVAL;
+       }
+
+       if (!(chip->setup & SCARLETT2_ENABLE)) {
+               usb_audio_info(chip,
+                       "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
+                       "use options snd_usb_audio vid=0x%04x pid=0x%04x "
+                       "device_setup=1 to enable and report any issues "
+                       "to g@b4.vu",
+                       USB_ID_VENDOR(chip->usb_id),
+                       USB_ID_PRODUCT(chip->usb_id));
+               return 0;
+       }
+
+       usb_audio_info(chip,
+               "Focusrite Scarlett Gen 2 Mixer Driver enabled pid=0x%04x",
+               USB_ID_PRODUCT(chip->usb_id));
+
+       err = snd_scarlett_gen2_controls_create(mixer, info);
+       if (err < 0)
+               usb_audio_err(mixer->chip,
+                             "Error initialising Scarlett Mixer Driver: %d",
+                             err);
+
+       return err;
+}
index 52e1dad77afd44bb1c0f084b0de61588cf0d7362..668c6b0cb50a63bf10c6ca99ab49f74f5b3d5872 100644 (file)
@@ -2,6 +2,6 @@
 #ifndef __USB_MIXER_SCARLETT_GEN2_H
 #define __USB_MIXER_SCARLETT_GEN2_H
 
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer);
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
 
 #endif /* __USB_MIXER_SCARLETT_GEN2_H */
diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..d0f4ecd
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+       PERF_REG_MIPS_PC,
+       PERF_REG_MIPS_R1,
+       PERF_REG_MIPS_R2,
+       PERF_REG_MIPS_R3,
+       PERF_REG_MIPS_R4,
+       PERF_REG_MIPS_R5,
+       PERF_REG_MIPS_R6,
+       PERF_REG_MIPS_R7,
+       PERF_REG_MIPS_R8,
+       PERF_REG_MIPS_R9,
+       PERF_REG_MIPS_R10,
+       PERF_REG_MIPS_R11,
+       PERF_REG_MIPS_R12,
+       PERF_REG_MIPS_R13,
+       PERF_REG_MIPS_R14,
+       PERF_REG_MIPS_R15,
+       PERF_REG_MIPS_R16,
+       PERF_REG_MIPS_R17,
+       PERF_REG_MIPS_R18,
+       PERF_REG_MIPS_R19,
+       PERF_REG_MIPS_R20,
+       PERF_REG_MIPS_R21,
+       PERF_REG_MIPS_R22,
+       PERF_REG_MIPS_R23,
+       PERF_REG_MIPS_R24,
+       PERF_REG_MIPS_R25,
+       PERF_REG_MIPS_R26,
+       PERF_REG_MIPS_R27,
+       PERF_REG_MIPS_R28,
+       PERF_REG_MIPS_R29,
+       PERF_REG_MIPS_R30,
+       PERF_REG_MIPS_R31,
+       PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
index cc79856896a192a0ef6e4b22f9c433f4c2f11103..4ba87de32be0026a8843e72ac36ae4e723011eee 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _ASM_POWERPC_ERRNO_H
 #define _ASM_POWERPC_ERRNO_H
 
+#undef EDEADLOCK
 #include <asm-generic/errno.h>
 
 #undef EDEADLOCK
index cc96e26d69f7ae9fa04ccdbc35d1f72f93d735a2..ac37830ae9412f2a9efd91248775b9a98f1b9d17 100644 (file)
@@ -84,7 +84,7 @@
 
 /* CPU types for specific tunings: */
 #define X86_FEATURE_K8                 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7                 ( 3*32+ 5) /* "" Athlon */
+/* FREE, was #define X86_FEATURE_K7                    ( 3*32+ 5) "" Athlon */
 #define X86_FEATURE_P3                 ( 3*32+ 6) /* "" P3 */
 #define X86_FEATURE_P4                 ( 3*32+ 7) /* "" P4 */
 #define X86_FEATURE_CONSTANT_TSC       ( 3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_EPT_AD             ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 #define X86_FEATURE_VMCALL             ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
 #define X86_FEATURE_VMW_VMMCALL                ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK           ( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT                ( 8*32+21) /* "" PV vcpu_is_preempted function */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
 #define X86_FEATURE_FENCE_SWAPGS_KERNEL        (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
 #define X86_FEATURE_SPLIT_LOCK_DETECT  (11*32+ 6) /* #AC for split lock */
 #define X86_FEATURE_PER_THREAD_MBA     (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
+#define X86_FEATURE_SGX1               (11*32+ 8) /* "" Basic SGX */
+#define X86_FEATURE_SGX2               (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVIC               (15*32+13) /* Virtual Interrupt Controller */
 #define X86_FEATURE_V_VMSAVE_VMLOAD    (15*32+15) /* Virtual VMSAVE VMLOAD */
 #define X86_FEATURE_VGIF               (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_V_SPEC_CTRL                (15*32+20) /* Virtual SPEC_CTRL */
 #define X86_FEATURE_SVME_ADDR_CHK      (15*32+28) /* "" SVME addr check */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
 #define X86_FEATURE_AVX512_VPOPCNTDQ   (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_BUS_LOCK_DETECT    (16*32+24) /* Bus Lock detect */
 #define X86_FEATURE_CLDEMOTE           (16*32+25) /* CLDEMOTE instruction */
 #define X86_FEATURE_MOVDIRI            (16*32+27) /* MOVDIRI instruction */
 #define X86_FEATURE_MOVDIR64B          (16*32+28) /* MOVDIR64B instruction */
 #define X86_FEATURE_MD_CLEAR           (18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT    (18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_SERIALIZE          (18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_HYBRID_CPU         (18*32+15) /* "" This part has CPUs of more than one type */
 #define X86_FEATURE_TSXLDTRK           (18*32+16) /* TSX Suspend Load Address Tracking */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_ARCH_LBR           (18*32+19) /* Intel ARCH LBR */
index b7dd944dc8673f908b3227246622629318ce59e9..8f28fafa98b32e6f9b775f4c8b2d16713f9b7686 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD        0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD         (1 << (X86_FEATURE_ENQCMD & 31))
 
 #ifdef CONFIG_X86_SGX
 # define DISABLE_SGX   0
index 45029354e0a8b42f94e379ecafdf9ba7d50f24bf..211ba3375ee9602b4671df659ee599b830be13fa 100644 (file)
 #define MSR_PEBS_DATA_CFG              0x000003f2
 #define MSR_IA32_DS_AREA               0x00000600
 #define MSR_IA32_PERF_CAPABILITIES     0x00000345
+#define PERF_CAP_METRICS_IDX           15
+#define PERF_CAP_PT_IDX                        16
+
 #define MSR_PEBS_LD_LAT_THRESHOLD      0x000003f6
 
 #define MSR_IA32_RTIT_CTL              0x00000570
 #define DEBUGCTLMSR_LBR                        (1UL <<  0) /* last branch recording */
 #define DEBUGCTLMSR_BTF_SHIFT          1
 #define DEBUGCTLMSR_BTF                        (1UL <<  1) /* single-step on branches */
+#define DEBUGCTLMSR_BUS_LOCK_DETECT    (1UL <<  2)
 #define DEBUGCTLMSR_TR                 (1UL <<  6)
 #define DEBUGCTLMSR_BTS                        (1UL <<  7)
 #define DEBUGCTLMSR_BTINT              (1UL <<  8)
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1                        0xc001001a
 #define MSR_K8_TOP_MEM2                        0xc001001d
-#define MSR_K8_SYSCFG                  0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT  23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT      BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG               0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT       23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT   BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
 #define MSR_K8_INT_PENDING_MSG         0xc0010055
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK                0x18000000
index 5a3022c8af82b8af1b88e5ed9d8b9af8afc5c9dd..0662f644aad9da71e25b9f7554d20d6981a5e7c7 100644 (file)
@@ -437,6 +437,8 @@ struct kvm_vmx_nested_state_hdr {
                __u16 flags;
        } smm;
 
+       __u16 pad;
+
        __u32 flags;
        __u64 preemption_timer_deadline;
 };
index b8e650a985e358f9ef91c140d910a781f93edd7b..946d761adbd3df33ed49c2589cb4042d3709d617 100644 (file)
@@ -27,6 +27,7 @@
 
 
 #define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000
+#define VMX_EXIT_REASONS_SGX_ENCLAVE_MODE      0x08000000
 
 #define EXIT_REASON_EXCEPTION_NMI       0
 #define EXIT_REASON_EXTERNAL_INTERRUPT  1
index 1e299ac73c8698622c7e631a4571b5b88e8b32f7..1cc9da6e29c7926be24bea853952f115c4fa5a3f 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
 
 .pushsection .noinstr.text, "ax"
index 0bfd26e4ca9e938af774c5b1e4fea753d47d156c..9827ae267f96e00660870f775fb8d2d297c20445 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 #include <asm/export.h>
 
 /*
index 078cbd2ba651dc2096bd5d4e33916f3641b206a3..de7f30f99af384304b0ef865d1060e9b1672a55b 100644 (file)
@@ -4,4 +4,8 @@
 
 #include "../../../../include/linux/bootconfig.h"
 
+#ifndef fallthrough
+# define fallthrough
+#endif
+
 #endif
index 7362bef1a36834df0a72ed06abbde64f523f505c..6cd6080cac04cf44517476b0378ddb500f39bd92 100644 (file)
@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
        }
        /* TODO: Ensure the @path is initramfs/initrd image */
        if (fstat(fd, &stat) < 0) {
+               ret = -errno;
                pr_err("Failed to get the size of %s\n", path);
                goto out;
        }
index 790944c356025f31df49787f3e60407a025cabda..baee8591ac76a4f9a869a558aac52fab37733e55 100644 (file)
@@ -30,7 +30,8 @@ CGROUP COMMANDS
 |      *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
 |              **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
 |               **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
-|               **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** }
+|               **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
+|               **sock_release** }
 |      *ATTACH_FLAGS* := { **multi** | **override** }
 
 DESCRIPTION
@@ -106,6 +107,7 @@ DESCRIPTION
                  **getpeername6** call to getpeername(2) for an inet6 socket (since 5.8);
                  **getsockname4** call to getsockname(2) for an inet4 socket (since 5.8);
                  **getsockname6** call to getsockname(2) for an inet6 socket (since 5.8).
+                 **sock_release** closing a userspace inet socket (since 5.9).
 
        **bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
                  Detach *PROG* from the cgroup *CGROUP* and attach type
index 358c7309d4191e01deb727f5e5df36da5a3703c0..fe1b38e7e887d3013b4ed940dd493375029255b9 100644 (file)
@@ -44,7 +44,7 @@ PROG COMMANDS
 |              **cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** |
 |               **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
 |              **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
-|              **cgroup/getsockopt** | **cgroup/setsockopt** |
+|              **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
 |              **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
 |      }
 |       *ATTACH_TYPE* := {
index d67518bcbd4484e165fc4317036180832ee22eaf..cc33c5824a2f277444b7987e0bda90d900074574 100644 (file)
@@ -478,7 +478,7 @@ _bpftool()
                                 cgroup/recvmsg4 cgroup/recvmsg6 \
                                 cgroup/post_bind4 cgroup/post_bind6 \
                                 cgroup/sysctl cgroup/getsockopt \
-                                cgroup/setsockopt struct_ops \
+                                cgroup/setsockopt cgroup/sock_release struct_ops \
                                 fentry fexit freplace sk_lookup" -- \
                                                    "$cur" ) )
                             return 0
@@ -1021,7 +1021,7 @@ _bpftool()
                         device bind4 bind6 post_bind4 post_bind6 connect4 connect6 \
                         getpeername4 getpeername6 getsockname4 getsockname6 \
                         sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl getsockopt \
-                        setsockopt'
+                        setsockopt sock_release'
                     local ATTACH_FLAGS='multi override'
                     local PROG_TYPE='id pinned tag name'
                     case $prev in
@@ -1032,7 +1032,7 @@ _bpftool()
                         ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
                         post_bind4|post_bind6|connect4|connect6|getpeername4|\
                         getpeername6|getsockname4|getsockname6|sendmsg4|sendmsg6|\
-                        recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt)
+                        recvmsg4|recvmsg6|sysctl|getsockopt|setsockopt|sock_release)
                             COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
                                 "$cur" ) )
                             return 0
index d901cc1b904afebbab9cf5e1b638597ef9f1bf25..6e53b1d393f4a497a700ee9d4da97361b109a3dd 100644 (file)
@@ -28,7 +28,8 @@
        "                        connect6 | getpeername4 | getpeername6 |\n"   \
        "                        getsockname4 | getsockname6 | sendmsg4 |\n"   \
        "                        sendmsg6 | recvmsg4 | recvmsg6 |\n"           \
-       "                        sysctl | getsockopt | setsockopt }"
+       "                        sysctl | getsockopt | setsockopt |\n"         \
+       "                        sock_release }"
 
 static unsigned int query_flags;
 
index 3f067d2d7584508821eb068ea553e7afd561f6cd..da4846c9856af9e1f314bc8f300863ebb7469b50 100644 (file)
@@ -2138,7 +2138,7 @@ static int do_help(int argc, char **argv)
                "                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
                "                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
                "                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
-               "                 cgroup/getsockopt | cgroup/setsockopt |\n"
+               "                 cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
                "                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
                "       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
                "                        flow_dissector }\n"
index cd72016c3cfa71159511549a5dc74e7cc36e82b0..715092fc6a239e3f848198ca7bf6047e2363fb37 100644 (file)
@@ -51,39 +51,39 @@ subdir-obj-y :=
 build-file := $(dir)/Build
 -include $(build-file)
 
-quiet_cmd_flex  = FLEX     $@
-quiet_cmd_bison = BISON    $@
+quiet_cmd_flex  = FLEX    $@
+quiet_cmd_bison = BISON   $@
 
 # Create directory unless it exists
-quiet_cmd_mkdir = MKDIR    $(dir $@)
+quiet_cmd_mkdir = MKDIR   $(dir $@)
       cmd_mkdir = mkdir -p $(dir $@)
      rule_mkdir = $(if $(wildcard $(dir $@)),,@$(call echo-cmd,mkdir) $(cmd_mkdir))
 
 # Compile command
-quiet_cmd_cc_o_c = CC       $@
+quiet_cmd_cc_o_c = CC      $@
       cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
 
-quiet_cmd_host_cc_o_c = HOSTCC   $@
+quiet_cmd_host_cc_o_c = HOSTCC  $@
       cmd_host_cc_o_c = $(HOSTCC) $(host_c_flags) -c -o $@ $<
 
-quiet_cmd_cxx_o_c = CXX      $@
+quiet_cmd_cxx_o_c = CXX     $@
       cmd_cxx_o_c = $(CXX) $(cxx_flags) -c -o $@ $<
 
-quiet_cmd_cpp_i_c = CPP      $@
+quiet_cmd_cpp_i_c = CPP     $@
       cmd_cpp_i_c = $(CC) $(c_flags) -E -o $@ $<
 
-quiet_cmd_cc_s_c = AS       $@
+quiet_cmd_cc_s_c = AS      $@
       cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
 
-quiet_cmd_gen = GEN      $@
+quiet_cmd_gen = GEN     $@
 
 # Link agregate command
 # If there's nothing to link, create empty $@ object.
-quiet_cmd_ld_multi = LD       $@
+quiet_cmd_ld_multi = LD      $@
       cmd_ld_multi = $(if $(strip $(obj-y)),\
                      $(LD) -r -o $@  $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
 
-quiet_cmd_host_ld_multi = HOSTLD   $@
+quiet_cmd_host_ld_multi = HOSTLD  $@
       cmd_host_ld_multi = $(if $(strip $(obj-y)),\
                           $(HOSTLD) -r -o $@  $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@)
 
index 7f475d59a0974f17d7a9765c643b798a1dc6ee83..87d112650dfbb0ec9ac6825ae2746c61b2659960 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
-               __builtin_constant_p((l) > (h)), (l) > (h), 0)))
+               __is_constexpr((l) > (h)), (l) > (h), 0)))
 #else
 /*
  * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
index 81b8aae5a8559c9fabf0665c48d8e8659db4f239..435ddd72d2c464fdedd33889190009dde0a3bda9 100644 (file)
@@ -3,4 +3,12 @@
 
 #include <vdso/const.h>
 
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+       (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
 #endif /* _LINUX_CONST_H */
index ce58cff99b66530a02e2bc85c7264b236f5b343d..6de5a7fc066b8fd4ca47dd276e0b899fad1658cf 100644 (file)
@@ -863,9 +863,18 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
 __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
 #define __NR_mount_setattr 442
 __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
+#define __NR_quotactl_path 443
+__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+
+#define __NR_landlock_create_ruleset 444
+__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
+#define __NR_landlock_add_rule 445
+__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
+#define __NR_landlock_restrict_self 446
+__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
 
 #undef __NR_syscalls
-#define __NR_syscalls 443
+#define __NR_syscalls 447
 
 /*
  * 32 bit systems traditionally used different
index 0827037c54847898c6771146c5ac285d57849ede..67b94bc3c88522fc29d2a3adb25cba34521d64cf 100644 (file)
@@ -625,30 +625,147 @@ struct drm_gem_open {
        __u64 size;
 };
 
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
 #define DRM_CAP_DUMB_BUFFER            0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a CRTC index in the high bits of
+ * &drm_wait_vblank_request.type.
+ *
+ * Starting kernel version 2.6.39, this capability is always set to 1.
+ */
 #define DRM_CAP_VBLANK_HIGH_CRTC       0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
 #define DRM_CAP_DUMB_PREFERRED_DEPTH   0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
 #define DRM_CAP_DUMB_PREFER_SHADOW     0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors. See
+ * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ */
 #define DRM_CAP_PRIME                  0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ */
 #define  DRM_PRIME_CAP_IMPORT          0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ */
 #define  DRM_PRIME_CAP_EXPORT          0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting kernel version 4.15, this capability is always set to 1.
+ */
 #define DRM_CAP_TIMESTAMP_MONOTONIC    0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ */
 #define DRM_CAP_ASYNC_PAGE_FLIP                0x7
-/*
- * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
- * combination for the hardware cursor. The intention is that a hardware
- * agnostic userspace can query a cursor plane size to use.
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
  *
  * Note that the cross-driver contract is to merely return a valid size;
  * drivers are free to attach another meaning on top, eg. i915 returns the
  * maximum plane size.
  */
 #define DRM_CAP_CURSOR_WIDTH           0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
 #define DRM_CAP_CURSOR_HEIGHT          0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
 #define DRM_CAP_ADDFB2_MODIFIERS       0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
 #define DRM_CAP_PAGE_FLIP_TARGET       0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting kernel version 4.12, this capability is always set to 1.
+ */
 #define DRM_CAP_CRTC_IN_VBLANK_EVENT   0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
 #define DRM_CAP_SYNCOBJ                0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
 #define DRM_CAP_SYNCOBJ_TIMELINE       0x14
 
 /* DRM_IOCTL_GET_CAP ioctl argument type */
index 1987e2ea79a3b86263529b5d3207ac89c767e6fd..ddc47bbf48b6d7af2d191ed40a5e508f8790e5bb 100644 (file)
@@ -943,6 +943,7 @@ struct drm_i915_gem_exec_object {
        __u64 offset;
 };
 
+/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
 struct drm_i915_gem_execbuffer {
        /**
         * List of buffers to be validated with their relocations to be
index f44eb0a04afdd8cea369af1395c3637a5f69122d..4c32e97dcdf00a22ebbcfc3c03f849ea90b2d9f5 100644 (file)
@@ -185,7 +185,7 @@ struct fsxattr {
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
 /*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
  * (see uapi/linux/blkzoned.h)
  */
 
index f6afee209620d81c8b0314c8f03c1f05d1949fbb..79d9c44d1ad734b14f815b1b5b246abddb66ced4 100644 (file)
@@ -8,6 +8,7 @@
  * Note: you must update KVM_API_VERSION if you change this interface.
  */
 
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
@@ -1078,6 +1079,10 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_DIRTY_LOG_RING 192
 #define KVM_CAP_X86_BUS_LOCK_EXIT 193
 #define KVM_CAP_PPC_DAWR1 194
+#define KVM_CAP_SET_GUEST_DEBUG2 195
+#define KVM_CAP_SGX_ATTRIBUTE 196
+#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
+#define KVM_CAP_PTP_KVM 198
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1671,6 +1676,8 @@ enum sev_cmd_id {
        KVM_SEV_CERT_EXPORT,
        /* Attestation report */
        KVM_SEV_GET_ATTESTATION_REPORT,
+       /* Guest Migration Extension */
+       KVM_SEV_SEND_CANCEL,
 
        KVM_SEV_NR_MAX,
 };
@@ -1729,6 +1736,45 @@ struct kvm_sev_attestation_report {
        __u32 len;
 };
 
+struct kvm_sev_send_start {
+       __u32 policy;
+       __u64 pdh_cert_uaddr;
+       __u32 pdh_cert_len;
+       __u64 plat_certs_uaddr;
+       __u32 plat_certs_len;
+       __u64 amd_certs_uaddr;
+       __u32 amd_certs_len;
+       __u64 session_uaddr;
+       __u32 session_len;
+};
+
+struct kvm_sev_send_update_data {
+       __u64 hdr_uaddr;
+       __u32 hdr_len;
+       __u64 guest_uaddr;
+       __u32 guest_len;
+       __u64 trans_uaddr;
+       __u32 trans_len;
+};
+
+struct kvm_sev_receive_start {
+       __u32 handle;
+       __u32 policy;
+       __u64 pdh_uaddr;
+       __u32 pdh_len;
+       __u64 session_uaddr;
+       __u32 session_len;
+};
+
+struct kvm_sev_receive_update_data {
+       __u64 hdr_uaddr;
+       __u32 hdr_len;
+       __u64 guest_uaddr;
+       __u32 guest_len;
+       __u64 trans_uaddr;
+       __u32 trans_len;
+};
+
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3         (1 << 1)
 #define KVM_DEV_ASSIGN_MASK_INTX       (1 << 2)
@@ -1834,8 +1880,8 @@ struct kvm_hyperv_eventfd {
  * conversion after harvesting an entry.  Also, it must not skip any
  * dirty bits, so that dirty bits are always harvested in sequence.
  */
-#define KVM_DIRTY_GFN_F_DIRTY           BIT(0)
-#define KVM_DIRTY_GFN_F_RESET           BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY           _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET           _BITUL(1)
 #define KVM_DIRTY_GFN_F_MASK            0x3
 
 /*
index 14332f4cf8167c91dd71224cf3d59fbe021abdf1..f92880a15645a4177406eca6e153665ea1ce0ea0 100644 (file)
@@ -127,6 +127,7 @@ enum perf_sw_ids {
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,
        PERF_COUNT_SW_DUMMY                     = 9,
        PERF_COUNT_SW_BPF_OUTPUT                = 10,
+       PERF_COUNT_SW_CGROUP_SWITCHES           = 11,
 
        PERF_COUNT_SW_MAX,                      /* non-ABI */
 };
@@ -326,6 +327,7 @@ enum perf_event_read_format {
 #define PERF_ATTR_SIZE_VER4    104     /* add: sample_regs_intr */
 #define PERF_ATTR_SIZE_VER5    112     /* add: aux_watermark */
 #define PERF_ATTR_SIZE_VER6    120     /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7    128     /* add: sig_data */
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
@@ -404,7 +406,10 @@ struct perf_event_attr {
                                cgroup         :  1, /* include cgroup events */
                                text_poke      :  1, /* include text poke events */
                                build_id       :  1, /* use build id in mmap2 events */
-                               __reserved_1   : 29;
+                               inherit_thread :  1, /* children only inherit if cloned with CLONE_THREAD */
+                               remove_on_exec :  1, /* event is removed from task on exec */
+                               sigtrap        :  1, /* send synchronous SIGTRAP on event */
+                               __reserved_1   : 26;
 
        union {
                __u32           wakeup_events;    /* wakeup every n events */
@@ -456,6 +461,12 @@ struct perf_event_attr {
        __u16   __reserved_2;
        __u32   aux_sample_size;
        __u32   __reserved_3;
+
+       /*
+        * User provided data if sigtrap=1, passed back to user via
+        * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+        */
+       __u64   sig_data;
 };
 
 /*
@@ -1171,10 +1182,15 @@ enum perf_callchain_context {
 /**
  * PERF_RECORD_AUX::flags bits
  */
-#define PERF_AUX_FLAG_TRUNCATED                0x01    /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE                0x02    /* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL          0x04    /* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION                0x08    /* sample collided with another */
+#define PERF_AUX_FLAG_TRUNCATED                        0x01    /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE                        0x02    /* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL                  0x04    /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION                        0x08    /* sample collided with another */
+#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK     0xff00  /* PMU specific trace format type */
+
+/* CoreSight PMU AUX buffer formats */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT       0x0000 /* Default for backward compatibility */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW             0x0100 /* Raw format of the source */
 
 #define PERF_FLAG_FD_NO_GROUP          (1UL << 0)
 #define PERF_FLAG_FD_OUTPUT            (1UL << 1)
index 667f1aed091c23c45494264a9c6940ed5ff623ef..18a9f59dc067f333d3a1c86343439917606cb36b 100644 (file)
@@ -255,4 +255,8 @@ struct prctl_mm_map {
 # define SYSCALL_DISPATCH_FILTER_ALLOW 0
 # define SYSCALL_DISPATCH_FILTER_BLOCK 1
 
+/* Set/get enabled arm64 pointer authentication keys */
+#define PR_PAC_SET_ENABLED_KEYS                60
+#define PR_PAC_GET_ENABLED_KEYS                61
+
 #endif /* _LINUX_PRCTL_H */
index feaf46451e838dab520da542cb823eddd107578b..3a9f2037bd23f08360a6475c39b5daa1bf6a7379 100644 (file)
@@ -111,7 +111,7 @@ OPTIONS
 --tracepoints::
         retrieve statistics from tracepoints
 
-*z*::
+-z::
 --skip-zero-records::
         omit records with all zeros in logging mode
 
index e2a3cf4378140f2ccabe45df70d40accc63d7bb3..c41d9b2b59aceff4ca23767527d187eafbc3ce92 100644 (file)
@@ -3216,6 +3216,9 @@ static int add_dummy_ksym_var(struct btf *btf)
        const struct btf_var_secinfo *vs;
        const struct btf_type *sec;
 
+       if (!btf)
+               return 0;
+
        sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
                                            BTF_KIND_DATASEC);
        if (sec_btf_id < 0)
index ee426226928f1283ddb94ce867f7922bc1e71593..acbcf6c7bdf82cf219d8e985d8f9a6e5b69e5489 100644 (file)
 #define ELF_C_READ_MMAP ELF_C_READ
 #endif
 
+/* Older libelf all end up in this expression, for both 32 and 64 bit */
+#ifndef GELF_ST_VISIBILITY
+#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
+
 #define BTF_INFO_ENC(kind, kind_flag, vlen) \
        ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
 #define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
index cedf3ede75455ffe57648d1168b480db9ffc1aba..523aa4157f801c82686f19a2cc9403a6ca6113bb 100644 (file)
@@ -19,6 +19,7 @@
 #include <objtool/elf.h>
 #include <objtool/arch.h>
 #include <objtool/warn.h>
+#include <objtool/endianness.h>
 #include <arch/elf.h>
 
 static int is_x86_64(const struct elf *elf)
@@ -725,7 +726,7 @@ static int elf_add_alternative(struct elf *elf,
                return -1;
        }
 
-       alt->cpuid = cpuid;
+       alt->cpuid = bswap_if_needed(cpuid);
        alt->instrlen = orig_len;
        alt->replacementlen = repl_len;
 
@@ -746,6 +747,10 @@ int arch_rewrite_retpolines(struct objtool_file *file)
 
        list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
 
+               if (insn->type != INSN_JUMP_DYNAMIC &&
+                   insn->type != INSN_CALL_DYNAMIC)
+                       continue;
+
                if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
                        continue;
 
index d08f5f3670f88c28c2580c3be870834a180f0ab5..41bca1d13d8e40bea0a4bf15402c1389585e4d29 100644 (file)
@@ -717,7 +717,7 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
 
 struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
 {
-       struct section *symtab;
+       struct section *symtab, *symtab_shndx;
        struct symbol *sym;
        Elf_Data *data;
        Elf_Scn *s;
@@ -762,12 +762,36 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
        data->d_buf = &sym->sym;
        data->d_size = sizeof(sym->sym);
        data->d_align = 1;
+       data->d_type = ELF_T_SYM;
 
        sym->idx = symtab->len / sizeof(sym->sym);
 
        symtab->len += data->d_size;
        symtab->changed = true;
 
+       symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+       if (symtab_shndx) {
+               s = elf_getscn(elf->elf, symtab_shndx->idx);
+               if (!s) {
+                       WARN_ELF("elf_getscn");
+                       return NULL;
+               }
+
+               data = elf_newdata(s);
+               if (!data) {
+                       WARN_ELF("elf_newdata");
+                       return NULL;
+               }
+
+               data->d_buf = &sym->sym.st_size; /* conveniently 0 */
+               data->d_size = sizeof(Elf32_Word);
+               data->d_align = 4;
+               data->d_type = ELF_T_WORD;
+
+               symtab_shndx->len += 4;
+               symtab_shndx->changed = true;
+       }
+
        sym->sec = find_section_by_index(elf, 0);
 
        elf_add_symbol(elf, sym);
index 1dcec73c910c192434b2a48ce519a3e9abcbff34..bcf3eca5afbe989b7df7fc75040df967162a06f7 100644 (file)
@@ -108,9 +108,9 @@ displayed as follows:
 
        perf script --itrace=ibxwpe -F+flags
 
-The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
-system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
-in transaction, respectively.
+The flags are "bcrosyiABExgh" which stand for branch, call, return, conditional,
+system, asynchronous, interrupt, transaction abort, trace begin, trace end,
+in transaction, VM-entry, and VM-exit respectively.
 
 perf script also supports higher level ways to dump instruction traces:
 
index 5b8b610750393be05868e580f281f6a88b7bb9b1..48a5f5b26dd4450a1140336059c0b5d05f3afc3f 100644 (file)
@@ -183,14 +183,15 @@ OPTIONS
        At this point usage is displayed, and perf-script exits.
 
        The flags field is synthesized and may have a value when Instruction
-       Trace decoding. The flags are "bcrosyiABEx" which stand for branch,
+       Trace decoding. The flags are "bcrosyiABExgh" which stand for branch,
        call, return, conditional, system, asynchronous, interrupt,
-       transaction abort, trace begin, trace end, and in transaction,
+       transaction abort, trace begin, trace end, in transaction, VM-Entry, and VM-Exit
        respectively. Known combinations of flags are printed more nicely e.g.
        "call" for "bc", "return" for "br", "jcc" for "bo", "jmp" for "b",
        "int" for "bci", "iret" for "bri", "syscall" for "bcs", "sysret" for "brs",
        "async" for "by", "hw int" for "bcyi", "tx abrt" for "bA", "tr strt" for "bB",
-       "tr end" for "bE". However the "x" flag will be display separately in those
+       "tr end" for "bE", "vmentry" for "bcg", "vmexit" for "bch".
+       However the "x" flag will be displayed separately in those
        cases e.g. "jcc     (x)" for a condition branch within a transaction.
 
        The callindent field is synthesized and may have a value when
index 0d6619064a83835bee32b577e711e37a58c2a148..73df23dd664c14f0fa1cb7de78331e7347a5cc6c 100644 (file)
@@ -90,7 +90,6 @@ endif
 ifeq ($(ARCH),mips)
   NO_PERF_REGS := 0
   CFLAGS += -I$(OUTPUT)arch/mips/include/generated
-  CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi
   LIBUNWIND_LIBS = -lunwind -lunwind-mips
 endif
 
@@ -540,6 +539,7 @@ ifndef NO_LIBELF
       ifdef LIBBPF_DYNAMIC
         ifeq ($(feature-libbpf), 1)
           EXTLIBS += -lbpf
+          $(call detected,CONFIG_LIBBPF_DYNAMIC)
         else
           dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
         endif
index 2303256b7d05e5f466425893d9a1c258c03963af..73d18e0ed6f6aaccdf3127df2710563b428ec2b5 100644 (file)
@@ -71,7 +71,7 @@ struct kvm_reg_events_ops kvm_reg_events_ops[] = {
                .name   = "vmexit",
                .ops    = &exit_events,
        },
-       { NULL },
+       { NULL, NULL },
 };
 
 const char * const kvm_skip_events[] = {
index 91649690b52f1b25ac229ad4e224366861ab66b9..9cd1c34f31b5040434060a7b242be5946bd5c988 100644 (file)
 439    n64     faccessat2                      sys_faccessat2
 440    n64     process_madvise                 sys_process_madvise
 441    n64     epoll_pwait2                    sys_epoll_pwait2
+442    n64     mount_setattr                   sys_mount_setattr
+# 443 reserved for quotactl_path
+444    n64     landlock_create_ruleset         sys_landlock_create_ruleset
+445    n64     landlock_add_rule               sys_landlock_add_rule
+446    n64     landlock_restrict_self          sys_landlock_restrict_self
index 0b2480cf3e4793bdedf18942696d8b2f118aa564..8f052ff4058ce5534327ea64550af52bff5f5de2 100644 (file)
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
 442    common  mount_setattr                   sys_mount_setattr
+# 443 reserved for quotactl_path
+444    common  landlock_create_ruleset         sys_landlock_create_ruleset
+445    common  landlock_add_rule               sys_landlock_add_rule
+446    common  landlock_restrict_self          sys_landlock_restrict_self
index 3abef2144dac79b69b25852ce28ffb2e5b284afe..0690263df1dd0b58268941074441cb4e820744bc 100644 (file)
 440  common    process_madvise         sys_process_madvise             sys_process_madvise
 441  common    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
 442  common    mount_setattr           sys_mount_setattr               sys_mount_setattr
+# 443 reserved for quotactl_path
+444  common    landlock_create_ruleset sys_landlock_create_ruleset     sys_landlock_create_ruleset
+445  common    landlock_add_rule       sys_landlock_add_rule           sys_landlock_add_rule
+446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
index 7bf01cbe582f03bc26adb4092bb9023384ec217f..ce18119ea0d0f0c6deaf54959a9a7de1ed2e89ef 100644 (file)
 440    common  process_madvise         sys_process_madvise
 441    common  epoll_pwait2            sys_epoll_pwait2
 442    common  mount_setattr           sys_mount_setattr
+# 443 reserved for quotactl_path
+444    common  landlock_create_ruleset sys_landlock_create_ruleset
+445    common  landlock_add_rule       sys_landlock_add_rule
+446    common  landlock_restrict_self  sys_landlock_restrict_self
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
index 87f5b1a4a7fa84d4704da213aed22ddd3824434e..833405c27dae2abd66324422a54cb8f6fa0a88f3 100644 (file)
@@ -80,6 +80,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
        if (!perf_header__has_feat(&session->header, HEADER_BUILD_ID))
                with_hits = true;
 
+       if (zstd_init(&(session->zstd_data), 0) < 0)
+               pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
        /*
         * in pipe-mode, the only way to get the buildids is to parse
         * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
index 3337b5f93336d945cee5e2febfae2b0116e24d1b..84803abeb94251b737200901ba64f91ed67edfd6 100644 (file)
@@ -2714,6 +2714,12 @@ int cmd_record(int argc, const char **argv)
                rec->no_buildid = true;
        }
 
+       if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
+               pr_err("Kernel has no cgroup sampling support.\n");
+               err = -EINVAL;
+               goto out_opts;
+       }
+
        if (rec->opts.kcore)
                rec->data.is_dir = true;
 
index 5a830ae09418edb76e56d206127a3171b38e25b9..f9f74a5143159ffbcc0d322604fc8ccfb35a1b97 100644 (file)
@@ -572,7 +572,8 @@ static int enable_counters(void)
         * - we have initial delay configured
         */
        if (!target__none(&target) || stat_config.initial_delay) {
-               evlist__enable(evsel_list);
+               if (!all_counters_use_bpf)
+                       evlist__enable(evsel_list);
                if (stat_config.initial_delay > 0)
                        pr_info(EVLIST_ENABLED_MSG);
        }
@@ -581,13 +582,19 @@ static int enable_counters(void)
 
 static void disable_counters(void)
 {
+       struct evsel *counter;
+
        /*
         * If we don't have tracee (attaching to task or cpu), counters may
         * still be running. To get accurate group ratios, we must stop groups
         * from counting before reading their constituent counters.
         */
-       if (!target__none(&target))
-               evlist__disable(evsel_list);
+       if (!target__none(&target)) {
+               evlist__for_each_entry(evsel_list, counter)
+                       bpf_counter__disable(counter);
+               if (!all_counters_use_bpf)
+                       evlist__disable(evsel_list);
+       }
 }
 
 static volatile int workload_exec_errno;
index dd8ff287e9302f8bf7774e47c5c2373a1e5fc9ee..c783558332b858129fb822c36951e1cf1ca29120 100755 (executable)
@@ -39,6 +39,7 @@ arch/x86/lib/x86-opcode-map.txt
 arch/x86/tools/gen-insn-attr-x86.awk
 arch/arm/include/uapi/asm/perf_regs.h
 arch/arm64/include/uapi/asm/perf_regs.h
+arch/mips/include/uapi/asm/perf_regs.h
 arch/powerpc/include/uapi/asm/perf_regs.h
 arch/s390/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/perf_regs.h
index 20cb91ef06ffc9ef97c3aff3b191098ea08bb83d..2f6b67189b426c33b07f66f6cd6209743afc297b 100644 (file)
@@ -443,6 +443,8 @@ int main(int argc, const char **argv)
        const char *cmd;
        char sbuf[STRERR_BUFSIZE];
 
+       perf_debug_setup();
+
        /* libsubcmd init */
        exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
        pager_init(PERF_PAGER_ENVIRONMENT);
@@ -531,8 +533,6 @@ int main(int argc, const char **argv)
         */
        pthread__block_sigwinch();
 
-       perf_debug_setup();
-
        while (1) {
                static int done_help;
 
index 616f29098c710cb84150f512f8f4d912649cedac..605be14f441c8153ad308b0ab2b3e95dbdd69076 100644 (file)
@@ -1,46 +1,56 @@
 [
   {
-    "EventCode": "1003C",
+    "EventCode": "0x1003C",
     "EventName": "PM_EXEC_STALL_DMISS_L2L3",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
   },
   {
-    "EventCode": "34056",
+    "EventCode": "0x1E054",
+    "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+  },
+  {
+    "EventCode": "0x34054",
+    "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+  },
+  {
+    "EventCode": "0x34056",
     "EventName": "PM_EXEC_STALL_LOAD_FINISH",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ."
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
   },
   {
-    "EventCode": "3006C",
+    "EventCode": "0x3006C",
     "EventName": "PM_RUN_CYC_SMT2_MODE",
     "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
   },
   {
-    "EventCode": "300F4",
+    "EventCode": "0x300F4",
     "EventName": "PM_RUN_INST_CMPL_CONC",
     "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
   },
   {
-    "EventCode": "4C016",
+    "EventCode": "0x4C016",
     "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
   },
   {
-    "EventCode": "4D014",
+    "EventCode": "0x4D014",
     "EventName": "PM_EXEC_STALL_LOAD",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "4D016",
+    "EventCode": "0x4D016",
     "EventName": "PM_EXEC_STALL_PTESYNC",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "401EA",
+    "EventCode": "0x401EA",
     "EventName": "PM_THRESH_EXC_128",
     "BriefDescription": "Threshold counter exceeded a value of 128."
   },
   {
-    "EventCode": "400F6",
+    "EventCode": "0x400F6",
     "EventName": "PM_BR_MPRED_CMPL",
     "BriefDescription": "A mispredicted branch completed. Includes direction and target."
   }
index 703cd431ae5b0e0c0c6a08d67bc1da85b1a09731..54acb55e2c8c683bde7021c3631d06c136661284 100644 (file)
@@ -1,6 +1,6 @@
 [
   {
-    "EventCode": "4016E",
+    "EventCode": "0x4016E",
     "EventName": "PM_THRESH_NOT_MET",
     "BriefDescription": "Threshold counter did not meet threshold."
   }
index eac8609dcc90d61ae052fbe5482b3660970a2a1a..558f9530f54ecb9d5fb48d0b54e4c343b4e46d2f 100644 (file)
 [
   {
-    "EventCode": "10004",
+    "EventCode": "0x10004",
     "EventName": "PM_EXEC_STALL_TRANSLATION",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
   },
   {
-    "EventCode": "10010",
+    "EventCode": "0x10006",
+    "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+  },
+  {
+    "EventCode": "0x10010",
     "EventName": "PM_PMC4_OVERFLOW",
     "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
   },
   {
-    "EventCode": "10020",
+    "EventCode": "0x10020",
     "EventName": "PM_PMC4_REWIND",
     "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
   },
   {
-    "EventCode": "10038",
+    "EventCode": "0x10038",
     "EventName": "PM_DISP_STALL_TRANSLATION",
     "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
   },
   {
-    "EventCode": "1003A",
+    "EventCode": "0x1003A",
     "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
   },
   {
-    "EventCode": "1E050",
+    "EventCode": "0x1D05E",
+    "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+  },
+  {
+    "EventCode": "0x1E050",
     "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
   },
   {
-    "EventCode": "1F054",
+    "EventCode": "0x1F054",
     "EventName": "PM_DTLB_HIT",
     "BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
   },
   {
-    "EventCode": "101E8",
+    "EventCode": "0x10064",
+    "EventName": "PM_DISP_STALL_IC_L2",
+    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+  },
+  {
+    "EventCode": "0x101E8",
     "EventName": "PM_THRESH_EXC_256",
     "BriefDescription": "Threshold counter exceeded a count of 256."
   },
   {
-    "EventCode": "101EC",
+    "EventCode": "0x101EC",
     "EventName": "PM_THRESH_MET",
     "BriefDescription": "Threshold exceeded."
   },
   {
-    "EventCode": "100F2",
+    "EventCode": "0x100F2",
     "EventName": "PM_1PLUS_PPC_CMPL",
     "BriefDescription": "Cycles in which at least one instruction is completed by this thread."
   },
   {
-    "EventCode": "100F6",
+    "EventCode": "0x100F6",
     "EventName": "PM_IERAT_MISS",
     "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
   },
   {
-    "EventCode": "100F8",
+    "EventCode": "0x100F8",
     "EventName": "PM_DISP_STALL_CYC",
     "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
   },
   {
-    "EventCode": "20114",
+    "EventCode": "0x20006",
+    "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+  },
+  {
+    "EventCode": "0x20114",
     "EventName": "PM_MRK_L2_RC_DISP",
     "BriefDescription": "Marked instruction RC dispatched in L2."
   },
   {
-    "EventCode": "2C010",
+    "EventCode": "0x2C010",
     "EventName": "PM_EXEC_STALL_LSU",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
   },
   {
-    "EventCode": "2C016",
+    "EventCode": "0x2C016",
     "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
     "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
   },
   {
-    "EventCode": "2C01E",
+    "EventCode": "0x2C01E",
     "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
   },
   {
-    "EventCode": "2D01A",
+    "EventCode": "0x2D01A",
     "EventName": "PM_DISP_STALL_IC_MISS",
     "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
   },
   {
-    "EventCode": "2D01C",
-    "EventName": "PM_CMPL_STALL_STCX",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
-  },
-  {
-    "EventCode": "2E018",
+    "EventCode": "0x2E018",
     "EventName": "PM_DISP_STALL_FETCH",
     "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
   },
   {
-    "EventCode": "2E01A",
+    "EventCode": "0x2E01A",
     "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
   },
   {
-    "EventCode": "2C142",
+    "EventCode": "0x2C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "24050",
+    "EventCode": "0x24050",
     "EventName": "PM_IOPS_DISP",
     "BriefDescription": "Internal Operations dispatched. PM_IOPS_DISP / PM_INST_DISP will show the average number of internal operations per PowerPC instruction."
   },
   {
-    "EventCode": "2405E",
+    "EventCode": "0x2405E",
     "EventName": "PM_ISSUE_CANCEL",
     "BriefDescription": "An instruction issued and the issue was later cancelled. Only one cancel per PowerPC instruction."
   },
   {
-    "EventCode": "200FA",
+    "EventCode": "0x200FA",
     "EventName": "PM_BR_TAKEN_CMPL",
     "BriefDescription": "Branch Taken instruction completed."
   },
   {
-    "EventCode": "30012",
+    "EventCode": "0x30004",
+    "EventName": "PM_DISP_STALL_FLUSH",
+    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+  },
+  {
+    "EventCode": "0x3000A",
+    "EventName": "PM_DISP_STALL_ITLB_MISS",
+    "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
+  },
+  {
+    "EventCode": "0x30012",
     "EventName": "PM_FLUSH_COMPLETION",
     "BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
   },
   {
-    "EventCode": "30014",
+    "EventCode": "0x30014",
     "EventName": "PM_EXEC_STALL_STORE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "30018",
+    "EventCode": "0x30018",
     "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
   },
   {
-    "EventCode": "30026",
+    "EventCode": "0x30026",
     "EventName": "PM_EXEC_STALL_STORE_MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
   },
   {
-    "EventCode": "3012A",
+    "EventCode": "0x3012A",
     "EventName": "PM_MRK_L2_RC_DONE",
     "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
   },
   {
-    "EventCode": "3F046",
+    "EventCode": "0x3F046",
     "EventName": "PM_ITLB_HIT_1G",
     "BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "34058",
+    "EventCode": "0x34058",
     "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
     "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
   },
   {
-    "EventCode": "3D05C",
+    "EventCode": "0x3D05C",
     "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
   },
   {
-    "EventCode": "3E052",
+    "EventCode": "0x3E052",
     "EventName": "PM_DISP_STALL_IC_L3",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
   },
   {
-    "EventCode": "3E054",
+    "EventCode": "0x3E054",
     "EventName": "PM_LD_MISS_L1",
     "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
   },
   {
-    "EventCode": "301EA",
+    "EventCode": "0x301EA",
     "EventName": "PM_THRESH_EXC_1024",
     "BriefDescription": "Threshold counter exceeded a value of 1024."
   },
   {
-    "EventCode": "300FA",
+    "EventCode": "0x300FA",
     "EventName": "PM_INST_FROM_L3MISS",
     "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
   },
   {
-    "EventCode": "40006",
+    "EventCode": "0x40006",
     "EventName": "PM_ISSUE_KILL",
     "BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
   },
   {
-    "EventCode": "40116",
+    "EventCode": "0x40116",
     "EventName": "PM_MRK_LARX_FIN",
     "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "4C010",
+    "EventCode": "0x4C010",
     "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
   },
   {
-    "EventCode": "4D01E",
+    "EventCode": "0x4D01E",
     "EventName": "PM_DISP_STALL_BR_MPRED",
     "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
   },
   {
-    "EventCode": "4E010",
+    "EventCode": "0x4E010",
     "EventName": "PM_DISP_STALL_IC_L3MISS",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
   },
   {
-    "EventCode": "4E01A",
+    "EventCode": "0x4E01A",
     "EventName": "PM_DISP_STALL_HELD_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
   },
   {
-    "EventCode": "44056",
+    "EventCode": "0x4003C",
+    "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+  },
+  {
+    "EventCode": "0x44056",
     "EventName": "PM_VECTOR_ST_CMPL",
     "BriefDescription": "Vector store instructions completed."
   }
index 016d8de0e14ac281717db3d3d8f3897657908893..b5a0d65219631fb170debedeb49bef93114394a5 100644 (file)
@@ -1,11 +1,11 @@
 [
   {
-    "EventCode": "1E058",
+    "EventCode": "0x1E058",
     "EventName": "PM_STCX_FAIL_FIN",
     "BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "4E050",
+    "EventCode": "0x4E050",
     "EventName": "PM_STCX_PASS_FIN",
     "BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock."
   }
index 93a5a59106480ab55fa93957105a3b67315b7568..58b5dfe3a273150ce9c7fd7ab38852c275bfef24 100644 (file)
 [
   {
-    "EventCode": "1002C",
+    "EventCode": "0x1002C",
     "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
     "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
   },
   {
-    "EventCode": "10132",
+    "EventCode": "0x10132",
     "EventName": "PM_MRK_INST_ISSUED",
     "BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
   },
   {
-    "EventCode": "101E0",
+    "EventCode": "0x101E0",
     "EventName": "PM_MRK_INST_DISP",
     "BriefDescription": "The thread has dispatched a randomly sampled marked instruction."
   },
   {
-    "EventCode": "101E2",
+    "EventCode": "0x101E2",
     "EventName": "PM_MRK_BR_TAKEN_CMPL",
     "BriefDescription": "Marked Branch Taken instruction completed."
   },
   {
-    "EventCode": "20112",
+    "EventCode": "0x20112",
     "EventName": "PM_MRK_NTF_FIN",
     "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
   },
   {
-    "EventCode": "2C01C",
+    "EventCode": "0x2C01C",
     "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
   },
   {
-    "EventCode": "20138",
+    "EventCode": "0x20138",
     "EventName": "PM_MRK_ST_NEST",
     "BriefDescription": "A store has been sampled/marked and is at the point of execution where it has completed in the core and can no longer be flushed. At this point the store is sent to the L2."
   },
   {
-    "EventCode": "2013A",
+    "EventCode": "0x2013A",
     "EventName": "PM_MRK_BRU_FIN",
     "BriefDescription": "Marked Branch instruction finished."
   },
   {
-    "EventCode": "2C144",
+    "EventCode": "0x2C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]."
   },
   {
-    "EventCode": "24156",
+    "EventCode": "0x24156",
     "EventName": "PM_MRK_STCX_FIN",
     "BriefDescription": "Marked conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "24158",
+    "EventCode": "0x24158",
     "EventName": "PM_MRK_INST",
     "BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens."
   },
   {
-    "EventCode": "2415C",
+    "EventCode": "0x2415C",
     "EventName": "PM_MRK_BR_CMPL",
     "BriefDescription": "A marked branch completed. All branches are included."
   },
   {
-    "EventCode": "200FD",
+    "EventCode": "0x200FD",
     "EventName": "PM_L1_ICACHE_MISS",
     "BriefDescription": "Demand iCache Miss."
   },
   {
-    "EventCode": "30130",
+    "EventCode": "0x30130",
     "EventName": "PM_MRK_INST_FIN",
     "BriefDescription": "Marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
   },
   {
-    "EventCode": "34146",
+    "EventCode": "0x34146",
     "EventName": "PM_MRK_LD_CMPL",
     "BriefDescription": "Marked loads completed."
   },
   {
-    "EventCode": "3E158",
+    "EventCode": "0x3E158",
     "EventName": "PM_MRK_STCX_FAIL",
     "BriefDescription": "Marked conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "3E15A",
+    "EventCode": "0x3E15A",
     "EventName": "PM_MRK_ST_FIN",
     "BriefDescription": "The marked instruction was a store of any kind."
   },
   {
-    "EventCode": "30068",
+    "EventCode": "0x30068",
     "EventName": "PM_L1_ICACHE_RELOADED_PREF",
     "BriefDescription": "Counts all Icache prefetch reloads (includes demand turned into prefetch)."
   },
   {
-    "EventCode": "301E4",
+    "EventCode": "0x301E4",
     "EventName": "PM_MRK_BR_MPRED_CMPL",
     "BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
   },
   {
-    "EventCode": "300F6",
+    "EventCode": "0x300F6",
     "EventName": "PM_LD_DEMAND_MISS_L1",
     "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
   },
   {
-    "EventCode": "300FE",
+    "EventCode": "0x300FE",
     "EventName": "PM_DATA_FROM_L3MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
   },
   {
-    "EventCode": "40012",
+    "EventCode": "0x40012",
     "EventName": "PM_L1_ICACHE_RELOADED_ALL",
     "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
   },
   {
-    "EventCode": "40134",
+    "EventCode": "0x40134",
     "EventName": "PM_MRK_INST_TIMEO",
     "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
   },
   {
-    "EventCode": "4003C",
-    "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
-  },
-  {
-    "EventCode": "4505A",
+    "EventCode": "0x4505A",
     "EventName": "PM_SP_FLOP_CMPL",
     "BriefDescription": "Single Precision floating point instructions completed."
   },
   {
-    "EventCode": "4D058",
+    "EventCode": "0x4D058",
     "EventName": "PM_VECTOR_FLOP_CMPL",
     "BriefDescription": "Vector floating point instructions completed."
   },
   {
-    "EventCode": "4D05A",
+    "EventCode": "0x4D05A",
     "EventName": "PM_NON_MATH_FLOP_CMPL",
     "BriefDescription": "Non Math instructions completed."
   },
   {
-    "EventCode": "401E0",
+    "EventCode": "0x401E0",
     "EventName": "PM_MRK_INST_CMPL",
     "BriefDescription": "Marked instruction completed."
   },
   {
-    "EventCode": "400FE",
+    "EventCode": "0x400FE",
     "EventName": "PM_DATA_FROM_MEMORY",
     "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
   }
index b01141eeebee1b45de89e08023940bb3f9cb5f6f..843b51f531e9533bbc2374316e9c550af2831340 100644 (file)
 [
   {
-    "EventCode": "1000A",
+    "EventCode": "0x1000A",
     "EventName": "PM_PMC3_REWIND",
     "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
   },
   {
-    "EventCode": "1C040",
+    "EventCode": "0x1C040",
     "EventName": "PM_XFER_FROM_SRC_PMC1",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "1C142",
+    "EventCode": "0x1C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "1C144",
+    "EventCode": "0x1C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
   },
   {
-    "EventCode": "1C056",
+    "EventCode": "0x1C056",
     "EventName": "PM_DERAT_MISS_4K",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "1C058",
+    "EventCode": "0x1C058",
     "EventName": "PM_DTLB_MISS_16G",
     "BriefDescription": "Data TLB reload (after a miss) page size 16G. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "1C05C",
+    "EventCode": "0x1C05C",
     "EventName": "PM_DTLB_MISS_2M",
     "BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "1E056",
+    "EventCode": "0x1E056",
     "EventName": "PM_EXEC_STALL_STORE_PIPE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
   },
   {
-    "EventCode": "1F150",
+    "EventCode": "0x1F150",
     "EventName": "PM_MRK_ST_L2_CYC",
     "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
   },
   {
-    "EventCode": "10062",
+    "EventCode": "0x10062",
     "EventName": "PM_LD_L3MISS_PEND_CYC",
     "BriefDescription": "Cycles L3 miss was pending for this thread."
   },
   {
-    "EventCode": "20010",
+    "EventCode": "0x20010",
     "EventName": "PM_PMC1_OVERFLOW",
     "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
   },
   {
-    "EventCode": "2001A",
+    "EventCode": "0x2001A",
     "EventName": "PM_ITLB_HIT",
     "BriefDescription": "The PTE required to translate the instruction address was resident in the TLB (instruction TLB access/IERAT reload). Applies to both HPT and RPT. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "2003E",
+    "EventCode": "0x2003E",
     "EventName": "PM_PTESYNC_FIN",
     "BriefDescription": "Ptesync instruction finished in the store unit. Only one ptesync can finish at a time."
   },
   {
-    "EventCode": "2C040",
+    "EventCode": "0x2C040",
     "EventName": "PM_XFER_FROM_SRC_PMC2",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "2C054",
+    "EventCode": "0x2C054",
     "EventName": "PM_DERAT_MISS_64K",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "2C056",
+    "EventCode": "0x2C056",
     "EventName": "PM_DTLB_MISS_4K",
     "BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "2D154",
+    "EventCode": "0x2D154",
     "EventName": "PM_MRK_DERAT_MISS_64K",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "200F6",
+    "EventCode": "0x200F6",
     "EventName": "PM_DERAT_MISS",
     "BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "3000A",
-    "EventName": "PM_DISP_STALL_ITLB_MISS",
-    "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
-  },
-  {
-    "EventCode": "30016",
+    "EventCode": "0x30016",
     "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
   },
   {
-    "EventCode": "3C040",
+    "EventCode": "0x3C040",
     "EventName": "PM_XFER_FROM_SRC_PMC3",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "3C142",
+    "EventCode": "0x3C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "3C144",
+    "EventCode": "0x3C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
   },
   {
-    "EventCode": "3C054",
+    "EventCode": "0x3C054",
     "EventName": "PM_DERAT_MISS_16M",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "3C056",
+    "EventCode": "0x3C056",
     "EventName": "PM_DTLB_MISS_64K",
     "BriefDescription": "Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "3C058",
+    "EventCode": "0x3C058",
     "EventName": "PM_LARX_FIN",
     "BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "301E2",
+    "EventCode": "0x301E2",
     "EventName": "PM_MRK_ST_CMPL",
     "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
   },
   {
-    "EventCode": "300FC",
+    "EventCode": "0x300FC",
     "EventName": "PM_DTLB_MISS",
     "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
   },
   {
-    "EventCode": "4D02C",
+    "EventCode": "0x4D02C",
     "EventName": "PM_PMC1_REWIND",
     "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
   },
   {
-    "EventCode": "4003E",
+    "EventCode": "0x4003E",
     "EventName": "PM_LD_CMPL",
     "BriefDescription": "Loads completed."
   },
   {
-    "EventCode": "4C040",
+    "EventCode": "0x4C040",
     "EventName": "PM_XFER_FROM_SRC_PMC4",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "4C142",
+    "EventCode": "0x4C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "4C144",
+    "EventCode": "0x4C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
   },
   {
-    "EventCode": "4C056",
+    "EventCode": "0x4C056",
     "EventName": "PM_DTLB_MISS_16M",
     "BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "4C05A",
+    "EventCode": "0x4C05A",
     "EventName": "PM_DTLB_MISS_1G",
     "BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "4C15E",
+    "EventCode": "0x4C15E",
     "EventName": "PM_MRK_DTLB_MISS_64K",
     "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "4D056",
+    "EventCode": "0x4D056",
     "EventName": "PM_NON_FMA_FLOP_CMPL",
     "BriefDescription": "Non FMA instruction completed."
   },
   {
-    "EventCode": "40164",
+    "EventCode": "0x40164",
     "EventName": "PM_MRK_DERAT_MISS_2M",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   }
index a119e56cbf1c383adb051eeb47b8c5dcb8f7caad..7d0de1a2860b49cf4c58b37c901210076fd47c9a 100644 (file)
 [
   {
-    "EventCode": "10016",
+    "EventCode": "0x10016",
     "EventName": "PM_VSU0_ISSUE",
     "BriefDescription": "VSU instructions issued to VSU pipe 0."
   },
   {
-    "EventCode": "1001C",
+    "EventCode": "0x1001C",
     "EventName": "PM_ULTRAVISOR_INST_CMPL",
     "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
   },
   {
-    "EventCode": "100F0",
+    "EventCode": "0x100F0",
     "EventName": "PM_CYC",
     "BriefDescription": "Processor cycles."
   },
   {
-    "EventCode": "10134",
+    "EventCode": "0x10134",
     "EventName": "PM_MRK_ST_DONE_L2",
     "BriefDescription": "Marked stores completed in L2 (RC machine done)."
   },
   {
-    "EventCode": "1505E",
+    "EventCode": "0x1505E",
     "EventName": "PM_LD_HIT_L1",
     "BriefDescription": "Loads that finished without experiencing an L1 miss."
   },
   {
-    "EventCode": "1D05E",
-    "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
-  },
-  {
-    "EventCode": "1E054",
-    "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
-  },
-  {
-    "EventCode": "1E05A",
-    "EventName": "PM_CMPL_STALL_LWSYNC",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
-  },
-  {
-    "EventCode": "1F056",
+    "EventCode": "0x1F056",
     "EventName": "PM_DISP_SS0_2_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
   },
   {
-    "EventCode": "1F15C",
+    "EventCode": "0x1F15C",
     "EventName": "PM_MRK_STCX_L2_CYC",
     "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
   },
   {
-    "EventCode": "10066",
+    "EventCode": "0x10066",
     "EventName": "PM_ADJUNCT_CYC",
     "BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
   },
   {
-    "EventCode": "101E4",
+    "EventCode": "0x101E4",
     "EventName": "PM_MRK_L1_ICACHE_MISS",
     "BriefDescription": "Marked Instruction suffered an icache Miss."
   },
   {
-    "EventCode": "101EA",
+    "EventCode": "0x101EA",
     "EventName": "PM_MRK_L1_RELOAD_VALID",
     "BriefDescription": "Marked demand reload."
   },
   {
-    "EventCode": "100F4",
+    "EventCode": "0x100F4",
     "EventName": "PM_FLOP_CMPL",
     "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
   },
   {
-    "EventCode": "100FA",
+    "EventCode": "0x100FA",
     "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
     "BriefDescription": "Cycles when at least one thread has the run latch set."
   },
   {
-    "EventCode": "100FC",
+    "EventCode": "0x100FC",
     "EventName": "PM_LD_REF_L1",
     "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
   },
   {
-    "EventCode": "20006",
-    "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
-  },
-  {
-    "EventCode": "2000C",
+    "EventCode": "0x2000C",
     "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
     "BriefDescription": "Cycles when the run latch is set for all threads."
   },
   {
-    "EventCode": "2E010",
+    "EventCode": "0x2E010",
     "EventName": "PM_ADJUNCT_INST_CMPL",
     "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
   },
   {
-    "EventCode": "2E014",
+    "EventCode": "0x2E014",
     "EventName": "PM_STCX_FIN",
     "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "20130",
+    "EventCode": "0x20130",
     "EventName": "PM_MRK_INST_DECODED",
     "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
   },
   {
-    "EventCode": "20132",
+    "EventCode": "0x20132",
     "EventName": "PM_MRK_DFU_ISSUE",
     "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
   },
   {
-    "EventCode": "20134",
+    "EventCode": "0x20134",
     "EventName": "PM_MRK_FXU_ISSUE",
     "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
   },
   {
-    "EventCode": "2505C",
+    "EventCode": "0x2505C",
     "EventName": "PM_VSU_ISSUE",
     "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
   },
   {
-    "EventCode": "2F054",
+    "EventCode": "0x2F054",
     "EventName": "PM_DISP_SS1_2_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
   },
   {
-    "EventCode": "2F056",
+    "EventCode": "0x2F056",
     "EventName": "PM_DISP_SS1_4_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
   },
   {
-    "EventCode": "2006C",
+    "EventCode": "0x2006C",
     "EventName": "PM_RUN_CYC_SMT4_MODE",
     "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
   },
   {
-    "EventCode": "201E0",
+    "EventCode": "0x201E0",
     "EventName": "PM_MRK_DATA_FROM_MEMORY",
     "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
   },
   {
-    "EventCode": "201E4",
+    "EventCode": "0x201E4",
     "EventName": "PM_MRK_DATA_FROM_L3MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
   },
   {
-    "EventCode": "201E8",
+    "EventCode": "0x201E8",
     "EventName": "PM_THRESH_EXC_512",
     "BriefDescription": "Threshold counter exceeded a value of 512."
   },
   {
-    "EventCode": "200F2",
+    "EventCode": "0x200F2",
     "EventName": "PM_INST_DISP",
     "BriefDescription": "PowerPC instructions dispatched."
   },
   {
-    "EventCode": "30132",
+    "EventCode": "0x30132",
     "EventName": "PM_MRK_VSU_FIN",
     "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
   },
   {
-    "EventCode": "30038",
+    "EventCode": "0x30038",
     "EventName": "PM_EXEC_STALL_DMISS_LMEM",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
   },
   {
-    "EventCode": "3F04A",
+    "EventCode": "0x3F04A",
     "EventName": "PM_LSU_ST5_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST2 port."
   },
   {
-    "EventCode": "34054",
-    "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
-  },
-  {
-    "EventCode": "3405A",
+    "EventCode": "0x3405A",
     "EventName": "PM_PRIVILEGED_INST_CMPL",
     "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
   },
   {
-    "EventCode": "3F150",
+    "EventCode": "0x3F150",
     "EventName": "PM_MRK_ST_DRAIN_CYC",
     "BriefDescription": "Cycles to drain st from core to L2."
   },
   {
-    "EventCode": "3F054",
+    "EventCode": "0x3F054",
     "EventName": "PM_DISP_SS0_4_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
   },
   {
-    "EventCode": "3F056",
+    "EventCode": "0x3F056",
     "EventName": "PM_DISP_SS0_8_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
   },
   {
-    "EventCode": "30162",
+    "EventCode": "0x30162",
     "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
     "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
   },
   {
-    "EventCode": "40114",
+    "EventCode": "0x40114",
     "EventName": "PM_MRK_START_PROBE_NOP_DISP",
     "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
   },
   {
-    "EventCode": "4001C",
+    "EventCode": "0x4001C",
     "EventName": "PM_VSU_FIN",
     "BriefDescription": "VSU instructions finished."
   },
   {
-    "EventCode": "4C01A",
+    "EventCode": "0x4C01A",
     "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
   },
   {
-    "EventCode": "4D012",
+    "EventCode": "0x4D012",
     "EventName": "PM_PMC3_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
   },
   {
-    "EventCode": "4D022",
+    "EventCode": "0x4D022",
     "EventName": "PM_HYPERVISOR_INST_CMPL",
     "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
   },
   {
-    "EventCode": "4D026",
+    "EventCode": "0x4D026",
     "EventName": "PM_ULTRAVISOR_CYC",
     "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
   },
   {
-    "EventCode": "4D028",
+    "EventCode": "0x4D028",
     "EventName": "PM_PRIVILEGED_CYC",
     "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
   },
   {
-    "EventCode": "40030",
+    "EventCode": "0x40030",
     "EventName": "PM_INST_FIN",
     "BriefDescription": "Instructions finished."
   },
   {
-    "EventCode": "44146",
+    "EventCode": "0x44146",
     "EventName": "PM_MRK_STCX_CORE_CYC",
     "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
   },
   {
-    "EventCode": "44054",
+    "EventCode": "0x44054",
     "EventName": "PM_VECTOR_LD_CMPL",
     "BriefDescription": "Vector load instructions completed."
   },
   {
-    "EventCode": "45054",
+    "EventCode": "0x45054",
     "EventName": "PM_FMA_CMPL",
     "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
   },
   {
-    "EventCode": "45056",
+    "EventCode": "0x45056",
     "EventName": "PM_SCALAR_FLOP_CMPL",
     "BriefDescription": "Scalar floating point instructions completed."
   },
   {
-    "EventCode": "4505C",
+    "EventCode": "0x4505C",
     "EventName": "PM_MATH_FLOP_CMPL",
     "BriefDescription": "Math floating point instructions completed."
   },
   {
-    "EventCode": "4D05E",
+    "EventCode": "0x4D05E",
     "EventName": "PM_BR_CMPL",
     "BriefDescription": "A branch completed. All branches are included."
   },
   {
-    "EventCode": "4E15E",
+    "EventCode": "0x4E15E",
     "EventName": "PM_MRK_INST_FLUSHED",
     "BriefDescription": "The marked instruction was flushed."
   },
   {
-    "EventCode": "401E6",
+    "EventCode": "0x401E6",
     "EventName": "PM_MRK_INST_FROM_L3MISS",
     "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
   },
   {
-    "EventCode": "401E8",
+    "EventCode": "0x401E8",
     "EventName": "PM_MRK_DATA_FROM_L2MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
   },
   {
-    "EventCode": "400F0",
+    "EventCode": "0x400F0",
     "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
     "BriefDescription": "Load Missed L1, counted at finish time."
   },
   {
-    "EventCode": "400FA",
+    "EventCode": "0x400FA",
     "EventName": "PM_RUN_INST_CMPL",
     "BriefDescription": "Completed PowerPC instructions gated by the run latch."
   }
index b61b5cc157ee32dd89addd0529f54d8e48504f7d..b8aded6045faaf9c9697c192e63bfa7bd7833f99 100644 (file)
 [
   {
-    "EventCode": "100FE",
+    "EventCode": "0x100FE",
     "EventName": "PM_INST_CMPL",
     "BriefDescription": "PowerPC instructions completed."
   },
   {
-    "EventCode": "10006",
-    "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
-  },
-  {
-    "EventCode": "1000C",
+    "EventCode": "0x1000C",
     "EventName": "PM_LSU_LD0_FIN",
     "BriefDescription": "LSU Finished an internal operation in LD0 port."
   },
   {
-    "EventCode": "1000E",
+    "EventCode": "0x1000E",
     "EventName": "PM_MMA_ISSUED",
     "BriefDescription": "MMA instructions issued."
   },
   {
-    "EventCode": "10012",
+    "EventCode": "0x10012",
     "EventName": "PM_LSU_ST0_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST0 port."
   },
   {
-    "EventCode": "10014",
+    "EventCode": "0x10014",
     "EventName": "PM_LSU_ST4_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST4 port."
   },
   {
-    "EventCode": "10018",
+    "EventCode": "0x10018",
     "EventName": "PM_IC_DEMAND_CYC",
     "BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
   },
   {
-    "EventCode": "10022",
+    "EventCode": "0x10022",
     "EventName": "PM_PMC2_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
   },
   {
-    "EventCode": "10024",
+    "EventCode": "0x10024",
     "EventName": "PM_PMC5_OVERFLOW",
     "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
   },
   {
-    "EventCode": "10058",
+    "EventCode": "0x10058",
     "EventName": "PM_EXEC_STALL_FIN_AT_DISP",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline finished at dispatch and did not require execution in the LSU, BRU or VSU."
   },
   {
-    "EventCode": "1005A",
+    "EventCode": "0x1005A",
     "EventName": "PM_FLUSH_MPRED",
     "BriefDescription": "A flush occurred due to a mispredicted branch. Includes target and direction."
   },
   {
-    "EventCode": "1C05A",
+    "EventCode": "0x1C05A",
     "EventName": "PM_DERAT_MISS_2M",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "10064",
-    "EventName": "PM_DISP_STALL_IC_L2",
-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+    "EventCode": "0x1E05A",
+    "EventName": "PM_CMPL_STALL_LWSYNC",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
   },
   {
-    "EventCode": "10068",
+    "EventCode": "0x10068",
     "EventName": "PM_BR_FIN",
     "BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional."
   },
   {
-    "EventCode": "1006A",
+    "EventCode": "0x1006A",
     "EventName": "PM_FX_LSU_FIN",
     "BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
   },
   {
-    "EventCode": "1006C",
+    "EventCode": "0x1006C",
     "EventName": "PM_RUN_CYC_ST_MODE",
     "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
   },
   {
-    "EventCode": "20004",
+    "EventCode": "0x20004",
     "EventName": "PM_ISSUE_STALL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
   },
   {
-    "EventCode": "2000A",
+    "EventCode": "0x2000A",
     "EventName": "PM_HYPERVISOR_CYC",
     "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
   },
   {
-    "EventCode": "2000E",
+    "EventCode": "0x2000E",
     "EventName": "PM_LSU_LD1_FIN",
     "BriefDescription": "LSU Finished an internal operation in LD1 port."
   },
   {
-    "EventCode": "2C014",
+    "EventCode": "0x2C014",
     "EventName": "PM_CMPL_STALL_SPECIAL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
   },
   {
-    "EventCode": "2C018",
+    "EventCode": "0x2C018",
     "EventName": "PM_EXEC_STALL_DMISS_L3MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
   },
   {
-    "EventCode": "2D010",
+    "EventCode": "0x2D010",
     "EventName": "PM_LSU_ST1_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST1 port."
   },
   {
-    "EventCode": "2D012",
+    "EventCode": "0x2D012",
     "EventName": "PM_VSU1_ISSUE",
     "BriefDescription": "VSU instructions issued to VSU pipe 1."
   },
   {
-    "EventCode": "2D018",
+    "EventCode": "0x2D018",
     "EventName": "PM_EXEC_STALL_VSU",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
   },
   {
-    "EventCode": "2E01E",
+    "EventCode": "0x2D01C",
+    "EventName": "PM_CMPL_STALL_STCX",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
+  },
+  {
+    "EventCode": "0x2E01E",
     "EventName": "PM_EXEC_STALL_NTC_FLUSH",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children."
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
   },
   {
-    "EventCode": "2013C",
+    "EventCode": "0x2013C",
     "EventName": "PM_MRK_FX_LSU_FIN",
     "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
   },
   {
-    "EventCode": "2405A",
+    "EventCode": "0x2405A",
     "EventName": "PM_NTC_FIN",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
   },
   {
-    "EventCode": "201E2",
+    "EventCode": "0x201E2",
     "EventName": "PM_MRK_LD_MISS_L1",
     "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
   },
   {
-    "EventCode": "200F4",
+    "EventCode": "0x200F4",
     "EventName": "PM_RUN_CYC",
     "BriefDescription": "Processor cycles gated by the run latch."
   },
   {
-    "EventCode": "30004",
-    "EventName": "PM_DISP_STALL_FLUSH",
-    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
-  },
-  {
-    "EventCode": "30008",
+    "EventCode": "0x30008",
     "EventName": "PM_EXEC_STALL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
   },
   {
-    "EventCode": "3001A",
+    "EventCode": "0x3001A",
     "EventName": "PM_LSU_ST2_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST2 port."
   },
   {
-    "EventCode": "30020",
+    "EventCode": "0x30020",
     "EventName": "PM_PMC2_REWIND",
     "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
   },
   {
-    "EventCode": "30022",
+    "EventCode": "0x30022",
     "EventName": "PM_PMC4_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
   },
   {
-    "EventCode": "30024",
+    "EventCode": "0x30024",
     "EventName": "PM_PMC6_OVERFLOW",
     "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
   },
   {
-    "EventCode": "30028",
+    "EventCode": "0x30028",
     "EventName": "PM_CMPL_STALL_MEM_ECC",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
   },
   {
-    "EventCode": "30036",
+    "EventCode": "0x30036",
     "EventName": "PM_EXEC_STALL_SIMPLE_FX",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "3003A",
+    "EventCode": "0x3003A",
     "EventName": "PM_CMPL_STALL_EXCEPTION",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete."
   },
   {
-    "EventCode": "3F044",
+    "EventCode": "0x3F044",
     "EventName": "PM_VSU2_ISSUE",
     "BriefDescription": "VSU instructions issued to VSU pipe 2."
   },
   {
-    "EventCode": "30058",
+    "EventCode": "0x30058",
     "EventName": "PM_TLBIE_FIN",
     "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
   },
   {
-    "EventCode": "3D058",
+    "EventCode": "0x3D058",
     "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
     "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
   },
   {
-    "EventCode": "30066",
+    "EventCode": "0x30066",
     "EventName": "PM_LSU_FIN",
     "BriefDescription": "LSU Finished an internal operation (up to 4 per cycle)."
   },
   {
-    "EventCode": "40004",
+    "EventCode": "0x40004",
     "EventName": "PM_FXU_ISSUE",
     "BriefDescription": "A fixed point instruction was issued to the VSU."
   },
   {
-    "EventCode": "40008",
+    "EventCode": "0x40008",
     "EventName": "PM_NTC_ALL_FIN",
     "BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
   },
   {
-    "EventCode": "40010",
+    "EventCode": "0x40010",
     "EventName": "PM_PMC3_OVERFLOW",
     "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
   },
   {
-    "EventCode": "4C012",
+    "EventCode": "0x4C012",
     "EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve."
   },
   {
-    "EventCode": "4C018",
+    "EventCode": "0x4C018",
     "EventName": "PM_CMPL_STALL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
   },
   {
-    "EventCode": "4C01E",
+    "EventCode": "0x4C01E",
     "EventName": "PM_LSU_ST3_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST3 port."
   },
   {
-    "EventCode": "4D018",
+    "EventCode": "0x4D018",
     "EventName": "PM_EXEC_STALL_BRU",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit."
   },
   {
-    "EventCode": "4D01A",
+    "EventCode": "0x4D01A",
     "EventName": "PM_CMPL_STALL_HWSYNC",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a hwsync waiting for response from L2 before completing."
   },
   {
-    "EventCode": "4D01C",
+    "EventCode": "0x4D01C",
     "EventName": "PM_EXEC_STALL_TLBIEL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest."
   },
   {
-    "EventCode": "4E012",
+    "EventCode": "0x4E012",
     "EventName": "PM_EXEC_STALL_UNKNOWN",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
   },
   {
-    "EventCode": "4D020",
+    "EventCode": "0x4D020",
     "EventName": "PM_VSU3_ISSUE",
     "BriefDescription": "VSU instruction was issued to VSU pipe 3."
   },
   {
-    "EventCode": "40132",
+    "EventCode": "0x40132",
     "EventName": "PM_MRK_LSU_FIN",
     "BriefDescription": "LSU marked instruction finish."
   },
   {
-    "EventCode": "45058",
+    "EventCode": "0x45058",
     "EventName": "PM_IC_MISS_CMPL",
     "BriefDescription": "Non-speculative icache miss, counted at completion."
   },
   {
-    "EventCode": "4D050",
+    "EventCode": "0x4D050",
     "EventName": "PM_VSU_NON_FLOP_CMPL",
     "BriefDescription": "Non-floating point VSU instructions completed."
   },
   {
-    "EventCode": "4D052",
+    "EventCode": "0x4D052",
     "EventName": "PM_2FLOP_CMPL",
     "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
   },
   {
-    "EventCode": "400F2",
+    "EventCode": "0x400F2",
     "EventName": "PM_1PLUS_PPC_DISP",
     "BriefDescription": "Cycles at least one Instr Dispatched."
   },
   {
-    "EventCode": "400F8",
+    "EventCode": "0x400F8",
     "EventName": "PM_FLUSH",
     "BriefDescription": "Flush (any type)."
   }
index ea122a91ceb0ffe2d3658351c442d9aee66dfcb1..b5d1bd39cfb2243c7f145676f65e30562f7c3486 100644 (file)
@@ -1,21 +1,21 @@
 [
   {
-    "EventCode": "301E8",
+    "EventCode": "0x301E8",
     "EventName": "PM_THRESH_EXC_64",
     "BriefDescription": "Threshold counter exceeded a value of 64."
   },
   {
-    "EventCode": "45050",
+    "EventCode": "0x45050",
     "EventName": "PM_1FLOP_CMPL",
     "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
   },
   {
-    "EventCode": "45052",
+    "EventCode": "0x45052",
     "EventName": "PM_4FLOP_CMPL",
     "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
   },
   {
-    "EventCode": "4D054",
+    "EventCode": "0x4D054",
     "EventName": "PM_8FLOP_CMPL",
     "BriefDescription": "Four Double Precision vector instructions completed."
   }
index 5a714e3dd71acc9256396655424765020411de9f..db3766dca07c5955b4aa9510532d1d1b3699ad9c 100644 (file)
@@ -1,56 +1,56 @@
 [
   {
-    "EventCode": "1F15E",
+    "EventCode": "0x1F15E",
     "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
     "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
   },
   {
-    "EventCode": "20016",
+    "EventCode": "0x20016",
     "EventName": "PM_ST_FIN",
     "BriefDescription": "Store finish count. Includes speculative activity."
   },
   {
-    "EventCode": "20018",
+    "EventCode": "0x20018",
     "EventName": "PM_ST_FWD",
     "BriefDescription": "Store forwards that finished."
   },
   {
-    "EventCode": "2011C",
+    "EventCode": "0x2011C",
     "EventName": "PM_MRK_NTF_CYC",
     "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
   },
   {
-    "EventCode": "2E01C",
+    "EventCode": "0x2E01C",
     "EventName": "PM_EXEC_STALL_TLBIE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "201E6",
+    "EventCode": "0x201E6",
     "EventName": "PM_THRESH_EXC_32",
     "BriefDescription": "Threshold counter exceeded a value of 32."
   },
   {
-    "EventCode": "200F0",
+    "EventCode": "0x200F0",
     "EventName": "PM_ST_CMPL",
     "BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
   },
   {
-    "EventCode": "200FE",
+    "EventCode": "0x200FE",
     "EventName": "PM_DATA_FROM_L2MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
   },
   {
-    "EventCode": "30010",
+    "EventCode": "0x30010",
     "EventName": "PM_PMC2_OVERFLOW",
     "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
   },
   {
-    "EventCode": "4D010",
+    "EventCode": "0x4D010",
     "EventName": "PM_PMC1_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
   },
   {
-    "EventCode": "4D05C",
+    "EventCode": "0x4D05C",
     "EventName": "PM_DPP_FLOP_CMPL",
     "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
   }
index ed4f0bd72e5a3087818e8fa97caa96071a5e95dd..9604446f8360be061a5e5c911c9c29fad6e224c6 100644 (file)
@@ -960,7 +960,7 @@ static int get_maxfds(void)
        struct rlimit rlim;
 
        if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
-               return min((int)rlim.rlim_max / 2, 512);
+               return min(rlim.rlim_max / 2, (rlim_t)512);
 
        return 512;
 }
@@ -1123,8 +1123,10 @@ static int process_one_file(const char *fpath, const struct stat *sb,
                        mapfile = strdup(fpath);
                        return 0;
                }
-
-               pr_info("%s: Ignoring file %s\n", prog, fpath);
+               if (is_json_file(bname))
+                       pr_debug("%s: ArchStd json is preprocessed %s\n", prog, fpath);
+               else
+                       pr_info("%s: Ignoring file %s\n", prog, fpath);
                return 0;
        }
 
index 7daa8bb70a5a09e85189bfd15da7f59b263f2fda..711d4f9f5645cf33aa721f7c8a263a6b2d691305 100755 (executable)
 from __future__ import print_function
 
 import sys
+# Only change warnings if the python -W option was not used
+if not sys.warnoptions:
+       import warnings
+       # PySide2 causes deprecation warnings, ignore them.
+       warnings.filterwarnings("ignore", category=DeprecationWarning)
 import argparse
 import weakref
 import threading
@@ -125,8 +130,9 @@ if pyside_version_1:
        from PySide.QtGui import *
        from PySide.QtSql import *
 
-from decimal import *
-from ctypes import *
+from decimal import Decimal, ROUND_HALF_UP
+from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
+                  c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
 from multiprocessing import Process, Array, Value, Event
 
 # xrange is range in Python3
@@ -3868,7 +3874,7 @@ def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
        if with_hdr:
                model = indexes[0].model()
                for col in range(min_col, max_col + 1):
-                       val = model.headerData(col, Qt.Horizontal)
+                       val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
                        if as_csv:
                                text += sep + ToCSValue(val)
                                sep = ","
index 645009c08b3cb63f5dd9018ef40640ac6dcf4322..8c10955eff939603063f5caf02c946e010d19365 100644 (file)
@@ -5,7 +5,7 @@ group_fd=-1
 flags=0|8
 cpu=*
 type=0|1
-size=120
+size=128
 config=0
 sample_period=*
 sample_type=263
@@ -16,7 +16,7 @@ pinned=0
 exclusive=0
 exclude_user=0
 exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
 exclude_idle=0
 mmap=1
 comm=1
index b0f42c34882e86d03bd016cdb6a905431a1502a5..408164456530697d7a761c10c9e97264adb58600 100644 (file)
@@ -5,7 +5,7 @@ group_fd=-1
 flags=0|8
 cpu=*
 type=0
-size=120
+size=128
 config=0
 sample_period=0
 sample_type=65536
index eba723cc0d380ecc2661a98986cf3c3bb6f8deb6..86a15dd359d93179519603c12cbba37757f64765 100644 (file)
@@ -7,7 +7,7 @@ cpu=*
 pid=-1
 flags=8
 type=1
-size=120
+size=128
 config=9
 sample_period=4000
 sample_type=455
index 76a53126efdf173bdd035017a547a56b0739c081..d4b0ef74defccfea45805a75c293eefec30e18e8 100644 (file)
@@ -131,8 +131,8 @@ static int test__pfm_group(void)
                },
                {
                        .events = "{},{instructions}",
-                       .nr_events = 0,
-                       .nr_groups = 0,
+                       .nr_events = 1,
+                       .nr_groups = 1,
                },
                {
                        .events = "{instructions},{instructions}",
index 8c0d9f368ebcf2cecca5f27d8e4547a43cdb7958..b64bdc1a7026df3e5219097b0f1ef44cadc73cd6 100644 (file)
@@ -145,7 +145,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
 perf-$(CONFIG_LIBELF) += probe-file.o
 perf-$(CONFIG_LIBELF) += probe-event.o
 
+ifdef CONFIG_LIBBPF_DYNAMIC
+  hashmap := 1
+endif
 ifndef CONFIG_LIBBPF
+  hashmap := 1
+endif
+
+ifdef hashmap
 perf-y += hashmap.o
 endif
 
index ddb52f748c8e8613cc293a366d4475d3b2f73bd1..5ed674a2f55e8f45d728227e0eadef7274c00a14 100644 (file)
@@ -451,10 +451,10 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
                goto out;
        }
 
-       err = -1;
        link = bpf_program__attach(skel->progs.on_switch);
-       if (!link) {
+       if (IS_ERR(link)) {
                pr_err("Failed to attach leader program\n");
+               err = PTR_ERR(link);
                goto out;
        }
 
@@ -521,9 +521,10 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 
        evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
        if (evsel->bperf_leader_link_fd < 0 &&
-           bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+           bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
+               err = -1;
                goto out;
-
+       }
        /*
         * The bpf_link holds reference to the leader program, and the
         * leader program holds reference to the maps. Therefore, if
@@ -550,6 +551,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
        /* Step 2: load the follower skeleton */
        evsel->follower_skel = bperf_follower_bpf__open();
        if (!evsel->follower_skel) {
+               err = -1;
                pr_err("Failed to open follower skeleton\n");
                goto out;
        }
index b2f4920e19a6d4b329f45e3c68144b77901221f1..7d2ba8419b0c467549fd7ab4ffc442cbc4fb2897 100644 (file)
@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
        if ((tag == DW_TAG_formal_parameter ||
             tag == DW_TAG_variable) &&
            die_compare_name(die_mem, fvp->name) &&
-       /* Does the DIE have location information or external instance? */
+       /*
+        * Does the DIE have location information or const value
+        * or external instance?
+        */
            (dwarf_attr(die_mem, DW_AT_external, &attr) ||
-            dwarf_attr(die_mem, DW_AT_location, &attr)))
+            dwarf_attr(die_mem, DW_AT_location, &attr) ||
+            dwarf_attr(die_mem, DW_AT_const_value, &attr)))
                return DIE_FIND_CB_END;
        if (dwarf_haspc(die_mem, fvp->addr))
                return DIE_FIND_CB_CONTINUE;
index 9130f6fad8d54d63a080c95bd5bb97c925e203a0..bc5e4f294e9e9da528a092bc49ee0bbc8f757c13 100644 (file)
@@ -144,6 +144,7 @@ static void perf_env__purge_bpf(struct perf_env *env)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
+               free(node->info_linear);
                free(node);
        }
 
index 8a62fb39e365cac4a6c5dafb6fb152d8b71668e1..19ad64f2bd8302a9e33a2554703a3853ffae78d5 100644 (file)
@@ -100,7 +100,7 @@ enum {
        PERF_IP_FLAG_VMEXIT             = 1ULL << 12,
 };
 
-#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
+#define PERF_IP_FLAG_CHARS "bcrosyiABExgh"
 
 #define PERF_BRANCH_MASK               (\
        PERF_IP_FLAG_BRANCH             |\
index 6e5c41528c7d0c7f91c4f9196485737c6b104f0d..6ea3e677dc1e7328dbda9d653c909241ae9baeb2 100644 (file)
@@ -425,9 +425,6 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
        if (affinity__setup(&affinity) < 0)
                return;
 
-       evlist__for_each_entry(evlist, pos)
-               bpf_counter__disable(pos);
-
        /* Disable 'immediate' events last */
        for (imm = 0; imm <= 1; imm++) {
                evlist__for_each_cpu(evlist, i, cpu) {
index 4a3cd1b5bb33ecce8a96e4bd3bf6ddb0c143c340..a8d8463f8ee5d7c8e8ebf0d375d8f00dfc80d1f4 100644 (file)
@@ -428,6 +428,7 @@ struct evsel *evsel__clone(struct evsel *orig)
        evsel->auto_merge_stats = orig->auto_merge_stats;
        evsel->collect_stat = orig->collect_stat;
        evsel->weak_group = orig->weak_group;
+       evsel->use_config_name = orig->use_config_name;
 
        if (evsel__copy_config_terms(evsel, orig) < 0)
                goto out_err;
index 75cf5dbfe2080a0850526a802451fdbd51d4b7ba..bdad52a0643815e81bea2f416ca2baf570d430a5 100644 (file)
@@ -83,8 +83,10 @@ struct evsel {
                bool                    collect_stat;
                bool                    weak_group;
                bool                    bpf_counter;
+               bool                    use_config_name;
                int                     bpf_fd;
                struct bpf_object       *bpf_obj;
+               struct list_head        config_terms;
        };
 
        /*
@@ -116,10 +118,8 @@ struct evsel {
        bool                    merged_stat;
        bool                    reset_group;
        bool                    errored;
-       bool                    use_config_name;
        struct hashmap          *per_pkg_mask;
        struct evsel            *leader;
-       struct list_head        config_terms;
        int                     err;
        int                     cpu_iter;
        struct {
index 8c59677bee130f5643f61382ebadb33a9421e7fc..20ad663978cc463074e56c1afcdce6c16c52de01 100644 (file)
@@ -1146,6 +1146,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
                decoder->set_fup_tx_flags = false;
                decoder->tx_flags = decoder->fup_tx_flags;
                decoder->state.type = INTEL_PT_TRANSACTION;
+               if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
+                       decoder->state.type |= INTEL_PT_BRANCH;
                decoder->state.from_ip = decoder->ip;
                decoder->state.to_ip = 0;
                decoder->state.flags = decoder->fup_tx_flags;
@@ -1220,8 +1222,10 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
                        return 0;
                if (err == -EAGAIN ||
                    intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
+                       bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
+
                        decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
-                       if (intel_pt_fup_event(decoder))
+                       if (intel_pt_fup_event(decoder) && no_tip)
                                return 0;
                        return -EAGAIN;
                }
index 8658d42ce57a09d97c42a348b2d94bb229524120..0dfec8761b9ac5b83d460ad888226ee8087660bb 100644 (file)
@@ -707,8 +707,10 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
 
                        *ip += intel_pt_insn->length;
 
-                       if (to_ip && *ip == to_ip)
+                       if (to_ip && *ip == to_ip) {
+                               intel_pt_insn->length = 0;
                                goto out_no_cache;
+                       }
 
                        if (*ip >= al.map->end)
                                break;
@@ -1198,6 +1200,7 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
 
 static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
 {
+       ptq->insn_len = 0;
        if (ptq->state->flags & INTEL_PT_ABORT_TX) {
                ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
        } else if (ptq->state->flags & INTEL_PT_ASYNC) {
@@ -1211,7 +1214,6 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
                        ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
                                     PERF_IP_FLAG_ASYNC |
                                     PERF_IP_FLAG_INTERRUPT;
-               ptq->insn_len = 0;
        } else {
                if (ptq->state->from_ip)
                        ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
index 4dad14265b81dfb85f48122a924bbeb74ee2c77f..84108c17f48d4a81f14d53b7c2df9f0aa75f563a 100644 (file)
@@ -150,6 +150,10 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
                .symbol = "bpf-output",
                .alias  = "",
        },
+       [PERF_COUNT_SW_CGROUP_SWITCHES] = {
+               .symbol = "cgroup-switches",
+               .alias  = "",
+       },
 };
 
 #define __PERF_EVENT_FIELD(config, name) \
@@ -2928,9 +2932,14 @@ restart:
        }
 
        for (i = 0; i < max; i++, syms++) {
+               /*
+                * New attr.config still not supported here, the latest
+                * example was PERF_COUNT_SW_CGROUP_SWITCHES
+                */
+               if (syms->symbol == NULL)
+                       continue;
 
-               if (event_glob != NULL && syms->symbol != NULL &&
-                   !(strglobmatch(syms->symbol, event_glob) ||
+               if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) ||
                      (syms->alias && strglobmatch(syms->alias, event_glob))))
                        continue;
 
index fb8646cc3e834e288fd4a0ca163b69f8244f85ba..923849024b15fad18181de0526203a2fa46e5437 100644 (file)
@@ -347,6 +347,7 @@ emulation-faults                            { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EM
 dummy                                          { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
 duration_time                                  { return tool(yyscanner, PERF_TOOL_DURATION_TIME); }
 bpf-output                                     { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
+cgroup-switches                                        { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CGROUP_SWITCHES); }
 
        /*
         * We have to handle the kernel PMU event cycles-ct/cycles-t/mem-loads/mem-stores separately.
index 829af17a0867b2c766620db692e1a97393a61a34..020411682a3cb433cd2fb6c96eaa27a1e2484e23 100644 (file)
@@ -103,6 +103,11 @@ static void perf_probe_build_id(struct evsel *evsel)
        evsel->core.attr.build_id = 1;
 }
 
+static void perf_probe_cgroup(struct evsel *evsel)
+{
+       evsel->core.attr.cgroup = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
        return perf_probe_api(perf_probe_sample_identifier);
@@ -182,3 +187,8 @@ bool perf_can_record_build_id(void)
 {
        return perf_probe_api(perf_probe_build_id);
 }
+
+bool perf_can_record_cgroup(void)
+{
+       return perf_probe_api(perf_probe_cgroup);
+}
index f12ca55f509a8386986a04dc74f6a0abbf6ddb89..b104168efb154d5c8fb24aacea66b951867f7a9a 100644 (file)
@@ -12,5 +12,6 @@ bool perf_can_record_switch_events(void);
 bool perf_can_record_text_poke_events(void);
 bool perf_can_sample_identifier(void);
 bool perf_can_record_build_id(void);
+bool perf_can_record_cgroup(void);
 
 #endif // __PERF_API_PROBE_H
index d735acb6c29cb0baa4ae08ffb73454254e6dc603..6eef6dfeaa574a74b76617e6df29153fbe7cdadb 100644 (file)
@@ -62,8 +62,16 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
                }
 
                /* no event */
-               if (*q == '\0')
+               if (*q == '\0') {
+                       if (*sep == '}') {
+                               if (grp_evt < 0) {
+                                       ui__error("cannot close a non-existing event group\n");
+                                       goto error;
+                               }
+                               grp_evt--;
+                       }
                        continue;
+               }
 
                memset(&attr, 0, sizeof(attr));
                event_attr_init(&attr);
@@ -107,6 +115,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
                        grp_evt = -1;
                }
        }
+       free(p_orig);
        return 0;
 error:
        free(p_orig);
index 866f2d514d7299b0416dd4e99b54472e787b0e50..b029c29ce227a89f311be351d40308ceb33a845d 100644 (file)
@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
            immediate_value_is_supported()) {
                Dwarf_Sword snum;
 
+               if (!tvar)
+                       return 0;
+
                dwarf_formsdata(&attr, &snum);
                ret = asprintf(&tvar->value, "\\%ld", (long)snum);
 
index f99852d54b147f1ed746d1c2dd32843e804ba454..43e5b563dee890be53aa82380811931e70607d2c 100644 (file)
@@ -157,9 +157,15 @@ static int get_max_rate(unsigned int *rate)
 static int record_opts__config_freq(struct record_opts *opts)
 {
        bool user_freq = opts->user_freq != UINT_MAX;
+       bool user_interval = opts->user_interval != ULLONG_MAX;
        unsigned int max_rate;
 
-       if (opts->user_interval != ULLONG_MAX)
+       if (user_interval && user_freq) {
+               pr_err("cannot set frequency and period at the same time\n");
+               return -1;
+       }
+
+       if (user_interval)
                opts->default_interval = opts->user_interval;
        if (user_freq)
                opts->freq = opts->user_freq;
index a12cf4f0e97a72f5a39ee4f43606f5f96718c7fc..e59242c361ce5ea8c21d630f40dada1ab0e390ef 100644 (file)
@@ -904,7 +904,7 @@ static void perf_event__cpu_map_swap(union perf_event *event,
        struct perf_record_record_cpu_map *mask;
        unsigned i;
 
-       data->type = bswap_64(data->type);
+       data->type = bswap_16(data->type);
 
        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
@@ -937,7 +937,7 @@ static void perf_event__stat_config_swap(union perf_event *event,
 {
        u64 size;
 
-       size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
+       size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
 }
@@ -1723,6 +1723,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
        if (event->header.size < hdr_sz || event->header.size > buf_sz)
                return -1;
 
+       buf += hdr_sz;
        rest = event->header.size - hdr_sz;
 
        if (readn(fd, buf, rest) != (ssize_t)rest)
index a76fff5e7d8303360dc5811bb38921b65d415e7b..ca326f98c7a2a23aa4d3d48fa4df16fa2e0a966c 100644 (file)
@@ -541,7 +541,7 @@ static void uniquify_event_name(struct evsel *counter)
        char *config;
        int ret = 0;
 
-       if (counter->uniquified_name ||
+       if (counter->uniquified_name || counter->use_config_name ||
            !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
                                           strlen(counter->pmu_name)))
                return;
@@ -555,10 +555,8 @@ static void uniquify_event_name(struct evsel *counter)
                }
        } else {
                if (perf_pmu__has_hybrid()) {
-                       if (!counter->use_config_name) {
-                               ret = asprintf(&new_name, "%s/%s/",
-                                              counter->pmu_name, counter->name);
-                       }
+                       ret = asprintf(&new_name, "%s/%s/",
+                                      counter->pmu_name, counter->name);
                } else {
                        ret = asprintf(&new_name, "%s [%s]",
                                       counter->name, counter->pmu_name);
index 4c56aa8374344909aa3017fb60b77257b9d7f0d7..a73345730ba90daef8cc923690c74467e0c7fea5 100644 (file)
@@ -2412,6 +2412,7 @@ int cleanup_sdt_note_list(struct list_head *sdt_notes)
 
        list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
                list_del_init(&pos->note_list);
+               zfree(&pos->args);
                zfree(&pos->name);
                zfree(&pos->provider);
                free(pos);
index f9271f3ea91299d32fdf7a516093b99e695743cd..071312f5eb9280e8a7d37d6e4931a2aaac123e1a 100644 (file)
@@ -131,29 +131,29 @@ QUIET_SUBDIR1  =
 
 ifneq ($(silent),1)
   ifneq ($(V),1)
-       QUIET_CC       = @echo '  CC       '$@;
-       QUIET_CC_FPIC  = @echo '  CC FPIC  '$@;
-       QUIET_CLANG    = @echo '  CLANG    '$@;
-       QUIET_AR       = @echo '  AR       '$@;
-       QUIET_LINK     = @echo '  LINK     '$@;
-       QUIET_MKDIR    = @echo '  MKDIR    '$@;
-       QUIET_GEN      = @echo '  GEN      '$@;
+       QUIET_CC       = @echo '  CC      '$@;
+       QUIET_CC_FPIC  = @echo '  CC FPIC '$@;
+       QUIET_CLANG    = @echo '  CLANG   '$@;
+       QUIET_AR       = @echo '  AR      '$@;
+       QUIET_LINK     = @echo '  LINK    '$@;
+       QUIET_MKDIR    = @echo '  MKDIR   '$@;
+       QUIET_GEN      = @echo '  GEN     '$@;
        QUIET_SUBDIR0  = +@subdir=
        QUIET_SUBDIR1  = ;$(NO_SUBDIR) \
-                         echo '  SUBDIR   '$$subdir; \
+                         echo '  SUBDIR  '$$subdir; \
                         $(MAKE) $(PRINT_DIR) -C $$subdir
-       QUIET_FLEX     = @echo '  FLEX     '$@;
-       QUIET_BISON    = @echo '  BISON    '$@;
-       QUIET_GENSKEL  = @echo '  GEN-SKEL '$@;
+       QUIET_FLEX     = @echo '  FLEX    '$@;
+       QUIET_BISON    = @echo '  BISON   '$@;
+       QUIET_GENSKEL  = @echo '  GENSKEL '$@;
 
        descend = \
-               +@echo         '  DESCEND  '$(1); \
+               +@echo         '  DESCEND '$(1); \
                mkdir -p $(OUTPUT)$(1) && \
                $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
 
-       QUIET_CLEAN    = @printf '  CLEAN    %s\n' $1;
-       QUIET_INSTALL  = @printf '  INSTALL  %s\n' $1;
-       QUIET_UNINST   = @printf '  UNINST   %s\n' $1;
+       QUIET_CLEAN    = @printf '  CLEAN   %s\n' $1;
+       QUIET_INSTALL  = @printf '  INSTALL %s\n' $1;
+       QUIET_UNINST   = @printf '  UNINST  %s\n' $1;
   endif
 endif
 
index c62d372d426fb3f58f049d9ff94684790107570c..ed563bdd88f39a1f9d6b5bfc6aadf4d6f50788d1 100644 (file)
@@ -62,7 +62,7 @@ struct nfit_test_resource *get_nfit_res(resource_size_t resource)
 }
 EXPORT_SYMBOL(get_nfit_res);
 
-void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
+static void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
                void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
 {
        struct nfit_test_resource *nfit_res = get_nfit_res(offset);
index 9b185bf82da8794c420d3f839b20c5b63f6112f5..54f367cbadaee9093270da036fc63a696ee68414 100644 (file)
@@ -1871,9 +1871,16 @@ static void smart_init(struct nfit_test *t)
        }
 }
 
+static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
+{
+       /* until spa location cookie support is added... */
+       return sizeof(*spa) - 8;
+}
+
 static int nfit_test0_alloc(struct nfit_test *t)
 {
-       size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
+       struct acpi_nfit_system_address *spa = NULL;
+       size_t nfit_size = sizeof_spa(spa) * NUM_SPA
                        + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
                        + sizeof(struct acpi_nfit_control_region) * NUM_DCR
                        + offsetof(struct acpi_nfit_control_region,
@@ -1937,7 +1944,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
 
 static int nfit_test1_alloc(struct nfit_test *t)
 {
-       size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
+       struct acpi_nfit_system_address *spa = NULL;
+       size_t nfit_size = sizeof_spa(spa) * 2
                + sizeof(struct acpi_nfit_memory_map) * 2
                + offsetof(struct acpi_nfit_control_region, window_size) * 2;
        int i;
@@ -2000,7 +2008,7 @@ static void nfit_test0_setup(struct nfit_test *t)
         */
        spa = nfit_buf;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
        spa->range_index = 0+1;
        spa->address = t->spa_set_dma[0];
@@ -2014,7 +2022,7 @@ static void nfit_test0_setup(struct nfit_test *t)
         */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
        spa->range_index = 1+1;
        spa->address = t->spa_set_dma[1];
@@ -2024,7 +2032,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa2 (dcr0) dimm0 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 2+1;
        spa->address = t->dcr_dma[0];
@@ -2034,7 +2042,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa3 (dcr1) dimm1 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 3+1;
        spa->address = t->dcr_dma[1];
@@ -2044,7 +2052,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa4 (dcr2) dimm2 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 4+1;
        spa->address = t->dcr_dma[2];
@@ -2054,7 +2062,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa5 (dcr3) dimm3 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
        spa->range_index = 5+1;
        spa->address = t->dcr_dma[3];
@@ -2064,7 +2072,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa6 (bdw for dcr0) dimm0 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 6+1;
        spa->address = t->dimm_dma[0];
@@ -2074,7 +2082,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa7 (bdw for dcr1) dimm1 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 7+1;
        spa->address = t->dimm_dma[1];
@@ -2084,7 +2092,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa8 (bdw for dcr2) dimm2 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 8+1;
        spa->address = t->dimm_dma[2];
@@ -2094,7 +2102,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        /* spa9 (bdw for dcr3) dimm3 */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
        spa->range_index = 9+1;
        spa->address = t->dimm_dma[3];
@@ -2581,7 +2589,7 @@ static void nfit_test0_setup(struct nfit_test *t)
                /* spa10 (dcr4) dimm4 */
                spa = nfit_buf + offset;
                spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-               spa->header.length = sizeof(*spa);
+               spa->header.length = sizeof_spa(spa);
                memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
                spa->range_index = 10+1;
                spa->address = t->dcr_dma[4];
@@ -2595,7 +2603,7 @@ static void nfit_test0_setup(struct nfit_test *t)
                 */
                spa = nfit_buf + offset;
                spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-               spa->header.length = sizeof(*spa);
+               spa->header.length = sizeof_spa(spa);
                memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
                spa->range_index = 11+1;
                spa->address = t->spa_set_dma[2];
@@ -2605,7 +2613,7 @@ static void nfit_test0_setup(struct nfit_test *t)
                /* spa12 (bdw for dcr4) dimm4 */
                spa = nfit_buf + offset;
                spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-               spa->header.length = sizeof(*spa);
+               spa->header.length = sizeof_spa(spa);
                memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
                spa->range_index = 12+1;
                spa->address = t->dimm_dma[4];
@@ -2739,7 +2747,7 @@ static void nfit_test1_setup(struct nfit_test *t)
        /* spa0 (flat range with no bdw aliasing) */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
        spa->range_index = 0+1;
        spa->address = t->spa_set_dma[0];
@@ -2749,7 +2757,7 @@ static void nfit_test1_setup(struct nfit_test *t)
        /* virtual cd region */
        spa = nfit_buf + offset;
        spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
-       spa->header.length = sizeof(*spa);
+       spa->header.length = sizeof_spa(spa);
        memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
        spa->range_index = 0;
        spa->address = t->spa_set_dma[1];
index 656b04976ccc6389e03dd46f6b344c1217c41137..67b77ab83c20ed2098578c0ab14877b63259035a 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "system.h"
 
+#include <stddef.h>
 #include <linux/errno.h>
 #include <linux/auxvec.h>
 #include <linux/signal.h>
index 12ee40284da021686fc6501019d73cf649dfad87..2060bc122c53095b2fad4dd89f9ece2255f5c37f 100644 (file)
@@ -40,7 +40,7 @@ struct ipv6_packet pkt_v6 = {
        .tcp.doff = 5,
 };
 
-static int settimeo(int fd, int timeout_ms)
+int settimeo(int fd, int timeout_ms)
 {
        struct timeval timeout = { .tv_sec = 3 };
 
index 7205f8afdba1187fb1a2de3f0f767e1d5df7ce16..5e0d51c07b632ebf3cbf20c7d29bd576a63b2dbb 100644 (file)
@@ -33,6 +33,7 @@ struct ipv6_packet {
 } __packed;
 extern struct ipv6_packet pkt_v6;
 
+int settimeo(int fd, int timeout_ms);
 int start_server(int family, int type, const char *addr, __u16 port,
                 int timeout_ms);
 int connect_to_fd(int server_fd, int timeout_ms);
index de78617f65501f8b4cf7c9ca6c613dfb1f1cdd36..f9a8ae331963d6db8d5bc1e9afc391fe9f8e31fc 100644 (file)
@@ -86,8 +86,9 @@ void test_ringbuf(void)
        const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
        pthread_t thread;
        long bg_ret = -1;
-       int err, cnt;
+       int err, cnt, rb_fd;
        int page_size = getpagesize();
+       void *mmap_ptr, *tmp_ptr;
 
        skel = test_ringbuf__open();
        if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
@@ -101,6 +102,52 @@ void test_ringbuf(void)
        if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
                goto cleanup;
 
+       rb_fd = bpf_map__fd(skel->maps.ringbuf);
+       /* good read/write cons_pos */
+       mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
+       ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
+       tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
+       if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
+               goto cleanup;
+       ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
+       ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
+
+       /* bad writeable prod_pos */
+       mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
+       err = -errno;
+       ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
+       ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");
+
+       /* bad writeable data pages */
+       mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+       err = -errno;
+       ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
+       ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
+       mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
+       ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
+       mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
+       ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");
+
+       /* good read-only pages */
+       mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
+       if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+               goto cleanup;
+
+       ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
+       ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
+       ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
+       ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");
+
+       /* good read-only pages with initial offset */
+       mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
+       if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
+               goto cleanup;
+
+       ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
+       ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
+       ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
+       ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");
+
        /* only trigger BPF program for current process */
        skel->bss->pid = getpid();
 
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
new file mode 100644 (file)
index 0000000..5703c91
--- /dev/null
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
+ * between src and dst. The netns fwd has veth links to each src and dst. The
+ * client is in src and server in dst. The test installs a TC BPF program to each
+ * host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the
+ * neigh addr population and redirect or ii) bpf_redirect_peer() for namespace
+ * switch from ingress side; it also installs a checker prog on the egress side
+ * to drop unexpected traffic.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <linux/limits.h>
+#include <linux/sysctl.h>
+#include <linux/if_tun.h>
+#include <linux/if.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_neigh_fib.skel.h"
+#include "test_tc_neigh.skel.h"
+#include "test_tc_peer.skel.h"
+
+#define NS_SRC "ns_src"
+#define NS_FWD "ns_fwd"
+#define NS_DST "ns_dst"
+
+#define IP4_SRC "172.16.1.100"
+#define IP4_DST "172.16.2.100"
+#define IP4_TUN_SRC "172.17.1.100"
+#define IP4_TUN_FWD "172.17.1.200"
+#define IP4_PORT 9004
+
+#define IP6_SRC "0::1:dead:beef:cafe"
+#define IP6_DST "0::2:dead:beef:cafe"
+#define IP6_TUN_SRC "1::1:dead:beef:cafe"
+#define IP6_TUN_FWD "1::2:dead:beef:cafe"
+#define IP6_PORT 9006
+
+#define IP4_SLL "169.254.0.1"
+#define IP4_DLL "169.254.0.2"
+#define IP4_NET "169.254.0.0"
+
+#define MAC_DST_FWD "00:11:22:33:44:55"
+#define MAC_DST "00:22:33:44:55:66"
+
+#define IFADDR_STR_LEN 18
+#define PING_ARGS "-i 0.2 -c 3 -w 10 -q"
+
+#define SRC_PROG_PIN_FILE "/sys/fs/bpf/test_tc_src"
+#define DST_PROG_PIN_FILE "/sys/fs/bpf/test_tc_dst"
+#define CHK_PROG_PIN_FILE "/sys/fs/bpf/test_tc_chk"
+
+#define TIMEOUT_MILLIS 10000
+
+#define log_err(MSG, ...) \
+       fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+               __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
+
+static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL};
+
+static int write_file(const char *path, const char *newval)
+{
+       FILE *f;
+
+       f = fopen(path, "r+");
+       if (!f)
+               return -1;
+       if (fwrite(newval, strlen(newval), 1, f) != 1) {
+               log_err("writing to %s failed", path);
+               fclose(f);
+               return -1;
+       }
+       fclose(f);
+       return 0;
+}
+
+struct nstoken {
+       int orig_netns_fd;
+};
+
+static int setns_by_fd(int nsfd)
+{
+       int err;
+
+       err = setns(nsfd, CLONE_NEWNET);
+       close(nsfd);
+
+       if (!ASSERT_OK(err, "setns"))
+               return err;
+
+       /* Switch /sys to the new namespace so that e.g. /sys/class/net
+        * reflects the devices in the new namespace.
+        */
+       err = unshare(CLONE_NEWNS);
+       if (!ASSERT_OK(err, "unshare"))
+               return err;
+
+       err = umount2("/sys", MNT_DETACH);
+       if (!ASSERT_OK(err, "umount2 /sys"))
+               return err;
+
+       err = mount("sysfs", "/sys", "sysfs", 0, NULL);
+       if (!ASSERT_OK(err, "mount /sys"))
+               return err;
+
+       err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL);
+       if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
+               return err;
+
+       return 0;
+}
+
+/**
+ * open_netns() - Switch to specified network namespace by name.
+ *
+ * Returns token with which to restore the original namespace
+ * using close_netns().
+ */
+static struct nstoken *open_netns(const char *name)
+{
+       int nsfd;
+       char nspath[PATH_MAX];
+       int err;
+       struct nstoken *token;
+
+       token = malloc(sizeof(struct nstoken));
+       if (!ASSERT_OK_PTR(token, "malloc token"))
+               return NULL;
+
+       token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+       if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
+               goto fail;
+
+       snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
+       nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
+       if (!ASSERT_GE(nsfd, 0, "open netns fd"))
+               goto fail;
+
+       err = setns_by_fd(nsfd);
+       if (!ASSERT_OK(err, "setns_by_fd"))
+               goto fail;
+
+       return token;
+fail:
+       free(token);
+       return NULL;
+}
+
+static void close_netns(struct nstoken *token)
+{
+       ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd");
+       free(token);
+}
+
+static int netns_setup_namespaces(const char *verb)
+{
+       const char * const *ns = namespaces;
+       char cmd[128];
+
+       while (*ns) {
+               snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns);
+               if (!ASSERT_OK(system(cmd), cmd))
+                       return -1;
+               ns++;
+       }
+       return 0;
+}
+
+struct netns_setup_result {
+       int ifindex_veth_src_fwd;
+       int ifindex_veth_dst_fwd;
+};
+
+static int get_ifaddr(const char *name, char *ifaddr)
+{
+       char path[PATH_MAX];
+       FILE *f;
+       int ret;
+
+       snprintf(path, PATH_MAX, "/sys/class/net/%s/address", name);
+       f = fopen(path, "r");
+       if (!ASSERT_OK_PTR(f, path))
+               return -1;
+
+       ret = fread(ifaddr, 1, IFADDR_STR_LEN, f);
+       if (!ASSERT_EQ(ret, IFADDR_STR_LEN, "fread ifaddr")) {
+               fclose(f);
+               return -1;
+       }
+       fclose(f);
+       return 0;
+}
+
+static int get_ifindex(const char *name)
+{
+       char path[PATH_MAX];
+       char buf[32];
+       FILE *f;
+       int ret;
+
+       snprintf(path, PATH_MAX, "/sys/class/net/%s/ifindex", name);
+       f = fopen(path, "r");
+       if (!ASSERT_OK_PTR(f, path))
+               return -1;
+
+       ret = fread(buf, 1, sizeof(buf), f);
+       if (!ASSERT_GT(ret, 0, "fread ifindex")) {
+               fclose(f);
+               return -1;
+       }
+       fclose(f);
+       return atoi(buf);
+}
+
+#define SYS(fmt, ...)                                          \
+       ({                                                      \
+               char cmd[1024];                                 \
+               snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+               if (!ASSERT_OK(system(cmd), cmd))               \
+                       goto fail;                              \
+       })
+
+static int netns_setup_links_and_routes(struct netns_setup_result *result)
+{
+       struct nstoken *nstoken = NULL;
+       char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {};
+
+       SYS("ip link add veth_src type veth peer name veth_src_fwd");
+       SYS("ip link add veth_dst type veth peer name veth_dst_fwd");
+
+       SYS("ip link set veth_dst_fwd address " MAC_DST_FWD);
+       SYS("ip link set veth_dst address " MAC_DST);
+
+       if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr))
+               goto fail;
+
+       result->ifindex_veth_src_fwd = get_ifindex("veth_src_fwd");
+       if (result->ifindex_veth_src_fwd < 0)
+               goto fail;
+       result->ifindex_veth_dst_fwd = get_ifindex("veth_dst_fwd");
+       if (result->ifindex_veth_dst_fwd < 0)
+               goto fail;
+
+       SYS("ip link set veth_src netns " NS_SRC);
+       SYS("ip link set veth_src_fwd netns " NS_FWD);
+       SYS("ip link set veth_dst_fwd netns " NS_FWD);
+       SYS("ip link set veth_dst netns " NS_DST);
+
+       /** setup in 'src' namespace */
+       nstoken = open_netns(NS_SRC);
+       if (!ASSERT_OK_PTR(nstoken, "setns src"))
+               goto fail;
+
+       SYS("ip addr add " IP4_SRC "/32 dev veth_src");
+       SYS("ip addr add " IP6_SRC "/128 dev veth_src nodad");
+       SYS("ip link set dev veth_src up");
+
+       SYS("ip route add " IP4_DST "/32 dev veth_src scope global");
+       SYS("ip route add " IP4_NET "/16 dev veth_src scope global");
+       SYS("ip route add " IP6_DST "/128 dev veth_src scope global");
+
+       SYS("ip neigh add " IP4_DST " dev veth_src lladdr %s",
+           veth_src_fwd_addr);
+       SYS("ip neigh add " IP6_DST " dev veth_src lladdr %s",
+           veth_src_fwd_addr);
+
+       close_netns(nstoken);
+
+       /** setup in 'fwd' namespace */
+       nstoken = open_netns(NS_FWD);
+       if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+               goto fail;
+
+       /* The fwd netns automatically gets a v6 LL address / routes, but also
+        * needs v4 one in order to start ARP probing. IP4_NET route is added
+        * to the endpoints so that the ARP processing will reply.
+        */
+       SYS("ip addr add " IP4_SLL "/32 dev veth_src_fwd");
+       SYS("ip addr add " IP4_DLL "/32 dev veth_dst_fwd");
+       SYS("ip link set dev veth_src_fwd up");
+       SYS("ip link set dev veth_dst_fwd up");
+
+       SYS("ip route add " IP4_SRC "/32 dev veth_src_fwd scope global");
+       SYS("ip route add " IP6_SRC "/128 dev veth_src_fwd scope global");
+       SYS("ip route add " IP4_DST "/32 dev veth_dst_fwd scope global");
+       SYS("ip route add " IP6_DST "/128 dev veth_dst_fwd scope global");
+
+       close_netns(nstoken);
+
+       /** setup in 'dst' namespace */
+       nstoken = open_netns(NS_DST);
+       if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+               goto fail;
+
+       SYS("ip addr add " IP4_DST "/32 dev veth_dst");
+       SYS("ip addr add " IP6_DST "/128 dev veth_dst nodad");
+       SYS("ip link set dev veth_dst up");
+
+       SYS("ip route add " IP4_SRC "/32 dev veth_dst scope global");
+       SYS("ip route add " IP4_NET "/16 dev veth_dst scope global");
+       SYS("ip route add " IP6_SRC "/128 dev veth_dst scope global");
+
+       SYS("ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+       SYS("ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+
+       close_netns(nstoken);
+
+       return 0;
+fail:
+       if (nstoken)
+               close_netns(nstoken);
+       return -1;
+}
+
+static int netns_load_bpf(void)
+{
+       SYS("tc qdisc add dev veth_src_fwd clsact");
+       SYS("tc filter add dev veth_src_fwd ingress bpf da object-pinned "
+           SRC_PROG_PIN_FILE);
+       SYS("tc filter add dev veth_src_fwd egress bpf da object-pinned "
+           CHK_PROG_PIN_FILE);
+
+       SYS("tc qdisc add dev veth_dst_fwd clsact");
+       SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned "
+           DST_PROG_PIN_FILE);
+       SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned "
+           CHK_PROG_PIN_FILE);
+
+       return 0;
+fail:
+       return -1;
+}
+
+static void test_tcp(int family, const char *addr, __u16 port)
+{
+       int listen_fd = -1, accept_fd = -1, client_fd = -1;
+       char buf[] = "testing testing";
+       int n;
+       struct nstoken *nstoken;
+
+       nstoken = open_netns(NS_DST);
+       if (!ASSERT_OK_PTR(nstoken, "setns dst"))
+               return;
+
+       listen_fd = start_server(family, SOCK_STREAM, addr, port, 0);
+       if (!ASSERT_GE(listen_fd, 0, "listen"))
+               goto done;
+
+       close_netns(nstoken);
+       nstoken = open_netns(NS_SRC);
+       if (!ASSERT_OK_PTR(nstoken, "setns src"))
+               goto done;
+
+       client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS);
+       if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+               goto done;
+
+       accept_fd = accept(listen_fd, NULL, NULL);
+       if (!ASSERT_GE(accept_fd, 0, "accept"))
+               goto done;
+
+       if (!ASSERT_OK(settimeo(accept_fd, TIMEOUT_MILLIS), "settimeo"))
+               goto done;
+
+       n = write(client_fd, buf, sizeof(buf));
+       if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+               goto done;
+
+       n = read(accept_fd, buf, sizeof(buf));
+       ASSERT_EQ(n, sizeof(buf), "recv from server");
+
+done:
+       if (nstoken)
+               close_netns(nstoken);
+       if (listen_fd >= 0)
+               close(listen_fd);
+       if (accept_fd >= 0)
+               close(accept_fd);
+       if (client_fd >= 0)
+               close(client_fd);
+}
+
+static int test_ping(int family, const char *addr)
+{
+       const char *ping = family == AF_INET6 ? "ping6" : "ping";
+
+       SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping, addr);
+       return 0;
+fail:
+       return -1;
+}
+
+static void test_connectivity(void)
+{
+       test_tcp(AF_INET, IP4_DST, IP4_PORT);
+       test_ping(AF_INET, IP4_DST);
+       test_tcp(AF_INET6, IP6_DST, IP6_PORT);
+       test_ping(AF_INET6, IP6_DST);
+}
+
+static int set_forwarding(bool enable)
+{
+       int err;
+
+       err = write_file("/proc/sys/net/ipv4/ip_forward", enable ? "1" : "0");
+       if (!ASSERT_OK(err, "set ipv4.ip_forward=0"))
+               return err;
+
+       err = write_file("/proc/sys/net/ipv6/conf/all/forwarding", enable ? "1" : "0");
+       if (!ASSERT_OK(err, "set ipv6.forwarding=0"))
+               return err;
+
+       return 0;
+}
+
+static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
+{
+       struct nstoken *nstoken = NULL;
+       struct test_tc_neigh_fib *skel = NULL;
+       int err;
+
+       nstoken = open_netns(NS_FWD);
+       if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+               return;
+
+       skel = test_tc_neigh_fib__open();
+       if (!ASSERT_OK_PTR(skel, "test_tc_neigh_fib__open"))
+               goto done;
+
+       if (!ASSERT_OK(test_tc_neigh_fib__load(skel), "test_tc_neigh_fib__load"))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+               goto done;
+
+       if (netns_load_bpf())
+               goto done;
+
+       /* bpf_fib_lookup() checks if forwarding is enabled */
+       if (!ASSERT_OK(set_forwarding(true), "enable forwarding"))
+               goto done;
+
+       test_connectivity();
+
+done:
+       if (skel)
+               test_tc_neigh_fib__destroy(skel);
+       close_netns(nstoken);
+}
+
+static void test_tc_redirect_neigh(struct netns_setup_result *setup_result)
+{
+       struct nstoken *nstoken = NULL;
+       struct test_tc_neigh *skel = NULL;
+       int err;
+
+       nstoken = open_netns(NS_FWD);
+       if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+               return;
+
+       skel = test_tc_neigh__open();
+       if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open"))
+               goto done;
+
+       skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+       skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+       err = test_tc_neigh__load(skel);
+       if (!ASSERT_OK(err, "test_tc_neigh__load"))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+               goto done;
+
+       if (netns_load_bpf())
+               goto done;
+
+       if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+               goto done;
+
+       test_connectivity();
+
+done:
+       if (skel)
+               test_tc_neigh__destroy(skel);
+       close_netns(nstoken);
+}
+
+static void test_tc_redirect_peer(struct netns_setup_result *setup_result)
+{
+       struct nstoken *nstoken;
+       struct test_tc_peer *skel;
+       int err;
+
+       nstoken = open_netns(NS_FWD);
+       if (!ASSERT_OK_PTR(nstoken, "setns fwd"))
+               return;
+
+       skel = test_tc_peer__open();
+       if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+               goto done;
+
+       skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd;
+       skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+       err = test_tc_peer__load(skel);
+       if (!ASSERT_OK(err, "test_tc_peer__load"))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+               goto done;
+
+       err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+               goto done;
+
+       if (netns_load_bpf())
+               goto done;
+
+       if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+               goto done;
+
+       test_connectivity();
+
+done:
+       if (skel)
+               test_tc_peer__destroy(skel);
+       close_netns(nstoken);
+}
+
+static int tun_open(char *name)
+{
+       struct ifreq ifr;
+       int fd, err;
+
+       fd = open("/dev/net/tun", O_RDWR);
+       if (!ASSERT_GE(fd, 0, "open /dev/net/tun"))
+               return -1;
+
+       memset(&ifr, 0, sizeof(ifr));
+
+       ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
+       if (*name)
+               strncpy(ifr.ifr_name, name, IFNAMSIZ);
+
+       err = ioctl(fd, TUNSETIFF, &ifr);
+       if (!ASSERT_OK(err, "ioctl TUNSETIFF"))
+               goto fail;
+
+       SYS("ip link set dev %s up", name);
+
+       return fd;
+fail:
+       close(fd);
+       return -1;
+}
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+enum {
+       SRC_TO_TARGET = 0,
+       TARGET_TO_SRC = 1,
+};
+
+static int tun_relay_loop(int src_fd, int target_fd)
+{
+       fd_set rfds, wfds;
+
+       FD_ZERO(&rfds);
+       FD_ZERO(&wfds);
+
+       for (;;) {
+               char buf[1500];
+               int direction, nread, nwrite;
+
+               FD_SET(src_fd, &rfds);
+               FD_SET(target_fd, &rfds);
+
+               if (select(1 + MAX(src_fd, target_fd), &rfds, NULL, NULL, NULL) < 0) {
+                       log_err("select failed");
+                       return 1;
+               }
+
+               direction = FD_ISSET(src_fd, &rfds) ? SRC_TO_TARGET : TARGET_TO_SRC;
+
+               nread = read(direction == SRC_TO_TARGET ? src_fd : target_fd, buf, sizeof(buf));
+               if (nread < 0) {
+                       log_err("read failed");
+                       return 1;
+               }
+
+               nwrite = write(direction == SRC_TO_TARGET ? target_fd : src_fd, buf, nread);
+               if (nwrite != nread) {
+                       log_err("write failed");
+                       return 1;
+               }
+       }
+}
+
+static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
+{
+       struct test_tc_peer *skel = NULL;
+       struct nstoken *nstoken = NULL;
+       int err;
+       int tunnel_pid = -1;
+       int src_fd, target_fd;
+       int ifindex;
+
+       /* Start an L3 TUN/TAP tunnel between the src and dst namespaces.
+        * This test uses TUN/TAP instead of e.g. an IPIP or GRE tunnel as those
+        * expose the L2 headers encapsulating the IP packet to BPF and hence
+        * don't have the skb in a suitable state for this test. An alternative
+        * to TUN/TAP would be e.g. Wireguard, which would appear as a pure L3
+        * device to BPF, but that requires a much more complicated setup.
+        */
+       nstoken = open_netns(NS_SRC);
+       if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC))
+               return;
+
+       src_fd = tun_open("tun_src");
+       if (!ASSERT_GE(src_fd, 0, "tun_open tun_src"))
+               goto fail;
+
+       close_netns(nstoken);
+
+       nstoken = open_netns(NS_FWD);
+       if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD))
+               goto fail;
+
+       target_fd = tun_open("tun_fwd");
+       if (!ASSERT_GE(target_fd, 0, "tun_open tun_fwd"))
+               goto fail;
+
+       tunnel_pid = fork();
+       if (!ASSERT_GE(tunnel_pid, 0, "fork tun_relay_loop"))
+               goto fail;
+
+       if (tunnel_pid == 0)
+               exit(tun_relay_loop(src_fd, target_fd));
+
+       skel = test_tc_peer__open();
+       if (!ASSERT_OK_PTR(skel, "test_tc_peer__open"))
+               goto fail;
+
+       ifindex = get_ifindex("tun_fwd");
+       if (!ASSERT_GE(ifindex, 0, "get_ifindex tun_fwd"))
+               goto fail;
+
+       skel->rodata->IFINDEX_SRC = ifindex;
+       skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd;
+
+       err = test_tc_peer__load(skel);
+       if (!ASSERT_OK(err, "test_tc_peer__load"))
+               goto fail;
+
+       err = bpf_program__pin(skel->progs.tc_src_l3, SRC_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE))
+               goto fail;
+
+       err = bpf_program__pin(skel->progs.tc_dst_l3, DST_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE))
+               goto fail;
+
+       err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE);
+       if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE))
+               goto fail;
+
+       /* Load "tc_src_l3" on the tun_fwd ingress to redirect packets
+        * towards dst, "tc_dst" on veth_dst_fwd ingress to redirect packets
+        * back, and "tc_chk" on veth_dst_fwd egress to drop non-redirected
+        * packets.
+        */
+       SYS("tc qdisc add dev tun_fwd clsact");
+       SYS("tc filter add dev tun_fwd ingress bpf da object-pinned "
+           SRC_PROG_PIN_FILE);
+
+       SYS("tc qdisc add dev veth_dst_fwd clsact");
+       SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned "
+           DST_PROG_PIN_FILE);
+       SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned "
+           CHK_PROG_PIN_FILE);
+
+       /* Setup route and neigh tables */
+       SYS("ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24");
+       SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24");
+
+       SYS("ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad");
+       SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad");
+
+       SYS("ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global");
+       SYS("ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD
+           " dev tun_src scope global");
+       SYS("ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global");
+       SYS("ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global");
+       SYS("ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD
+           " dev tun_src scope global");
+       SYS("ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global");
+
+       SYS("ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+       SYS("ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD);
+
+       if (!ASSERT_OK(set_forwarding(false), "disable forwarding"))
+               goto fail;
+
+       test_connectivity();
+
+fail:
+       if (tunnel_pid > 0) {
+               kill(tunnel_pid, SIGTERM);
+               waitpid(tunnel_pid, NULL, 0);
+       }
+       if (src_fd >= 0)
+               close(src_fd);
+       if (target_fd >= 0)
+               close(target_fd);
+       if (skel)
+               test_tc_peer__destroy(skel);
+       if (nstoken)
+               close_netns(nstoken);
+}
+
+#define RUN_TEST(name)                                                                      \
+       ({                                                                                  \
+               struct netns_setup_result setup_result;                                     \
+               if (test__start_subtest(#name))                                             \
+                       if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \
+                               if (ASSERT_OK(netns_setup_links_and_routes(&setup_result),  \
+                                             "setup links and routes"))                    \
+                                       test_ ## name(&setup_result);                       \
+                               netns_setup_namespaces("delete");                           \
+                       }                                                                   \
+       })
+
+static void *test_tc_redirect_run_tests(void *arg)
+{
+       RUN_TEST(tc_redirect_peer);
+       RUN_TEST(tc_redirect_peer_l3);
+       RUN_TEST(tc_redirect_neigh);
+       RUN_TEST(tc_redirect_neigh_fib);
+       return NULL;
+}
+
+void test_tc_redirect(void)
+{
+       pthread_t test_thread;
+       int err;
+
+       /* Run the tests in their own thread to isolate the namespace changes
+        * so they do not affect the environment of other tests.
+        * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+        */
+       err = pthread_create(&test_thread, NULL, &test_tc_redirect_run_tests, NULL);
+       if (ASSERT_OK(err, "pthread_create"))
+               ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
index b985ac4e7a814a94a9a5e54f851152bfe6eacf80..0c93d326a663f90a1f95432ec89f71b115e0213d 100644 (file)
                                 a.s6_addr32[3] == b.s6_addr32[3])
 #endif
 
-enum {
-       dev_src,
-       dev_dst,
-};
-
-struct bpf_map_def SEC("maps") ifindex_map = {
-       .type           = BPF_MAP_TYPE_ARRAY,
-       .key_size       = sizeof(int),
-       .value_size     = sizeof(int),
-       .max_entries    = 2,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
 
 static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb,
                                            __be32 addr)
@@ -79,14 +70,8 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
        return v6_equal(ip6h->daddr, addr);
 }
 
-static __always_inline int get_dev_ifindex(int which)
-{
-       int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
-
-       return ifindex ? *ifindex : 0;
-}
-
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
 {
        void *data_end = ctx_ptr(skb->data_end);
        void *data = ctx_ptr(skb->data);
@@ -98,7 +83,8 @@ SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
        return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
 }
 
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
 {
        __u8 zero[ETH_ALEN * 2];
        bool redirect = false;
@@ -119,10 +105,11 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
        if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
                return TC_ACT_SHOT;
 
-       return bpf_redirect_neigh(get_dev_ifindex(dev_src), NULL, 0, 0);
+       return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0);
 }
 
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
 {
        __u8 zero[ETH_ALEN * 2];
        bool redirect = false;
@@ -143,7 +130,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb)
        if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
                return TC_ACT_SHOT;
 
-       return bpf_redirect_neigh(get_dev_ifindex(dev_dst), NULL, 0, 0);
+       return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0);
 }
 
 char __license[] SEC("license") = "GPL";
index d82ed3457030f32584b0c23733ced91cfad77187..f7ab69cf018e5a81705a1219b87ab903774bd3a5 100644 (file)
@@ -75,7 +75,8 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb,
        return 0;
 }
 
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
 {
        void *data_end = ctx_ptr(skb->data_end);
        void *data = ctx_ptr(skb->data);
@@ -142,12 +143,14 @@ static __always_inline int tc_redir(struct __sk_buff *skb)
 /* these are identical, but keep them separate for compatibility with the
  * section names expected by test_tc_redirect.sh
  */
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
 {
        return tc_redir(skb);
 }
 
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
 {
        return tc_redir(skb);
 }
index fc84a7685aa2cc3b35a5a7e5a94896e3913d0117..fe818cd5f0109f440d702922e2055a6c875cbe34 100644 (file)
@@ -5,41 +5,59 @@
 #include <linux/bpf.h>
 #include <linux/stddef.h>
 #include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
 
 #include <bpf/bpf_helpers.h>
 
-enum {
-       dev_src,
-       dev_dst,
-};
+volatile const __u32 IFINDEX_SRC;
+volatile const __u32 IFINDEX_DST;
 
-struct bpf_map_def SEC("maps") ifindex_map = {
-       .type           = BPF_MAP_TYPE_ARRAY,
-       .key_size       = sizeof(int),
-       .value_size     = sizeof(int),
-       .max_entries    = 2,
-};
+static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
+static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66};
 
-static __always_inline int get_dev_ifindex(int which)
+SEC("classifier/chk_egress")
+int tc_chk(struct __sk_buff *skb)
 {
-       int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
+       return TC_ACT_SHOT;
+}
 
-       return ifindex ? *ifindex : 0;
+SEC("classifier/dst_ingress")
+int tc_dst(struct __sk_buff *skb)
+{
+       return bpf_redirect_peer(IFINDEX_SRC, 0);
 }
 
-SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+SEC("classifier/src_ingress")
+int tc_src(struct __sk_buff *skb)
 {
-       return TC_ACT_SHOT;
+       return bpf_redirect_peer(IFINDEX_DST, 0);
 }
 
-SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+SEC("classifier/dst_ingress_l3")
+int tc_dst_l3(struct __sk_buff *skb)
 {
-       return bpf_redirect_peer(get_dev_ifindex(dev_src), 0);
+       return bpf_redirect(IFINDEX_SRC, 0);
 }
 
-SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+SEC("classifier/src_ingress_l3")
+int tc_src_l3(struct __sk_buff *skb)
 {
-       return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0);
+       __u16 proto = skb->protocol;
+
+       if (bpf_skb_change_head(skb, ETH_HLEN, 0) != 0)
+               return TC_ACT_SHOT;
+
+       if (bpf_skb_store_bytes(skb, 0, &src_mac, ETH_ALEN, 0) != 0)
+               return TC_ACT_SHOT;
+
+       if (bpf_skb_store_bytes(skb, ETH_ALEN, &dst_mac, ETH_ALEN, 0) != 0)
+               return TC_ACT_SHOT;
+
+       if (bpf_skb_store_bytes(skb, ETH_ALEN + ETH_ALEN, &proto, sizeof(__u16), 0) != 0)
+               return TC_ACT_SHOT;
+
+       return bpf_redirect_peer(IFINDEX_DST, 0);
 }
 
 char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_tc_redirect.sh b/tools/testing/selftests/bpf/test_tc_redirect.sh
deleted file mode 100755 (executable)
index 8868aa1..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
-# between src and dst. The netns fwd has veth links to each src and dst. The
-# client is in src and server in dst. The test installs a TC BPF program to each
-# host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the
-# neigh addr population and redirect or ii) bpf_redirect_peer() for namespace
-# switch from ingress side; it also installs a checker prog on the egress side
-# to drop unexpected traffic.
-
-if [[ $EUID -ne 0 ]]; then
-       echo "This script must be run as root"
-       echo "FAIL"
-       exit 1
-fi
-
-# check that needed tools are present
-command -v nc >/dev/null 2>&1 || \
-       { echo >&2 "nc is not available"; exit 1; }
-command -v dd >/dev/null 2>&1 || \
-       { echo >&2 "dd is not available"; exit 1; }
-command -v timeout >/dev/null 2>&1 || \
-       { echo >&2 "timeout is not available"; exit 1; }
-command -v ping >/dev/null 2>&1 || \
-       { echo >&2 "ping is not available"; exit 1; }
-if command -v ping6 >/dev/null 2>&1; then PING6=ping6; else PING6=ping; fi
-command -v perl >/dev/null 2>&1 || \
-       { echo >&2 "perl is not available"; exit 1; }
-command -v jq >/dev/null 2>&1 || \
-       { echo >&2 "jq is not available"; exit 1; }
-command -v bpftool >/dev/null 2>&1 || \
-       { echo >&2 "bpftool is not available"; exit 1; }
-
-readonly GREEN='\033[0;92m'
-readonly RED='\033[0;31m'
-readonly NC='\033[0m' # No Color
-
-readonly PING_ARG="-c 3 -w 10 -q"
-
-readonly TIMEOUT=10
-
-readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)"
-readonly NS_FWD="ns-fwd-$(mktemp -u XXXXXX)"
-readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)"
-
-readonly IP4_SRC="172.16.1.100"
-readonly IP4_DST="172.16.2.100"
-
-readonly IP6_SRC="::1:dead:beef:cafe"
-readonly IP6_DST="::2:dead:beef:cafe"
-
-readonly IP4_SLL="169.254.0.1"
-readonly IP4_DLL="169.254.0.2"
-readonly IP4_NET="169.254.0.0"
-
-netns_cleanup()
-{
-       ip netns del ${NS_SRC}
-       ip netns del ${NS_FWD}
-       ip netns del ${NS_DST}
-}
-
-netns_setup()
-{
-       ip netns add "${NS_SRC}"
-       ip netns add "${NS_FWD}"
-       ip netns add "${NS_DST}"
-
-       ip link add veth_src type veth peer name veth_src_fwd
-       ip link add veth_dst type veth peer name veth_dst_fwd
-
-       ip link set veth_src netns ${NS_SRC}
-       ip link set veth_src_fwd netns ${NS_FWD}
-
-       ip link set veth_dst netns ${NS_DST}
-       ip link set veth_dst_fwd netns ${NS_FWD}
-
-       ip -netns ${NS_SRC} addr add ${IP4_SRC}/32 dev veth_src
-       ip -netns ${NS_DST} addr add ${IP4_DST}/32 dev veth_dst
-
-       # The fwd netns automatically get a v6 LL address / routes, but also
-       # needs v4 one in order to start ARP probing. IP4_NET route is added
-       # to the endpoints so that the ARP processing will reply.
-
-       ip -netns ${NS_FWD} addr add ${IP4_SLL}/32 dev veth_src_fwd
-       ip -netns ${NS_FWD} addr add ${IP4_DLL}/32 dev veth_dst_fwd
-
-       ip -netns ${NS_SRC} addr add ${IP6_SRC}/128 dev veth_src nodad
-       ip -netns ${NS_DST} addr add ${IP6_DST}/128 dev veth_dst nodad
-
-       ip -netns ${NS_SRC} link set dev veth_src up
-       ip -netns ${NS_FWD} link set dev veth_src_fwd up
-
-       ip -netns ${NS_DST} link set dev veth_dst up
-       ip -netns ${NS_FWD} link set dev veth_dst_fwd up
-
-       ip -netns ${NS_SRC} route add ${IP4_DST}/32 dev veth_src scope global
-       ip -netns ${NS_SRC} route add ${IP4_NET}/16 dev veth_src scope global
-       ip -netns ${NS_FWD} route add ${IP4_SRC}/32 dev veth_src_fwd scope global
-
-       ip -netns ${NS_SRC} route add ${IP6_DST}/128 dev veth_src scope global
-       ip -netns ${NS_FWD} route add ${IP6_SRC}/128 dev veth_src_fwd scope global
-
-       ip -netns ${NS_DST} route add ${IP4_SRC}/32 dev veth_dst scope global
-       ip -netns ${NS_DST} route add ${IP4_NET}/16 dev veth_dst scope global
-       ip -netns ${NS_FWD} route add ${IP4_DST}/32 dev veth_dst_fwd scope global
-
-       ip -netns ${NS_DST} route add ${IP6_SRC}/128 dev veth_dst scope global
-       ip -netns ${NS_FWD} route add ${IP6_DST}/128 dev veth_dst_fwd scope global
-
-       fmac_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/address)
-       fmac_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/address)
-
-       ip -netns ${NS_SRC} neigh add ${IP4_DST} dev veth_src lladdr $fmac_src
-       ip -netns ${NS_DST} neigh add ${IP4_SRC} dev veth_dst lladdr $fmac_dst
-
-       ip -netns ${NS_SRC} neigh add ${IP6_DST} dev veth_src lladdr $fmac_src
-       ip -netns ${NS_DST} neigh add ${IP6_SRC} dev veth_dst lladdr $fmac_dst
-}
-
-netns_test_connectivity()
-{
-       set +e
-
-       ip netns exec ${NS_DST} bash -c "nc -4 -l -p 9004 &"
-       ip netns exec ${NS_DST} bash -c "nc -6 -l -p 9006 &"
-
-       TEST="TCPv4 connectivity test"
-       ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP4_DST}/9004"
-       if [ $? -ne 0 ]; then
-               echo -e "${TEST}: ${RED}FAIL${NC}"
-               exit 1
-       fi
-       echo -e "${TEST}: ${GREEN}PASS${NC}"
-
-       TEST="TCPv6 connectivity test"
-       ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP6_DST}/9006"
-       if [ $? -ne 0 ]; then
-               echo -e "${TEST}: ${RED}FAIL${NC}"
-               exit 1
-       fi
-       echo -e "${TEST}: ${GREEN}PASS${NC}"
-
-       TEST="ICMPv4 connectivity test"
-       ip netns exec ${NS_SRC} ping  $PING_ARG ${IP4_DST}
-       if [ $? -ne 0 ]; then
-               echo -e "${TEST}: ${RED}FAIL${NC}"
-               exit 1
-       fi
-       echo -e "${TEST}: ${GREEN}PASS${NC}"
-
-       TEST="ICMPv6 connectivity test"
-       ip netns exec ${NS_SRC} $PING6 $PING_ARG ${IP6_DST}
-       if [ $? -ne 0 ]; then
-               echo -e "${TEST}: ${RED}FAIL${NC}"
-               exit 1
-       fi
-       echo -e "${TEST}: ${GREEN}PASS${NC}"
-
-       set -e
-}
-
-hex_mem_str()
-{
-       perl -e 'print join(" ", unpack("(H2)8", pack("L", @ARGV)))' $1
-}
-
-netns_setup_bpf()
-{
-       local obj=$1
-       local use_forwarding=${2:-0}
-
-       ip netns exec ${NS_FWD} tc qdisc add dev veth_src_fwd clsact
-       ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd ingress bpf da obj $obj sec src_ingress
-       ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd egress  bpf da obj $obj sec chk_egress
-
-       ip netns exec ${NS_FWD} tc qdisc add dev veth_dst_fwd clsact
-       ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd ingress bpf da obj $obj sec dst_ingress
-       ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd egress  bpf da obj $obj sec chk_egress
-
-       if [ "$use_forwarding" -eq "1" ]; then
-               # bpf_fib_lookup() checks if forwarding is enabled
-               ip netns exec ${NS_FWD} sysctl -w net.ipv4.ip_forward=1
-               ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_dst_fwd.forwarding=1
-               ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_src_fwd.forwarding=1
-               return 0
-       fi
-
-       veth_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/ifindex)
-       veth_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/ifindex)
-
-       progs=$(ip netns exec ${NS_FWD} bpftool net --json | jq -r '.[] | .tc | map(.id) | .[]')
-       for prog in $progs; do
-               map=$(bpftool prog show id $prog --json | jq -r '.map_ids | .? | .[]')
-               if [ ! -z "$map" ]; then
-                       bpftool map update id $map key hex $(hex_mem_str 0) value hex $(hex_mem_str $veth_src)
-                       bpftool map update id $map key hex $(hex_mem_str 1) value hex $(hex_mem_str $veth_dst)
-               fi
-       done
-}
-
-trap netns_cleanup EXIT
-set -e
-
-netns_setup
-netns_setup_bpf test_tc_neigh.o
-netns_test_connectivity
-netns_cleanup
-netns_setup
-netns_setup_bpf test_tc_neigh_fib.o 1
-netns_test_connectivity
-netns_cleanup
-netns_setup
-netns_setup_bpf test_tc_peer.o
-netns_test_connectivity
index 07eaa04412ae40dfbdc034e9e28076fc63d3035e..8ab94d65f3d5439c1f57807cfb8c798ef3de6cf3 100644 (file)
        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
        BPF_EXIT_INSN(),
        },
-       .result_unpriv = REJECT,
-       .errstr_unpriv = "invalid write to stack R1 off=0 size=1",
        .result = ACCEPT,
        .retval = 42,
 },
index e5913fd3b9030d41f944a1fd0babdd629d31293e..7ae2859d495c58a3fdea4c07956f656c75927208 100644 (file)
        },
        .fixup_map_array_48b = { 3 },
        .result = ACCEPT,
-       .result_unpriv = REJECT,
-       .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
        .retval = 1,
 },
 {
        },
        .fixup_map_array_48b = { 3 },
        .result = ACCEPT,
-       .result_unpriv = REJECT,
-       .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
        .retval = 1,
 },
 {
        },
        .fixup_map_array_48b = { 3 },
        .result = ACCEPT,
-       .result_unpriv = REJECT,
-       .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
        .retval = 1,
 },
 {
        },
        .fixup_map_array_48b = { 3 },
        .result = ACCEPT,
-       .result_unpriv = REJECT,
-       .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
        .retval = 1,
 },
 {
index cf69b2fcce59ede2d862ea6a69fd41757ec3b07d..dd61118df66edbcde89f92a2626d2e9186122fda 100644 (file)
@@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
        cp $< $@
        chmod -x $@
 $(OUTPUT)/load_address_4096: load_address.c
-       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@
+       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
 $(OUTPUT)/load_address_2097152: load_address.c
-       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@
+       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
 $(OUTPUT)/load_address_16777216: load_address.c
-       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@
+       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
index bd83158e0e0b580d2577985d4bf6148a47669e3e..524c857a049c3aff62968a15535071329eba4c4c 100644 (file)
@@ -41,5 +41,6 @@
 /kvm_create_max_vcpus
 /kvm_page_table_test
 /memslot_modification_stress_test
+/memslot_perf_test
 /set_memory_region_test
 /steal_time
index e439d027939dd34b3a75eba497675dea1cd5b47d..daaee1888b1288143a308a6b011fb8c627f0174e 100644 (file)
@@ -33,7 +33,7 @@ ifeq ($(ARCH),s390)
        UNAME_M := s390x
 endif
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
@@ -74,6 +74,7 @@ TEST_GEN_PROGS_x86_64 += hardware_disable_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_x86_64 += kvm_page_table_test
 TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
+TEST_GEN_PROGS_x86_64 += memslot_perf_test
 TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 
index 5f7a229c3af10174f7cd97ca2a83492108527add..b74704305835e5b5398fa2b46fa99fd7147e0973 100644 (file)
@@ -9,6 +9,7 @@
 
 #define _GNU_SOURCE /* for pipe2 */
 
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>
@@ -38,6 +39,7 @@
 
 static int nr_vcpus = 1;
 static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static size_t demand_paging_size;
 static char *guest_data_prototype;
 
 static void *vcpu_worker(void *data)
@@ -71,36 +73,51 @@ static void *vcpu_worker(void *data)
        return NULL;
 }
 
-static int handle_uffd_page_request(int uffd, uint64_t addr)
+static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
 {
-       pid_t tid;
+       pid_t tid = syscall(__NR_gettid);
        struct timespec start;
        struct timespec ts_diff;
-       struct uffdio_copy copy;
        int r;
 
-       tid = syscall(__NR_gettid);
+       clock_gettime(CLOCK_MONOTONIC, &start);
 
-       copy.src = (uint64_t)guest_data_prototype;
-       copy.dst = addr;
-       copy.len = perf_test_args.host_page_size;
-       copy.mode = 0;
+       if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
+               struct uffdio_copy copy;
 
-       clock_gettime(CLOCK_MONOTONIC, &start);
+               copy.src = (uint64_t)guest_data_prototype;
+               copy.dst = addr;
+               copy.len = demand_paging_size;
+               copy.mode = 0;
 
-       r = ioctl(uffd, UFFDIO_COPY, &copy);
-       if (r == -1) {
-               pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n",
-                       addr, tid, errno);
-               return r;
+               r = ioctl(uffd, UFFDIO_COPY, &copy);
+               if (r == -1) {
+                       pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n",
+                               addr, tid, errno);
+                       return r;
+               }
+       } else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+               struct uffdio_continue cont = {0};
+
+               cont.range.start = addr;
+               cont.range.len = demand_paging_size;
+
+               r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
+               if (r == -1) {
+                       pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n",
+                               addr, tid, errno);
+                       return r;
+               }
+       } else {
+               TEST_FAIL("Invalid uffd mode %d", uffd_mode);
        }
 
        ts_diff = timespec_elapsed(start);
 
-       PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
+       PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
                       timespec_to_ns(ts_diff));
        PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
-                      perf_test_args.host_page_size, addr, tid);
+                      demand_paging_size, addr, tid);
 
        return 0;
 }
@@ -108,6 +125,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
 bool quit_uffd_thread;
 
 struct uffd_handler_args {
+       int uffd_mode;
        int uffd;
        int pipefd;
        useconds_t delay;
@@ -169,7 +187,7 @@ static void *uffd_handler_thread_fn(void *arg)
                if (r == -1) {
                        if (errno == EAGAIN)
                                continue;
-                       pr_info("Read of uffd gor errno %d", errno);
+                       pr_info("Read of uffd got errno %d\n", errno);
                        return NULL;
                }
 
@@ -184,7 +202,7 @@ static void *uffd_handler_thread_fn(void *arg)
                if (delay)
                        usleep(delay);
                addr =  msg.arg.pagefault.address;
-               r = handle_uffd_page_request(uffd, addr);
+               r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr);
                if (r < 0)
                        return NULL;
                pages++;
@@ -198,43 +216,53 @@ static void *uffd_handler_thread_fn(void *arg)
        return NULL;
 }
 
-static int setup_demand_paging(struct kvm_vm *vm,
-                              pthread_t *uffd_handler_thread, int pipefd,
-                              useconds_t uffd_delay,
-                              struct uffd_handler_args *uffd_args,
-                              void *hva, uint64_t len)
+static void setup_demand_paging(struct kvm_vm *vm,
+                               pthread_t *uffd_handler_thread, int pipefd,
+                               int uffd_mode, useconds_t uffd_delay,
+                               struct uffd_handler_args *uffd_args,
+                               void *hva, void *alias, uint64_t len)
 {
+       bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
        int uffd;
        struct uffdio_api uffdio_api;
        struct uffdio_register uffdio_register;
+       uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
 
-       uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
-       if (uffd == -1) {
-               pr_info("uffd creation failed\n");
-               return -1;
+       PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
+                      is_minor ? "MINOR" : "MISSING",
+                      is_minor ? "UFFDIO_CONTINUE" : "UFFDIO_COPY");
+
+       /* In order to get minor faults, prefault via the alias. */
+       if (is_minor) {
+               size_t p;
+
+               expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+
+               TEST_ASSERT(alias != NULL, "Alias required for minor faults");
+               for (p = 0; p < (len / demand_paging_size); ++p) {
+                       memcpy(alias + (p * demand_paging_size),
+                              guest_data_prototype, demand_paging_size);
+               }
        }
 
+       uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+       TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
+
        uffdio_api.api = UFFD_API;
        uffdio_api.features = 0;
-       if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
-               pr_info("ioctl uffdio_api failed\n");
-               return -1;
-       }
+       TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
+                   "ioctl UFFDIO_API failed: %" PRIu64,
+                   (uint64_t)uffdio_api.api);
 
        uffdio_register.range.start = (uint64_t)hva;
        uffdio_register.range.len = len;
-       uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
-       if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
-               pr_info("ioctl uffdio_register failed\n");
-               return -1;
-       }
-
-       if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) !=
-                       UFFD_API_RANGE_IOCTLS) {
-               pr_info("unexpected userfaultfd ioctl set\n");
-               return -1;
-       }
+       uffdio_register.mode = uffd_mode;
+       TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
+                   "ioctl UFFDIO_REGISTER failed");
+       TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
+                   expected_ioctls, "missing userfaultfd ioctls");
 
+       uffd_args->uffd_mode = uffd_mode;
        uffd_args->uffd = uffd;
        uffd_args->pipefd = pipefd;
        uffd_args->delay = uffd_delay;
@@ -243,13 +271,12 @@ static int setup_demand_paging(struct kvm_vm *vm,
 
        PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
                       hva, hva + len);
-
-       return 0;
 }
 
 struct test_params {
-       bool use_uffd;
+       int uffd_mode;
        useconds_t uffd_delay;
+       enum vm_mem_backing_src_type src_type;
        bool partition_vcpu_memory_access;
 };
 
@@ -267,14 +294,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        int r;
 
        vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
-                                VM_MEM_SRC_ANONYMOUS);
+                                p->src_type);
 
        perf_test_args.wr_fract = 1;
 
-       guest_data_prototype = malloc(perf_test_args.host_page_size);
+       demand_paging_size = get_backing_src_pagesz(p->src_type);
+
+       guest_data_prototype = malloc(demand_paging_size);
        TEST_ASSERT(guest_data_prototype,
                    "Failed to allocate buffer for guest data pattern");
-       memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size);
+       memset(guest_data_prototype, 0xAB, demand_paging_size);
 
        vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
@@ -282,7 +311,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
                              p->partition_vcpu_memory_access);
 
-       if (p->use_uffd) {
+       if (p->uffd_mode) {
                uffd_handler_threads =
                        malloc(nr_vcpus * sizeof(*uffd_handler_threads));
                TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
@@ -296,6 +325,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                        vm_paddr_t vcpu_gpa;
                        void *vcpu_hva;
+                       void *vcpu_alias;
                        uint64_t vcpu_mem_size;
 
 
@@ -310,8 +340,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
                                       vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
 
-                       /* Cache the HVA pointer of the region */
+                       /* Cache the host addresses of the region */
                        vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
+                       vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
 
                        /*
                         * Set up user fault fd to handle demand paging
@@ -321,13 +352,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                                  O_CLOEXEC | O_NONBLOCK);
                        TEST_ASSERT(!r, "Failed to set up pipefd");
 
-                       r = setup_demand_paging(vm,
-                                               &uffd_handler_threads[vcpu_id],
-                                               pipefds[vcpu_id * 2],
-                                               p->uffd_delay, &uffd_args[vcpu_id],
-                                               vcpu_hva, vcpu_mem_size);
-                       if (r < 0)
-                               exit(-r);
+                       setup_demand_paging(vm, &uffd_handler_threads[vcpu_id],
+                                           pipefds[vcpu_id * 2], p->uffd_mode,
+                                           p->uffd_delay, &uffd_args[vcpu_id],
+                                           vcpu_hva, vcpu_alias,
+                                           vcpu_mem_size);
                }
        }
 
@@ -355,7 +384,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        pr_info("All vCPU threads joined\n");
 
-       if (p->use_uffd) {
+       if (p->uffd_mode) {
                char c;
 
                /* Tell the user fault fd handler threads to quit */
@@ -377,7 +406,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        free(guest_data_prototype);
        free(vcpu_threads);
-       if (p->use_uffd) {
+       if (p->uffd_mode) {
                free(uffd_handler_threads);
                free(uffd_args);
                free(pipefds);
@@ -387,17 +416,19 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 static void help(char *name)
 {
        puts("");
-       printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
-              "          [-b memory] [-v vcpus] [-o]\n", name);
+       printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
+              "          [-b memory] [-t type] [-v vcpus] [-o]\n", name);
        guest_modes_help();
-       printf(" -u: use User Fault FD to handle vCPU page\n"
-              "     faults.\n");
+       printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
+              "     UFFD registration mode: 'MISSING' or 'MINOR'.\n");
        printf(" -d: add a delay in usec to the User Fault\n"
               "     FD handler to simulate demand paging\n"
               "     overheads. Ignored without -u.\n");
        printf(" -b: specify the size of the memory region which should be\n"
               "     demand paged by each vCPU. e.g. 10M or 3G.\n"
               "     Default: 1G\n");
+       printf(" -t: The type of backing memory to use. Default: anonymous\n");
+       backing_src_help();
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
@@ -409,19 +440,24 @@ int main(int argc, char *argv[])
 {
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
+               .src_type = VM_MEM_SRC_ANONYMOUS,
                .partition_vcpu_memory_access = true,
        };
        int opt;
 
        guest_modes_append_default();
 
-       while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) {
+       while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
                switch (opt) {
                case 'm':
                        guest_modes_cmdline(optarg);
                        break;
                case 'u':
-                       p.use_uffd = true;
+                       if (!strcmp("MISSING", optarg))
+                               p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
+                       else if (!strcmp("MINOR", optarg))
+                               p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
+                       TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
                        break;
                case 'd':
                        p.uffd_delay = strtoul(optarg, NULL, 0);
@@ -430,6 +466,9 @@ int main(int argc, char *argv[])
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
+               case 't':
+                       p.src_type = parse_backing_src_type(optarg);
+                       break;
                case 'v':
                        nr_vcpus = atoi(optarg);
                        TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
@@ -445,6 +484,11 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
+           !backing_src_is_shared(p.src_type)) {
+               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+       }
+
        for_each_guest_mode(run_test, &p);
 
        return 0;
index 5aadf84c91c04911d6b574f8f7f4e9e8e8d3b91c..4b8db3bce6102324eede96581fd2ee9bfa6aae97 100644 (file)
@@ -132,6 +132,36 @@ static void run_test(uint32_t run)
        TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
 }
 
+void wait_for_child_setup(pid_t pid)
+{
+       /*
+        * Wait for the child to post to the semaphore, but wake up periodically
+        * to check if the child exited prematurely.
+        */
+       for (;;) {
+               const struct timespec wait_period = { .tv_sec = 1 };
+               int status;
+
+               if (!sem_timedwait(sem, &wait_period))
+                       return;
+
+               /* Child is still running, keep waiting. */
+               if (pid != waitpid(pid, &status, WNOHANG))
+                       continue;
+
+               /*
+                * Child is no longer running, which is not expected.
+                *
+                * If it exited with a non-zero status, we explicitly forward
+                * the child's status in case it exited with KSFT_SKIP.
+                */
+               if (WIFEXITED(status))
+                       exit(WEXITSTATUS(status));
+               else
+                       TEST_ASSERT(false, "Child exited unexpectedly");
+       }
+}
+
 int main(int argc, char **argv)
 {
        uint32_t i;
@@ -148,7 +178,7 @@ int main(int argc, char **argv)
                        run_test(i); /* This function always exits */
 
                pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
-               sem_wait(sem);
+               wait_for_child_setup(pid);
                r = (rand() % DELAY_US_MAX) + 1;
                pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
                usleep(r);
index a8f022794ce3cbf5640aae6aed274196d540cb1d..35739567189e09d8ad9533440d965e8592d70a9d 100644 (file)
@@ -43,6 +43,7 @@ enum vm_guest_mode {
        VM_MODE_P40V48_4K,
        VM_MODE_P40V48_64K,
        VM_MODE_PXXV48_4K,      /* For 48bits VA but ANY bits PA */
+       VM_MODE_P47V64_4K,
        NUM_VM_MODES,
 };
 
@@ -60,7 +61,7 @@ enum vm_guest_mode {
 
 #elif defined(__s390x__)
 
-#define VM_MODE_DEFAULT                        VM_MODE_P52V48_4K
+#define VM_MODE_DEFAULT                        VM_MODE_P47V64_4K
 #define MIN_PAGE_SHIFT                 12U
 #define ptes_per_page(page_size)       ((page_size) / 16)
 
@@ -77,6 +78,7 @@ struct vm_guest_mode_params {
 };
 extern const struct vm_guest_mode_params vm_guest_mode_params[];
 
+int open_kvm_dev_path_or_exit(void);
 int kvm_check_cap(long cap);
 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
 int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
@@ -146,6 +148,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
 
 /*
  * Address Guest Virtual to Guest Physical
@@ -283,10 +286,11 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
                                            uint32_t num_percpu_pages, void *guest_code,
                                            uint32_t vcpuids[]);
 
-/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as a parameter */
 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
-                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
-                                   void *guest_code, uint32_t vcpuids[]);
+                                   uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+                                   uint32_t num_percpu_pages, void *guest_code,
+                                   uint32_t vcpuids[]);
 
 /*
  * Adds a vCPU with reasonable defaults (e.g. a stack)
@@ -302,7 +306,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
 unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
-unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+uint64_t vm_get_max_gfn(struct kvm_vm *vm);
 int vm_get_fd(struct kvm_vm *vm);
 
 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
index fade3130eb01e3fa717589dac95cef39d3a4f1ae..d79be15dd3d204d29ec66d3fc997373709077b11 100644 (file)
@@ -17,6 +17,7 @@
 #include <errno.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include <sys/mman.h>
 #include "kselftest.h"
 
 static inline int _no_printf(const char *format, ...) { return 0; }
@@ -84,6 +85,8 @@ enum vm_mem_backing_src_type {
        VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB,
        VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB,
        VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB,
+       VM_MEM_SRC_SHMEM,
+       VM_MEM_SRC_SHARED_HUGETLB,
        NUM_SRC_TYPES,
 };
 
@@ -100,4 +103,13 @@ size_t get_backing_src_pagesz(uint32_t i);
 void backing_src_help(void);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 
+/*
+ * Whether or not the given source type is shared memory (as opposed to
+ * anonymous).
+ */
+static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
+{
+       return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
+}
+
 #endif /* SELFTEST_KVM_TEST_UTIL_H */
index 1c4753fff19e00a0e45a3c02e45a5ed30e451a9c..82171f17c1d7f4416667688b5c12a8466a9b9346 100644 (file)
@@ -268,7 +268,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 
        /* Create a VM with enough guest pages */
        guest_num_pages = test_mem_size / guest_page_size;
-       vm = vm_create_with_vcpus(mode, nr_vcpus,
+       vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
                                  guest_num_pages, 0, guest_code, NULL);
 
        /* Align down GPA of the testing memslot */
index fc83f6c5902dd994125229bb2b3478795aa72705..5c70596dd1b9883421e9dfc03c8f1813c4bc940e 100644 (file)
@@ -31,6 +31,34 @@ static void *align(void *x, size_t size)
        return (void *) (((size_t) x + mask) & ~mask);
 }
 
+/*
+ * Open KVM_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ *   flags - The flags to pass when opening KVM_DEV_PATH.
+ *
+ * Return:
+ *   The opened file descriptor of /dev/kvm.
+ */
+static int _open_kvm_dev_path_or_exit(int flags)
+{
+       int fd;
+
+       fd = open(KVM_DEV_PATH, flags);
+       if (fd < 0) {
+               print_skip("%s not available, is KVM loaded? (errno: %d)",
+                          KVM_DEV_PATH, errno);
+               exit(KSFT_SKIP);
+       }
+
+       return fd;
+}
+
+int open_kvm_dev_path_or_exit(void)
+{
+       return _open_kvm_dev_path_or_exit(O_RDONLY);
+}
+
 /*
  * Capability
  *
@@ -52,10 +80,7 @@ int kvm_check_cap(long cap)
        int ret;
        int kvm_fd;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
-
+       kvm_fd = open_kvm_dev_path_or_exit();
        ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
        TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
                "  rc: %i errno: %i", ret, errno);
@@ -128,9 +153,7 @@ void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
 
 static void vm_open(struct kvm_vm *vm, int perm)
 {
-       vm->kvm_fd = open(KVM_DEV_PATH, perm);
-       if (vm->kvm_fd < 0)
-               exit(KSFT_SKIP);
+       vm->kvm_fd = _open_kvm_dev_path_or_exit(perm);
 
        if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
                print_skip("immediate_exit not available");
@@ -152,6 +175,7 @@ const char *vm_guest_mode_string(uint32_t i)
                [VM_MODE_P40V48_4K]     = "PA-bits:40,  VA-bits:48,  4K pages",
                [VM_MODE_P40V48_64K]    = "PA-bits:40,  VA-bits:48, 64K pages",
                [VM_MODE_PXXV48_4K]     = "PA-bits:ANY, VA-bits:48,  4K pages",
+               [VM_MODE_P47V64_4K]     = "PA-bits:47,  VA-bits:64,  4K pages",
        };
        _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
                       "Missing new mode strings?");
@@ -169,6 +193,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
        { 40, 48,  0x1000, 12 },
        { 40, 48, 0x10000, 16 },
        {  0,  0,  0x1000, 12 },
+       { 47, 64,  0x1000, 12 },
 };
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
               "Missing new mode params?");
@@ -203,7 +228,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        TEST_ASSERT(vm != NULL, "Insufficient Memory");
 
        INIT_LIST_HEAD(&vm->vcpus);
-       INIT_LIST_HEAD(&vm->userspace_mem_regions);
+       vm->regions.gpa_tree = RB_ROOT;
+       vm->regions.hva_tree = RB_ROOT;
+       hash_init(vm->regions.slot_hash);
 
        vm->mode = mode;
        vm->type = 0;
@@ -252,6 +279,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
                TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
 #endif
                break;
+       case VM_MODE_P47V64_4K:
+               vm->pgtable_levels = 5;
+               break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
        }
@@ -283,21 +313,50 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        return vm;
 }
 
+/*
+ * VM Create with customized parameters
+ *
+ * Input Args:
+ *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
+ *   nr_vcpus - VCPU count
+ *   slot0_mem_pages - Slot0 physical memory size
+ *   extra_mem_pages - Non-slot0 physical memory total size
+ *   num_percpu_pages - Per-cpu physical memory pages
+ *   guest_code - Guest entry point
+ *   vcpuids - VCPU IDs
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Pointer to opaque structure that describes the created VM.
+ *
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
+ * with customized slot0 memory size, at least 512 pages currently.
+ * extra_mem_pages is only used to calculate the maximum page table size,
+ * no real memory allocation for non-slot0 memory in this function.
+ */
 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
-                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
-                                   void *guest_code, uint32_t vcpuids[])
+                                   uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+                                   uint32_t num_percpu_pages, void *guest_code,
+                                   uint32_t vcpuids[])
 {
+       uint64_t vcpu_pages, extra_pg_pages, pages;
+       struct kvm_vm *vm;
+       int i;
+
+       /* Force slot0 memory size to be no smaller than DEFAULT_GUEST_PHY_PAGES */
+       if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
+               slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
+
        /* The maximum page table size for a memory region will be when the
         * smallest pages are used. Considering each page contains x page
         * table descriptors, the total extra size for page tables (for extra
         * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
         * than N/x*2.
         */
-       uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
-       uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
-       uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
-       struct kvm_vm *vm;
-       int i;
+       vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+       extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+       pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
 
        TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
                    "nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -329,8 +388,8 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
                                            uint32_t num_percpu_pages, void *guest_code,
                                            uint32_t vcpuids[])
 {
-       return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
-                                   num_percpu_pages, guest_code, vcpuids);
+       return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
+                                   extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
 }
 
 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
@@ -355,13 +414,14 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
  */
 void kvm_vm_restart(struct kvm_vm *vmp, int perm)
 {
+       int ctr;
        struct userspace_mem_region *region;
 
        vm_open(vmp, perm);
        if (vmp->has_irqchip)
                vm_create_irqchip(vmp);
 
-       list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
+       hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
                int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
                TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                            "  rc: %i errno: %i\n"
@@ -424,14 +484,21 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
 static struct userspace_mem_region *
 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
 {
-       struct userspace_mem_region *region;
+       struct rb_node *node;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       for (node = vm->regions.gpa_tree.rb_node; node; ) {
+               struct userspace_mem_region *region =
+                       container_of(node, struct userspace_mem_region, gpa_node);
                uint64_t existing_start = region->region.guest_phys_addr;
                uint64_t existing_end = region->region.guest_phys_addr
                        + region->region.memory_size - 1;
                if (start <= existing_end && end >= existing_start)
                        return region;
+
+               if (start < existing_start)
+                       node = node->rb_left;
+               else
+                       node = node->rb_right;
        }
 
        return NULL;
@@ -546,11 +613,16 @@ void kvm_vm_release(struct kvm_vm *vmp)
 }
 
 static void __vm_mem_region_delete(struct kvm_vm *vm,
-                                  struct userspace_mem_region *region)
+                                  struct userspace_mem_region *region,
+                                  bool unlink)
 {
        int ret;
 
-       list_del(&region->list);
+       if (unlink) {
+               rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
+               rb_erase(&region->hva_node, &vm->regions.hva_tree);
+               hash_del(&region->slot_node);
+       }
 
        region->region.memory_size = 0;
        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
@@ -569,14 +641,16 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
  */
 void kvm_vm_free(struct kvm_vm *vmp)
 {
-       struct userspace_mem_region *region, *tmp;
+       int ctr;
+       struct hlist_node *node;
+       struct userspace_mem_region *region;
 
        if (vmp == NULL)
                return;
 
        /* Free userspace_mem_regions. */
-       list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
-               __vm_mem_region_delete(vmp, region);
+       hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
+               __vm_mem_region_delete(vmp, region, false);
 
        /* Free sparsebit arrays. */
        sparsebit_free(&vmp->vpages_valid);
@@ -658,13 +732,64 @@ int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
        return 0;
 }
 
+static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
+                                              struct userspace_mem_region *region)
+{
+       struct rb_node **cur, *parent;
+
+       for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
+               struct userspace_mem_region *cregion;
+
+               cregion = container_of(*cur, typeof(*cregion), gpa_node);
+               parent = *cur;
+               if (region->region.guest_phys_addr <
+                   cregion->region.guest_phys_addr)
+                       cur = &(*cur)->rb_left;
+               else {
+                       TEST_ASSERT(region->region.guest_phys_addr !=
+                                   cregion->region.guest_phys_addr,
+                                   "Duplicate GPA in region tree");
+
+                       cur = &(*cur)->rb_right;
+               }
+       }
+
+       rb_link_node(&region->gpa_node, parent, cur);
+       rb_insert_color(&region->gpa_node, gpa_tree);
+}
+
+static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
+                                              struct userspace_mem_region *region)
+{
+       struct rb_node **cur, *parent;
+
+       for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
+               struct userspace_mem_region *cregion;
+
+               cregion = container_of(*cur, typeof(*cregion), hva_node);
+               parent = *cur;
+               if (region->host_mem < cregion->host_mem)
+                       cur = &(*cur)->rb_left;
+               else {
+                       TEST_ASSERT(region->host_mem !=
+                                   cregion->host_mem,
+                                   "Duplicate HVA in region tree");
+
+                       cur = &(*cur)->rb_right;
+               }
+       }
+
+       rb_link_node(&region->hva_node, parent, cur);
+       rb_insert_color(&region->hva_node, hva_tree);
+}
+
 /*
  * VM Userspace Memory Region Add
  *
  * Input Args:
  *   vm - Virtual Machine
- *   backing_src - Storage source for this region.
- *                 NULL to use anonymous memory.
+ *   src_type - Storage source for this region.
+ *              VM_MEM_SRC_ANONYMOUS to use anonymous memory.
  *   guest_paddr - Starting guest physical address
  *   slot - KVM region slot
  *   npages - Number of physical pages
@@ -722,7 +847,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                        (uint64_t) region->region.memory_size);
 
        /* Confirm no region with the requested slot already exists. */
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+                              slot) {
                if (region->region.slot != slot)
                        continue;
 
@@ -755,11 +881,30 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        if (alignment > 1)
                region->mmap_size += alignment;
 
+       region->fd = -1;
+       if (backing_src_is_shared(src_type)) {
+               int memfd_flags = MFD_CLOEXEC;
+
+               if (src_type == VM_MEM_SRC_SHARED_HUGETLB)
+                       memfd_flags |= MFD_HUGETLB;
+
+               region->fd = memfd_create("kvm_selftest", memfd_flags);
+               TEST_ASSERT(region->fd != -1,
+                           "memfd_create failed, errno: %i", errno);
+
+               ret = ftruncate(region->fd, region->mmap_size);
+               TEST_ASSERT(ret == 0, "ftruncate failed, errno: %i", errno);
+
+               ret = fallocate(region->fd,
+                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
+                               region->mmap_size);
+               TEST_ASSERT(ret == 0, "fallocate failed, errno: %i", errno);
+       }
+
        region->mmap_start = mmap(NULL, region->mmap_size,
                                  PROT_READ | PROT_WRITE,
-                                 MAP_PRIVATE | MAP_ANONYMOUS
-                                 | vm_mem_backing_src_alias(src_type)->flag,
-                                 -1, 0);
+                                 vm_mem_backing_src_alias(src_type)->flag,
+                                 region->fd, 0);
        TEST_ASSERT(region->mmap_start != MAP_FAILED,
                    "test_malloc failed, mmap_start: %p errno: %i",
                    region->mmap_start, errno);
@@ -793,8 +938,23 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                ret, errno, slot, flags,
                guest_paddr, (uint64_t) region->region.memory_size);
 
-       /* Add to linked-list of memory regions. */
-       list_add(&region->list, &vm->userspace_mem_regions);
+       /* Add to quick lookup data structures */
+       vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
+       vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
+       hash_add(vm->regions.slot_hash, &region->slot_node, slot);
+
+       /* If shared memory, create an alias. */
+       if (region->fd >= 0) {
+               region->mmap_alias = mmap(NULL, region->mmap_size,
+                                         PROT_READ | PROT_WRITE,
+                                         vm_mem_backing_src_alias(src_type)->flag,
+                                         region->fd, 0);
+               TEST_ASSERT(region->mmap_alias != MAP_FAILED,
+                           "mmap of alias failed, errno: %i", errno);
+
+               /* Align host alias address */
+               region->host_alias = align(region->mmap_alias, alignment);
+       }
 }
 
 /*
@@ -817,10 +977,10 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
        struct userspace_mem_region *region;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+                              memslot)
                if (region->region.slot == memslot)
                        return region;
-       }
 
        fprintf(stderr, "No mem region with the requested slot found,\n"
                "  requested slot: %u\n", memslot);
@@ -905,7 +1065,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
  */
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
 {
-       __vm_mem_region_delete(vm, memslot2region(vm, slot));
+       __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
 }
 
 /*
@@ -925,9 +1085,7 @@ static int vcpu_mmap_sz(void)
 {
        int dev_fd, ret;
 
-       dev_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (dev_fd < 0)
-               exit(KSFT_SKIP);
+       dev_fd = open_kvm_dev_path_or_exit();
 
        ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
        TEST_ASSERT(ret >= sizeof(struct kvm_run),
@@ -1099,6 +1257,9 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
        uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
        virt_pgd_alloc(vm, pgd_memslot);
+       vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
+                                             KVM_UTIL_MIN_PFN * vm->page_size,
+                                             data_memslot);
 
        /*
         * Find an unused range of virtual page addresses of at least
@@ -1108,11 +1269,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 
        /* Map the virtual pages. */
        for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
-               pages--, vaddr += vm->page_size) {
-               vm_paddr_t paddr;
-
-               paddr = vm_phy_page_alloc(vm,
-                               KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
+               pages--, vaddr += vm->page_size, paddr += vm->page_size) {
 
                virt_pg_map(vm, vaddr, paddr, pgd_memslot);
 
@@ -1177,16 +1334,14 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
        struct userspace_mem_region *region;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
-               if ((gpa >= region->region.guest_phys_addr)
-                       && (gpa <= (region->region.guest_phys_addr
-                               + region->region.memory_size - 1)))
-                       return (void *) ((uintptr_t) region->host_mem
-                               + (gpa - region->region.guest_phys_addr));
+       region = userspace_mem_region_find(vm, gpa, gpa);
+       if (!region) {
+               TEST_FAIL("No vm physical memory at 0x%lx", gpa);
+               return NULL;
        }
 
-       TEST_FAIL("No vm physical memory at 0x%lx", gpa);
-       return NULL;
+       return (void *)((uintptr_t)region->host_mem
+               + (gpa - region->region.guest_phys_addr));
 }
 
 /*
@@ -1208,21 +1363,64 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
  */
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 {
-       struct userspace_mem_region *region;
+       struct rb_node *node;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
-               if ((hva >= region->host_mem)
-                       && (hva <= (region->host_mem
-                               + region->region.memory_size - 1)))
-                       return (vm_paddr_t) ((uintptr_t)
-                               region->region.guest_phys_addr
-                               + (hva - (uintptr_t) region->host_mem));
+       for (node = vm->regions.hva_tree.rb_node; node; ) {
+               struct userspace_mem_region *region =
+                       container_of(node, struct userspace_mem_region, hva_node);
+
+               if (hva >= region->host_mem) {
+                       if (hva <= (region->host_mem
+                               + region->region.memory_size - 1))
+                               return (vm_paddr_t)((uintptr_t)
+                                       region->region.guest_phys_addr
+                                       + (hva - (uintptr_t)region->host_mem));
+
+                       node = node->rb_right;
+               } else
+                       node = node->rb_left;
        }
 
        TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
        return -1;
 }
 
+/*
+ * Address VM physical to Host Virtual *alias*.
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   gpa - VM physical address
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Equivalent address within the host virtual *alias* area, or NULL
+ *   (without failing the test) if the guest memory is not shared (so
+ *   no alias exists).
+ *
+ * When vm_create() and related functions are called with a shared memory
+ * src_type, we also create a writable, shared alias mapping of the
+ * underlying guest memory. This allows the host to manipulate guest memory
+ * without mapping that memory in the guest's address space. And, for
+ * userfaultfd-based demand paging, we can do so without triggering userfaults.
+ */
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+       struct userspace_mem_region *region;
+       uintptr_t offset;
+
+       region = userspace_mem_region_find(vm, gpa, gpa);
+       if (!region)
+               return NULL;
+
+       if (!region->host_alias)
+               return NULL;
+
+       offset = gpa - region->region.guest_phys_addr;
+       return (void *) ((uintptr_t) region->host_alias + offset);
+}
+
 /*
  * VM Create IRQ Chip
  *
@@ -1822,6 +2020,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
  */
 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
+       int ctr;
        struct userspace_mem_region *region;
        struct vcpu *vcpu;
 
@@ -1829,7 +2028,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
        fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
        fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
        fprintf(stream, "%*sMem Regions:\n", indent, "");
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
                fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
                        "host_virt: %p\n", indent + 2, "",
                        (uint64_t) region->region.guest_phys_addr,
@@ -2015,10 +2214,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
 
        if (vm == NULL) {
                /* Ensure that the KVM vendor-specific module is loaded. */
-               f = fopen(KVM_DEV_PATH, "r");
-               TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
-                           errno);
-               fclose(f);
+               close(open_kvm_dev_path_or_exit());
        }
 
        f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
@@ -2041,7 +2237,7 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
        return vm->page_shift;
 }
 
-unsigned int vm_get_max_gfn(struct kvm_vm *vm)
+uint64_t vm_get_max_gfn(struct kvm_vm *vm)
 {
        return vm->max_gfn;
 }
index 91ce1b5d480b2d8b0987851f8864b930d27a6ff6..a03febc24ba633a5574de2776d60f2c9f97d5bf8 100644 (file)
@@ -8,6 +8,9 @@
 #ifndef SELFTEST_KVM_UTIL_INTERNAL_H
 #define SELFTEST_KVM_UTIL_INTERNAL_H
 
+#include "linux/hashtable.h"
+#include "linux/rbtree.h"
+
 #include "sparsebit.h"
 
 struct userspace_mem_region {
@@ -16,9 +19,13 @@ struct userspace_mem_region {
        int fd;
        off_t offset;
        void *host_mem;
+       void *host_alias;
        void *mmap_start;
+       void *mmap_alias;
        size_t mmap_size;
-       struct list_head list;
+       struct rb_node gpa_node;
+       struct rb_node hva_node;
+       struct hlist_node slot_node;
 };
 
 struct vcpu {
@@ -31,6 +38,12 @@ struct vcpu {
        uint32_t dirty_gfns_count;
 };
 
+struct userspace_mem_regions {
+       struct rb_root gpa_tree;
+       struct rb_root hva_tree;
+       DECLARE_HASHTABLE(slot_hash, 9);
+};
+
 struct kvm_vm {
        int mode;
        unsigned long type;
@@ -43,7 +56,7 @@ struct kvm_vm {
        unsigned int va_bits;
        uint64_t max_gfn;
        struct list_head vcpus;
-       struct list_head userspace_mem_regions;
+       struct userspace_mem_regions regions;
        struct sparsebit *vpages_valid;
        struct sparsebit *vpages_mapped;
        bool has_irqchip;
index 81490b9b4e32a96ecdadf918a6136fcd83aa5b9e..7397ca2998358c09d737d174039df78c3ba469f1 100644 (file)
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2020, Google LLC.
  */
+#include <inttypes.h>
 
 #include "kvm_util.h"
 #include "perf_test_util.h"
@@ -68,7 +69,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
        TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
                    "Guest memory size is not guest page size aligned.");
 
-       vm = vm_create_with_vcpus(mode, vcpus,
+       vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
                                  (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
                                  0, guest_code, NULL);
 
@@ -80,7 +81,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
         */
        TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
                    "Requested more guest memory than address space allows.\n"
-                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+                   "    guest pages: %" PRIx64 " max gfn: %" PRIx64
+                   " vcpus: %d wss: %" PRIx64 "]\n",
                    guest_num_pages, vm_get_max_gfn(vm), vcpus,
                    vcpu_memory_bytes);
 
diff --git a/tools/testing/selftests/kvm/lib/rbtree.c b/tools/testing/selftests/kvm/lib/rbtree.c
new file mode 100644 (file)
index 0000000..a703f01
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../../lib/rbtree.c"
index 63d2bc7d757b4799e94b6c17663c0dc1ae0c4a0f..6ad6c8276b2ebc19a0441f720d8415ee685a782d 100644 (file)
@@ -168,70 +168,87 @@ size_t get_def_hugetlb_pagesz(void)
 
 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
 {
+       static const int anon_flags = MAP_PRIVATE | MAP_ANONYMOUS;
+       static const int anon_huge_flags = anon_flags | MAP_HUGETLB;
+
        static const struct vm_mem_backing_src_alias aliases[] = {
                [VM_MEM_SRC_ANONYMOUS] = {
                        .name = "anonymous",
-                       .flag = 0,
+                       .flag = anon_flags,
                },
                [VM_MEM_SRC_ANONYMOUS_THP] = {
                        .name = "anonymous_thp",
-                       .flag = 0,
+                       .flag = anon_flags,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
                        .name = "anonymous_hugetlb",
-                       .flag = MAP_HUGETLB,
+                       .flag = anon_huge_flags,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
                        .name = "anonymous_hugetlb_16kb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_16KB,
+                       .flag = anon_huge_flags | MAP_HUGE_16KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
                        .name = "anonymous_hugetlb_64kb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_64KB,
+                       .flag = anon_huge_flags | MAP_HUGE_64KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
                        .name = "anonymous_hugetlb_512kb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_512KB,
+                       .flag = anon_huge_flags | MAP_HUGE_512KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
                        .name = "anonymous_hugetlb_1mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_1MB,
+                       .flag = anon_huge_flags | MAP_HUGE_1MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
                        .name = "anonymous_hugetlb_2mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_2MB,
+                       .flag = anon_huge_flags | MAP_HUGE_2MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
                        .name = "anonymous_hugetlb_8mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_8MB,
+                       .flag = anon_huge_flags | MAP_HUGE_8MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
                        .name = "anonymous_hugetlb_16mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_16MB,
+                       .flag = anon_huge_flags | MAP_HUGE_16MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
                        .name = "anonymous_hugetlb_32mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_32MB,
+                       .flag = anon_huge_flags | MAP_HUGE_32MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
                        .name = "anonymous_hugetlb_256mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_256MB,
+                       .flag = anon_huge_flags | MAP_HUGE_256MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
                        .name = "anonymous_hugetlb_512mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_512MB,
+                       .flag = anon_huge_flags | MAP_HUGE_512MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
                        .name = "anonymous_hugetlb_1gb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_1GB,
+                       .flag = anon_huge_flags | MAP_HUGE_1GB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
                        .name = "anonymous_hugetlb_2gb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_2GB,
+                       .flag = anon_huge_flags | MAP_HUGE_2GB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
                        .name = "anonymous_hugetlb_16gb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_16GB,
+                       .flag = anon_huge_flags | MAP_HUGE_16GB,
+               },
+               [VM_MEM_SRC_SHMEM] = {
+                       .name = "shmem",
+                       .flag = MAP_SHARED,
+               },
+               [VM_MEM_SRC_SHARED_HUGETLB] = {
+                       .name = "shared_hugetlb",
+                       /*
+                        * No MAP_HUGETLB, we use MFD_HUGETLB instead. Since
+                        * we're using "file backed" memory, we need to specify
+                        * this when the FD is created, not when the area is
+                        * mapped.
+                        */
+                       .flag = MAP_SHARED,
                },
        };
        _Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES,
@@ -250,10 +267,12 @@ size_t get_backing_src_pagesz(uint32_t i)
 
        switch (i) {
        case VM_MEM_SRC_ANONYMOUS:
+       case VM_MEM_SRC_SHMEM:
                return getpagesize();
        case VM_MEM_SRC_ANONYMOUS_THP:
                return get_trans_hugepagesz();
        case VM_MEM_SRC_ANONYMOUS_HUGETLB:
+       case VM_MEM_SRC_SHARED_HUGETLB:
                return get_def_hugetlb_pagesz();
        default:
                return MAP_HUGE_PAGE_SIZE(flag);
index aaf7bc7d2ce18a9d9d2a67c65ad6eae7e2469663..7629819734afd7871e66c1c04940e663580fb72f 100644 (file)
@@ -54,9 +54,9 @@ idt_handlers:
        .align 8
 
        /* Fetch current address and append it to idt_handlers. */
-       current_handler = .
+666 :
 .pushsection .rodata
-.quad current_handler
+       .quad 666b
 .popsection
 
        .if ! \has_error
index a8906e60a1081ab3ef60005921a8469faab72d13..efe235044421379f0852d6280db21ad139de0fad 100644 (file)
@@ -657,9 +657,7 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
                return cpuid;
 
        cpuid = allocate_kvm_cpuid2();
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
        TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
@@ -691,9 +689,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
 
        buffer.header.nmsrs = 1;
        buffer.entry.index = msr_index;
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
        TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
@@ -986,9 +982,7 @@ struct kvm_msr_list *kvm_get_msr_index_list(void)
        struct kvm_msr_list *list;
        int nmsrs, r, kvm_fd;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        nmsrs = kvm_get_num_msrs_fd(kvm_fd);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
@@ -1312,9 +1306,7 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
                return cpuid;
 
        cpuid = allocate_kvm_cpuid2();
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
        TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
index 6096bf0a5b34f0f8cb37cc15310e76fc721c73a1..98351ba0933cdb4e89281cdaf98c5a8ccc85a05f 100644 (file)
@@ -71,14 +71,22 @@ struct memslot_antagonist_args {
 };
 
 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
-                             uint64_t nr_modifications, uint64_t gpa)
+                              uint64_t nr_modifications)
 {
+       const uint64_t pages = 1;
+       uint64_t gpa;
        int i;
 
+       /*
+        * Add the dummy memslot just below the perf_test_util memslot, which is
+        * at the top of the guest physical address space.
+        */
+       gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
+
        for (i = 0; i < nr_modifications; i++) {
                usleep(delay);
                vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
-                                           DUMMY_MEMSLOT_INDEX, 1, 0);
+                                           DUMMY_MEMSLOT_INDEX, pages, 0);
 
                vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
        }
@@ -120,11 +128,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        pr_info("Started all vCPUs\n");
 
        add_remove_memslot(vm, p->memslot_modification_delay,
-                          p->nr_memslot_modifications,
-                          guest_test_phys_mem +
-                          (guest_percpu_mem_size * nr_vcpus) +
-                          perf_test_args.host_page_size +
-                          perf_test_args.guest_page_size);
+                          p->nr_memslot_modifications);
 
        run_vcpus = false;
 
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
new file mode 100644 (file)
index 0000000..1123965
--- /dev/null
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A memslot-related performance benchmark.
+ *
+ * Copyright (C) 2021 Oracle and/or its affiliates.
+ *
+ * Basic guest setup / host vCPU thread code lifted from set_memory_region_test.
+ */
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+#define MEM_SIZE               ((512U << 20) + 4096)
+#define MEM_SIZE_PAGES         (MEM_SIZE / 4096)
+#define MEM_GPA                0x10000000UL
+#define MEM_AUX_GPA            MEM_GPA
+#define MEM_SYNC_GPA           MEM_AUX_GPA
+#define MEM_TEST_GPA           (MEM_AUX_GPA + 4096)
+#define MEM_TEST_SIZE          (MEM_SIZE - 4096)
+static_assert(MEM_SIZE % 4096 == 0, "invalid mem size");
+static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
+
+/*
+ * 32 MiB is max size that gets well over 100 iterations on 509 slots.
+ * Considering that each slot needs to have at least one page, up to
+ * 8194 slots in use can then be tested (although with slightly
+ * limited resolution).
+ */
+#define MEM_SIZE_MAP           ((32U << 20) + 4096)
+#define MEM_SIZE_MAP_PAGES     (MEM_SIZE_MAP / 4096)
+#define MEM_TEST_MAP_SIZE      (MEM_SIZE_MAP - 4096)
+#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096)
+static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
+
+/*
+ * 128 MiB is the minimum size that fills 32k slots with at least one page
+ * in each while at the same time getting 100+ iterations in such a test.
+ */
+#define MEM_TEST_UNMAP_SIZE            (128U << 20)
+#define MEM_TEST_UNMAP_SIZE_PAGES      (MEM_TEST_UNMAP_SIZE / 4096)
+/* 2 MiB chunk size like a typical huge page */
+#define MEM_TEST_UNMAP_CHUNK_PAGES     (2U << (20 - 12))
+static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE,
+             "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0,
+             "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
+             (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0,
+             "invalid unmap test region size");
+
+/*
+ * For the move active test the middle of the test area is placed on
+ * a memslot boundary: half lies in the memslot being moved, half in
+ * other memslot(s).
+ *
+ * When running this test with 32k memslots (32764, really) each memslot
+ * contains 4 pages.
+ * The last one additionally contains the remaining 21 pages of memory,
+ * for the total size of 25 pages.
+ * Hence, the maximum size here is 50 pages.
+ */
+#define MEM_TEST_MOVE_SIZE_PAGES       (50)
+#define MEM_TEST_MOVE_SIZE             (MEM_TEST_MOVE_SIZE_PAGES * 4096)
+#define MEM_TEST_MOVE_GPA_DEST         (MEM_GPA + MEM_SIZE)
+static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
+             "invalid move test region size");
+
+#define MEM_TEST_VAL_1 0x1122334455667788
+#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00
+
+struct vm_data {
+       struct kvm_vm *vm;
+       pthread_t vcpu_thread;
+       uint32_t nslots;
+       uint64_t npages;
+       uint64_t pages_per_slot;
+       void **hva_slots;
+       bool mmio_ok;
+       uint64_t mmio_gpa_min;
+       uint64_t mmio_gpa_max;
+};
+
+struct sync_area {
+       atomic_bool start_flag;
+       atomic_bool exit_flag;
+       atomic_bool sync_flag;
+       void *move_area_ptr;
+};
+
+/*
+ * Technically, we also need the atomic bool to be address-free, which
+ * is recommended, but not strictly required, by C11 for lockless
+ * implementations.
+ * However, in practice both GCC and Clang fulfill this requirement on
+ * all KVM-supported platforms.
+ */
+static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
+
+static sem_t vcpu_ready;
+
+static bool map_unmap_verify;
+
+static bool verbose;
+#define pr_info_v(...)                         \
+       do {                                    \
+               if (verbose)                    \
+                       pr_info(__VA_ARGS__);   \
+       } while (0)
+
+static void *vcpu_worker(void *data)
+{
+       struct vm_data *vm = data;
+       struct kvm_run *run;
+       struct ucall uc;
+       uint64_t cmd;
+
+       run = vcpu_state(vm->vm, VCPU_ID);
+       while (1) {
+               vcpu_run(vm->vm, VCPU_ID);
+
+               if (run->exit_reason == KVM_EXIT_IO) {
+                       cmd = get_ucall(vm->vm, VCPU_ID, &uc);
+                       if (cmd != UCALL_SYNC)
+                               break;
+
+                       sem_post(&vcpu_ready);
+                       continue;
+               }
+
+               if (run->exit_reason != KVM_EXIT_MMIO)
+                       break;
+
+               TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+               TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
+               TEST_ASSERT(run->mmio.len == 8,
+                           "Unexpected exit mmio size = %u", run->mmio.len);
+               TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
+                           run->mmio.phys_addr <= vm->mmio_gpa_max,
+                           "Unexpected exit mmio address = 0x%llx",
+                           run->mmio.phys_addr);
+       }
+
+       if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+               TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+                         __FILE__, uc.args[1], uc.args[2]);
+
+       return NULL;
+}
+
+static void wait_for_vcpu(void)
+{
+       struct timespec ts;
+
+       TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
+                   "clock_gettime() failed: %d\n", errno);
+
+       ts.tv_sec += 2;
+       TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
+                   "sem_timedwait() failed: %d\n", errno);
+}
+
+static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
+{
+       uint64_t gpage, pgoffs;
+       uint32_t slot, slotoffs;
+       void *base;
+
+       TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
+       TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096,
+                   "Too high gpa to translate");
+       gpa -= MEM_GPA;
+
+       gpage = gpa / 4096;
+       pgoffs = gpa % 4096;
+       slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
+       slotoffs = gpage - (slot * data->pages_per_slot);
+
+       if (rempages) {
+               uint64_t slotpages;
+
+               if (slot == data->nslots - 1)
+                       slotpages = data->npages - slot * data->pages_per_slot;
+               else
+                       slotpages = data->pages_per_slot;
+
+               TEST_ASSERT(!pgoffs,
+                           "Asking for remaining pages in slot but gpa not page aligned");
+               *rempages = slotpages - slotoffs;
+       }
+
+       base = data->hva_slots[slot];
+       return (uint8_t *)base + slotoffs * 4096 + pgoffs;
+}
+
+static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
+{
+       TEST_ASSERT(slot < data->nslots, "Too high slot number");
+
+       return MEM_GPA + slot * data->pages_per_slot * 4096;
+}
+
+static struct vm_data *alloc_vm(void)
+{
+       struct vm_data *data;
+
+       data = malloc(sizeof(*data));
+       TEST_ASSERT(data, "malloc(vmdata) failed");
+
+       data->vm = NULL;
+       data->hva_slots = NULL;
+
+       return data;
+}
+
+static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
+                      void *guest_code, uint64_t mempages,
+                      struct timespec *slot_runtime)
+{
+       uint32_t max_mem_slots;
+       uint64_t rempages;
+       uint64_t guest_addr;
+       uint32_t slot;
+       struct timespec tstart;
+       struct sync_area *sync;
+
+       max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
+       TEST_ASSERT(max_mem_slots > 1,
+                   "KVM_CAP_NR_MEMSLOTS should be greater than 1");
+       TEST_ASSERT(nslots > 1 || nslots == -1,
+                   "Slot count cap should be greater than 1");
+       if (nslots != -1)
+               max_mem_slots = min(max_mem_slots, (uint32_t)nslots);
+       pr_info_v("Allowed number of memory slots: %"PRIu32"\n", max_mem_slots);
+
+       TEST_ASSERT(mempages > 1,
+                   "Can't test without any memory");
+
+       data->npages = mempages;
+       data->nslots = max_mem_slots - 1;
+       data->pages_per_slot = mempages / data->nslots;
+       if (!data->pages_per_slot) {
+               *maxslots = mempages + 1;
+               return false;
+       }
+
+       rempages = mempages % data->nslots;
+       data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
+       TEST_ASSERT(data->hva_slots, "malloc() fail");
+
+       data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+
+       pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
+               max_mem_slots - 1, data->pages_per_slot, rempages);
+
+       clock_gettime(CLOCK_MONOTONIC, &tstart);
+       for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) {
+               uint64_t npages;
+
+               npages = data->pages_per_slot;
+               if (slot == max_mem_slots - 1)
+                       npages += rempages;
+
+               vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
+                                           guest_addr, slot, npages,
+                                           0);
+               guest_addr += npages * 4096;
+       }
+       *slot_runtime = timespec_elapsed(tstart);
+
+       for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) {
+               uint64_t npages;
+               uint64_t gpa;
+
+               npages = data->pages_per_slot;
+               if (slot == max_mem_slots - 2)
+                       npages += rempages;
+
+               gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr,
+                                        slot + 1);
+               TEST_ASSERT(gpa == guest_addr,
+                           "vm_phy_pages_alloc() failed\n");
+
+               data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr);
+               memset(data->hva_slots[slot], 0, npages * 4096);
+
+               guest_addr += npages * 4096;
+       }
+
+       virt_map(data->vm, MEM_GPA, MEM_GPA, mempages, 0);
+
+       sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+       atomic_init(&sync->start_flag, false);
+       atomic_init(&sync->exit_flag, false);
+       atomic_init(&sync->sync_flag, false);
+
+       data->mmio_ok = false;
+
+       return true;
+}
+
+/* Start the vCPU worker thread and wait until the guest signals readiness. */
+static void launch_vm(struct vm_data *data)
+{
+       pr_info_v("Launching the test VM\n");
+
+       pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);
+
+       /* Ensure the guest thread is spun up. */
+       wait_for_vcpu();
+}
+
+/* Destroy the test VM and free all host-side bookkeeping allocations. */
+static void free_vm(struct vm_data *data)
+{
+       kvm_vm_free(data->vm);
+       free(data->hva_slots);
+       free(data);
+}
+
+/* Block until the vCPU worker thread has terminated. */
+static void wait_guest_exit(struct vm_data *data)
+{
+       pthread_join(data->vcpu_thread, NULL);
+}
+
+/* Host side: release the guest from its startup spin loop. */
+static void let_guest_run(struct sync_area *sync)
+{
+       atomic_store_explicit(&sync->start_flag, true, memory_order_release);
+}
+
+/* Guest side: busy-wait until the host sets start_flag. */
+static void guest_spin_until_start(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
+               ;
+}
+
+/* Host side: ask the guest to leave its main test loop. */
+static void make_guest_exit(struct sync_area *sync)
+{
+       atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
+}
+
+/* Guest side: poll the host-set exit request flag. */
+static bool _guest_should_exit(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
+}
+
+/* Exit requests are rare, so hint the branch predictor accordingly. */
+#define guest_should_exit() unlikely(_guest_should_exit())
+
+/*
+ * noinline so we can easily see how much time the host spends waiting
+ * for the guest.
+ * For the same reason use alarm() instead of polling clock_gettime()
+ * to implement a wait timeout.
+ */
+/*
+ * noinline so we can easily see how much time the host spends waiting
+ * for the guest.
+ * For the same reason use alarm() instead of polling clock_gettime()
+ * to implement a wait timeout.
+ */
+static noinline void host_perform_sync(struct sync_area *sync)
+{
+       /* 2 s deadline: a pending SIGALRM aborts a stuck rendezvous. */
+       alarm(2);
+
+       /* Raise sync_flag, then wait for the guest to consume (clear) it. */
+       atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
+       while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
+               ;
+
+       alarm(0);
+}
+
+/*
+ * Guest-side counterpart of host_perform_sync(): atomically consume
+ * sync_flag (true -> false). Returns false if an exit was requested
+ * before the handshake completed, true on a successful sync.
+ */
+static bool guest_perform_sync(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+       bool expected;
+
+       do {
+               if (guest_should_exit())
+                       return false;
+
+               /* CAS can spuriously fail (weak) or see flag still false. */
+               expected = true;
+       } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
+                                                       &expected, false,
+                                                       memory_order_acq_rel,
+                                                       memory_order_relaxed));
+
+       return true;
+}
+
+/*
+ * Guest body for the memslot move benchmarks: repeatedly write one page
+ * every 4096 bytes across the move test area whose base the host placed
+ * in sync->move_area_ptr.
+ */
+static void guest_code_test_memslot_move(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+       uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
+
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (!guest_should_exit()) {
+               uintptr_t ptr;
+
+               for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
+                    ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               /*
+                * No host sync here since the MMIO exits are so expensive
+                * that the host would spend most of its time waiting for
+                * the guest and so instead of measuring memslot move
+                * performance we would measure the performance and
+                * likelihood of MMIO exits
+                */
+       }
+
+       GUEST_DONE();
+}
+
+/*
+ * Guest body for the map benchmark: alternately write VAL_1 to the first
+ * half and VAL_2 to the second half of the test area, syncing with the
+ * host between halves so the host can unmap the idle half.
+ */
+static void guest_code_test_memslot_map(void)
+{
+       /* NOTE(review): 'sync' appears unused here; syncing goes through
+        * guest_perform_sync(), which fetches the area itself. */
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (1) {
+               uintptr_t ptr;
+
+               for (ptr = MEM_TEST_GPA;
+                    ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               if (!guest_perform_sync())
+                       break;
+
+               for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
+                    ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+               if (!guest_perform_sync())
+                       break;
+       }
+
+       GUEST_DONE();
+}
+
+/*
+ * Guest body for the unmap benchmarks: touch just one page per host sync
+ * (first half, then second half of the unmap area).
+ */
+static void guest_code_test_memslot_unmap(void)
+{
+       /* NOTE(review): 'sync' appears unused here as well. */
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (1) {
+               uintptr_t ptr = MEM_TEST_GPA;
+
+               /*
+                * We can afford to access (map) just a small number of pages
+                * per host sync as otherwise the host will spend
+                * a significant amount of its time waiting for the guest
+                * (instead of doing unmap operations), so this will
+                * effectively turn this test into a map performance test.
+                *
+                * Just access a single page to be on the safe side.
+                */
+               *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               if (!guest_perform_sync())
+                       break;
+
+               ptr += MEM_TEST_UNMAP_SIZE / 2;
+               *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+               if (!guest_perform_sync())
+                       break;
+       }
+
+       GUEST_DONE();
+}
+
+/*
+ * Guest body for the RW benchmark: write VAL_1 to page-aligned offsets,
+ * sync, then verify the host wrote VAL_2 at the half-page offsets and
+ * zero them back.
+ */
+static void guest_code_test_memslot_rw(void)
+{
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (1) {
+               uintptr_t ptr;
+
+               for (ptr = MEM_TEST_GPA;
+                    ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               if (!guest_perform_sync())
+                       break;
+
+               for (ptr = MEM_TEST_GPA + 4096 / 2;
+                    ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) {
+                       uint64_t val = *(uint64_t *)ptr;
+
+                       GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
+                       *(uint64_t *)ptr = 0;
+               }
+
+               if (!guest_perform_sync())
+                       break;
+       }
+
+       GUEST_DONE();
+}
+
+/*
+ * Common preparation for the memslot move tests: point the guest's write
+ * area at (or next to) the last memslot.  In the "active" variant the
+ * write area straddles the moved slot, so guest writes can hit unmapped
+ * GPAs and MMIO-exit to the host; returns false (with *maxslots = 0) if
+ * the last slot is too small for that layout.
+ */
+static bool test_memslot_move_prepare(struct vm_data *data,
+                                     struct sync_area *sync,
+                                     uint64_t *maxslots, bool isactive)
+{
+       uint64_t movesrcgpa, movetestgpa;
+
+       movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
+
+       if (isactive) {
+               uint64_t lastpages;
+
+               /* The last slot must hold at least half the move area. */
+               vm_gpa2hva(data, movesrcgpa, &lastpages);
+               if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) {
+                       *maxslots = 0;
+                       return false;
+               }
+       }
+
+       /* Active: half before / half inside the slot; inactive: fully before. */
+       movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1));
+       sync->move_area_ptr = (void *)movetestgpa;
+
+       if (isactive) {
+               /* Tell vcpu_worker which MMIO GPAs are expected and benign. */
+               data->mmio_ok = true;
+               data->mmio_gpa_min = movesrcgpa;
+               data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
+       }
+
+       return true;
+}
+
+/* "move active area" prepare: guest writes overlap the moved slot. */
+static bool test_memslot_move_prepare_active(struct vm_data *data,
+                                            struct sync_area *sync,
+                                            uint64_t *maxslots)
+{
+       return test_memslot_move_prepare(data, sync, maxslots, true);
+}
+
+/* "move inactive area" prepare: guest writes stay clear of the moved slot. */
+static bool test_memslot_move_prepare_inactive(struct vm_data *data,
+                                              struct sync_area *sync,
+                                              uint64_t *maxslots)
+{
+       return test_memslot_move_prepare(data, sync, maxslots, false);
+}
+
+/*
+ * One benchmark iteration: move the last memslot to the scratch GPA and
+ * back.  Slot IDs are 1-based (prepare_vm adds slots starting at 1),
+ * hence the "- 1 + 1" when converting the slot index to a slot ID.
+ */
+static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
+{
+       uint64_t movesrcgpa;
+
+       movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
+       vm_mem_region_move(data->vm, data->nslots - 1 + 1,
+                          MEM_TEST_MOVE_GPA_DEST);
+       vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
+}
+
+/*
+ * Unmap 'count' test pages starting at page offset 'offsp' within the
+ * test area, via madvise(MADV_DONTNEED) on the backing HVAs.  A request
+ * may span several memslots, so it is processed slot-by-slot.
+ */
+static void test_memslot_do_unmap(struct vm_data *data,
+                                 uint64_t offsp, uint64_t count)
+{
+       uint64_t gpa, ctr;
+
+       for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) {
+               uint64_t npages;
+               void *hva;
+               int ret;
+
+               /* npages = pages remaining in the slot containing gpa. */
+               hva = vm_gpa2hva(data, gpa, &npages);
+               TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
+               npages = min(npages, count - ctr);
+               ret = madvise(hva, npages * 4096, MADV_DONTNEED);
+               TEST_ASSERT(!ret,
+                           "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
+                           hva, gpa);
+               ctr += npages;
+               gpa += npages * 4096;
+       }
+       TEST_ASSERT(ctr == count,
+                   "madvise(MADV_DONTNEED) should exactly cover all of the requested area");
+}
+
+/*
+ * Debug check (-d flag): verify the guest stored 'valexp' at test page
+ * offset 'offsp', then zero the location for the next iteration.
+ * No-op unless map_unmap_verify is set.
+ */
+static void test_memslot_map_unmap_check(struct vm_data *data,
+                                        uint64_t offsp, uint64_t valexp)
+{
+       uint64_t gpa;
+       uint64_t *val;
+
+       if (!map_unmap_verify)
+               return;
+
+       gpa = MEM_TEST_GPA + offsp * 4096;
+       val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
+       TEST_ASSERT(*val == valexp,
+                   "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
+                   *val, valexp, gpa);
+       *val = 0;
+}
+
+/*
+ * Host side of the "map" benchmark: ping-pong with the guest, unmapping
+ * one half of the test area while the guest faults in (maps) the other.
+ */
+static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
+{
+       /*
+        * Unmap the second half of the test area while guest writes to (maps)
+        * the first half.
+        */
+       test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+                             MEM_TEST_MAP_SIZE_PAGES / 2);
+
+       /*
+        * Wait for the guest to finish writing the first half of the test
+        * area, verify the written value on the first and the last page of
+        * this area and then unmap it.
+        * Meanwhile, the guest is writing to (mapping) the second half of
+        * the test area.
+        */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+       test_memslot_map_unmap_check(data,
+                                    MEM_TEST_MAP_SIZE_PAGES / 2 - 1,
+                                    MEM_TEST_VAL_1);
+       test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2);
+
+
+       /*
+        * Wait for the guest to finish writing the second half of the test
+        * area and verify the written value on the first and the last page
+        * of this area.
+        * The area will be unmapped at the beginning of the next loop
+        * iteration.
+        * Meanwhile, the guest is writing to (mapping) the first half of
+        * the test area.
+        */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+                                    MEM_TEST_VAL_2);
+       test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1,
+                                    MEM_TEST_VAL_2);
+}
+
+/*
+ * Host side of the "unmap" benchmarks: after each sync, unmap one half
+ * of the unmap area in 'chunk'-page madvise calls (chunk=1 measures
+ * per-page unmap cost; a larger chunk measures batched unmap cost).
+ */
+static void test_memslot_unmap_loop_common(struct vm_data *data,
+                                          struct sync_area *sync,
+                                          uint64_t chunk)
+{
+       uint64_t ctr;
+
+       /*
+        * Wait for the guest to finish mapping page(s) in the first half
+        * of the test area, verify the written value and then perform unmap
+        * of this area.
+        * Meanwhile, the guest is writing to (mapping) page(s) in the second
+        * half of the test area.
+        */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+       for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk)
+               test_memslot_do_unmap(data, ctr, chunk);
+
+       /* Likewise, but for the opposite host / guest areas */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2,
+                                    MEM_TEST_VAL_2);
+       for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2;
+            ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk)
+               test_memslot_do_unmap(data, ctr, chunk);
+}
+
+/* "unmap" test: one page per madvise call. */
+static void test_memslot_unmap_loop(struct vm_data *data,
+                                   struct sync_area *sync)
+{
+       test_memslot_unmap_loop_common(data, sync, 1);
+}
+
+/* "unmap chunked" test: MEM_TEST_UNMAP_CHUNK_PAGES pages per call. */
+static void test_memslot_unmap_loop_chunked(struct vm_data *data,
+                                           struct sync_area *sync)
+{
+       test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES);
+}
+
+/*
+ * Host side of the "RW" benchmark: write VAL_2 at half-page offsets for
+ * the guest to verify, then check and clear the VAL_1 values the guest
+ * wrote at page-aligned offsets.  One host sync brackets each direction.
+ */
+static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
+{
+       uint64_t gptr;
+
+       for (gptr = MEM_TEST_GPA + 4096 / 2;
+            gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096)
+               *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
+
+       host_perform_sync(sync);
+
+       for (gptr = MEM_TEST_GPA;
+            gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) {
+               uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
+               uint64_t val = *vptr;
+
+               TEST_ASSERT(val == MEM_TEST_VAL_1,
+                           "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")",
+                           val, gptr);
+               *vptr = 0;
+       }
+
+       host_perform_sync(sync);
+}
+
+/* Descriptor for one benchmark variant in the tests[] table. */
+struct test_data {
+       const char *name;       /* human-readable test name */
+       uint64_t mem_size;      /* test memory in pages; 0 = MEM_SIZE_PAGES */
+       void (*guest_code)(void);       /* guest entry point */
+       /* optional setup; false = skip test (may lower *maxslots) */
+       bool (*prepare)(struct vm_data *data, struct sync_area *sync,
+                       uint64_t *maxslots);
+       void (*loop)(struct vm_data *data, struct sync_area *sync);    /* one host iteration */
+};
+
+/*
+ * Run a single benchmark: build the VM, run the test-specific prepare
+ * hook, launch the guest, then call tdata->loop() repeatedly for up to
+ * 'maxtime' seconds, counting iterations in *nloops.
+ *
+ * Returns false if VM or test preparation failed (see *maxslots for a
+ * possible reduced slot-count suggestion); true otherwise.  Setup and
+ * guest run times are reported via *slot_runtime / *guest_runtime.
+ */
+static bool test_execute(int nslots, uint64_t *maxslots,
+                        unsigned int maxtime,
+                        const struct test_data *tdata,
+                        uint64_t *nloops,
+                        struct timespec *slot_runtime,
+                        struct timespec *guest_runtime)
+{
+       /* Per-test memory size, defaulting to the full test area. */
+       uint64_t mem_size = tdata->mem_size ? : MEM_SIZE_PAGES;
+       struct vm_data *data;
+       struct sync_area *sync;
+       struct timespec tstart;
+       bool ret = true;
+
+       data = alloc_vm();
+       if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
+                       mem_size, slot_runtime)) {
+               ret = false;
+               goto exit_free;
+       }
+
+       sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+
+       if (tdata->prepare &&
+           !tdata->prepare(data, sync, maxslots)) {
+               ret = false;
+               goto exit_free;
+       }
+
+       launch_vm(data);
+
+       clock_gettime(CLOCK_MONOTONIC, &tstart);
+       let_guest_run(sync);
+
+       /* Time-boxed benchmark loop; each pass is one measured iteration. */
+       while (1) {
+               *guest_runtime = timespec_elapsed(tstart);
+               if (guest_runtime->tv_sec >= maxtime)
+                       break;
+
+               tdata->loop(data, sync);
+
+               (*nloops)++;
+       }
+
+       make_guest_exit(sync);
+       wait_guest_exit(data);
+
+exit_free:
+       free_vm(data);
+
+       return ret;
+}
+
+/* Table of all benchmark variants, selectable via -f / -e indices. */
+static const struct test_data tests[] = {
+       {
+               .name = "map",
+               .mem_size = MEM_SIZE_MAP_PAGES,
+               .guest_code = guest_code_test_memslot_map,
+               .loop = test_memslot_map_loop,
+       },
+       {
+               .name = "unmap",
+               /* +1 page for the sync area at the start of test memory. */
+               .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+               .guest_code = guest_code_test_memslot_unmap,
+               .loop = test_memslot_unmap_loop,
+       },
+       {
+               .name = "unmap chunked",
+               .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+               .guest_code = guest_code_test_memslot_unmap,
+               .loop = test_memslot_unmap_loop_chunked,
+       },
+       {
+               .name = "move active area",
+               .guest_code = guest_code_test_memslot_move,
+               .prepare = test_memslot_move_prepare_active,
+               .loop = test_memslot_move_loop,
+       },
+       {
+               .name = "move inactive area",
+               .guest_code = guest_code_test_memslot_move,
+               .prepare = test_memslot_move_prepare_inactive,
+               .loop = test_memslot_move_loop,
+       },
+       {
+               .name = "RW",
+               .guest_code = guest_code_test_memslot_rw,
+               .loop = test_memslot_rw_loop
+       },
+};
+
+#define NTESTS ARRAY_SIZE(tests)
+
+/* Command-line configuration, filled in by parse_args(). */
+struct test_args {
+       int tfirst;     /* index of first test to run */
+       int tlast;      /* index of last test to run */
+       int nslots;     /* memslot count cap, -1 = no cap */
+       int seconds;    /* length of each test run */
+       int runs;       /* number of runs per test */
+};
+
+/* Print usage information and the list of available tests. */
+static void help(char *name, struct test_args *targs)
+{
+       int ctr;
+
+       pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n",
+               name);
+       pr_info(" -h: print this help screen.\n");
+       pr_info(" -v: enable verbose mode (not for benchmarking).\n");
+       pr_info(" -d: enable extra debug checks.\n");
+       pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
+               targs->nslots);
+       pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
+               targs->tfirst, NTESTS - 1);
+       pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n",
+               targs->tlast, NTESTS - 1);
+       pr_info(" -l: specify the test length in seconds (currently: %i)\n",
+               targs->seconds);
+       pr_info(" -r: specify the number of runs per test (currently: %i)\n",
+               targs->runs);
+
+       pr_info("\nAvailable tests:\n");
+       for (ctr = 0; ctr < NTESTS; ctr++)
+               pr_info("%d: %s\n", ctr, tests[ctr].name);
+}
+
+/*
+ * Parse and validate command-line options into *targs (which holds the
+ * defaults on entry).  Returns true if the program should proceed,
+ * false on -h, an invalid option, or an invalid argument value.
+ */
+static bool parse_args(int argc, char *argv[],
+                      struct test_args *targs)
+{
+       int opt;
+
+       while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
+               switch (opt) {
+               case 'h':
+               default:
+                       help(argv[0], targs);
+                       return false;
+               case 'v':
+                       verbose = true;
+                       break;
+               case 'd':
+                       map_unmap_verify = true;
+                       break;
+               case 's':
+                       targs->nslots = atoi(optarg);
+                       if (targs->nslots <= 0 && targs->nslots != -1) {
+                               pr_info("Slot count cap has to be positive or -1 for no cap\n");
+                               return false;
+                       }
+                       break;
+               case 'f':
+                       /*
+                        * NOTE(review): only a lower bound is checked here;
+                        * an out-of-range tfirst is caught by the
+                        * tfirst > tlast check below (tlast is bounded).
+                        */
+                       targs->tfirst = atoi(optarg);
+                       if (targs->tfirst < 0) {
+                               pr_info("First test to run has to be non-negative\n");
+                               return false;
+                       }
+                       break;
+               case 'e':
+                       targs->tlast = atoi(optarg);
+                       if (targs->tlast < 0 || targs->tlast >= NTESTS) {
+                               pr_info("Last test to run has to be non-negative and less than %zu\n",
+                                       NTESTS);
+                               return false;
+                       }
+                       break;
+               case 'l':
+                       targs->seconds = atoi(optarg);
+                       if (targs->seconds < 0) {
+                               pr_info("Test length in seconds has to be non-negative\n");
+                               return false;
+                       }
+                       break;
+               case 'r':
+                       targs->runs = atoi(optarg);
+                       if (targs->runs <= 0) {
+                               pr_info("Runs per test has to be positive\n");
+                               return false;
+                       }
+                       break;
+               }
+       }
+
+       /* Reject stray non-option arguments. */
+       if (optind < argc) {
+               help(argv[0], targs);
+               return false;
+       }
+
+       if (targs->tfirst > targs->tlast) {
+               pr_info("First test to run cannot be greater than the last test to run\n");
+               return false;
+       }
+
+       return true;
+}
+
+/* Timing results of a single test run. */
+struct test_result {
+       struct timespec slot_runtime, guest_runtime, iter_runtime;
+       int64_t slottimens, runtimens;  /* ns versions used for ranking */
+       uint64_t nloops;        /* completed benchmark iterations */
+};
+
+/*
+ * Execute one run of 'data' and fold its results into the running "best
+ * slot setup time" / "best per-iteration runtime" records.
+ * Returns false only on a setup failure (not on a zero-iteration run).
+ */
+static bool test_loop(const struct test_data *data,
+                     const struct test_args *targs,
+                     struct test_result *rbestslottime,
+                     struct test_result *rbestruntime)
+{
+       /*
+        * NOTE(review): maxslots is expected to be set by the failing
+        * prepare path inside test_execute() before it is read below —
+        * confirm prepare_vm() always writes it on failure.
+        */
+       uint64_t maxslots;
+       struct test_result result;
+
+       result.nloops = 0;
+       if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
+                         &result.nloops,
+                         &result.slot_runtime, &result.guest_runtime)) {
+               if (maxslots)
+                       pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n",
+                               maxslots);
+               else
+                       pr_info("Memslot count may be too high for this test, try adjusting the cap\n");
+
+               return false;
+       }
+
+       pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n",
+               result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec,
+               result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec);
+       if (!result.nloops) {
+               pr_info("No full loops done - too short test time or system too loaded?\n");
+               return true;
+       }
+
+       result.iter_runtime = timespec_div(result.guest_runtime,
+                                          result.nloops);
+       pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n",
+               result.nloops,
+               result.iter_runtime.tv_sec,
+               result.iter_runtime.tv_nsec);
+       result.slottimens = timespec_to_ns(result.slot_runtime);
+       result.runtimens = timespec_to_ns(result.iter_runtime);
+
+       /*
+        * Only rank the slot setup time for tests using the whole test memory
+        * area so they are comparable
+        */
+       if (!data->mem_size &&
+           (!rbestslottime->slottimens ||
+            result.slottimens < rbestslottime->slottimens))
+               *rbestslottime = result;
+       if (!rbestruntime->runtimens ||
+           result.runtimens < rbestruntime->runtimens)
+               *rbestruntime = result;
+
+       return true;
+}
+
+/*
+ * Entry point: parse options, run each selected test the requested
+ * number of times, and report per-test best runtimes plus the overall
+ * best slot setup time.
+ */
+int main(int argc, char *argv[])
+{
+       struct test_args targs = {
+               .tfirst = 0,
+               .tlast = NTESTS - 1,
+               .nslots = -1,
+               .seconds = 5,
+               .runs = 1,
+       };
+       struct test_result rbestslottime;
+       int tctr;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       if (!parse_args(argc, argv, &targs))
+               return -1;
+
+       /* slottimens == 0 means "no result recorded yet". */
+       rbestslottime.slottimens = 0;
+       for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
+               const struct test_data *data = &tests[tctr];
+               unsigned int runctr;
+               struct test_result rbestruntime;
+
+               if (tctr > targs.tfirst)
+                       pr_info("\n");
+
+               pr_info("Testing %s performance with %i runs, %d seconds each\n",
+                       data->name, targs.runs, targs.seconds);
+
+               rbestruntime.runtimens = 0;
+               for (runctr = 0; runctr < targs.runs; runctr++)
+                       if (!test_loop(data, &targs,
+                                      &rbestslottime, &rbestruntime))
+                               break;
+
+               if (rbestruntime.runtimens)
+                       pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n",
+                               rbestruntime.iter_runtime.tv_sec,
+                               rbestruntime.iter_runtime.tv_nsec,
+                               rbestruntime.nloops);
+       }
+
+       if (rbestslottime.slottimens)
+               pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n",
+                       rbestslottime.slot_runtime.tv_sec,
+                       rbestslottime.slot_runtime.tv_nsec);
+
+       return 0;
+}
index ca22ee6d19cbdd7c4c03fdc1ffc672730f133010..63096cea26c61beb0cca78149e110758d60c0d98 100644 (file)
 #include "vmx.h"
 
 #define VCPU_ID                5
+#define NMI_VECTOR     2
+
+static int ud_count;
+
+void enable_x2apic(void)
+{
+       uint32_t spiv_reg = APIC_BASE_MSR + (APIC_SPIV >> 4);
+
+       wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
+             MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
+       wrmsr(spiv_reg, rdmsr(spiv_reg) | APIC_SPIV_APIC_ENABLED);
+}
+
+static void guest_ud_handler(struct ex_regs *regs)
+{
+       ud_count++;
+       regs->rip += 3; /* VMLAUNCH */
+}
+
+static void guest_nmi_handler(struct ex_regs *regs)
+{
+}
 
 void l2_guest_code(void)
 {
@@ -25,15 +47,23 @@ void l2_guest_code(void)
 
        GUEST_SYNC(8);
 
+       /* Forced exit to L1 upon restore */
+       GUEST_SYNC(9);
+
        /* Done, exit to L1 and never come back.  */
        vmcall();
 }
 
-void l1_guest_code(struct vmx_pages *vmx_pages)
+void guest_code(struct vmx_pages *vmx_pages)
 {
 #define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
+       enable_x2apic();
+
+       GUEST_SYNC(1);
+       GUEST_SYNC(2);
+
        enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
 
        GUEST_ASSERT(vmx_pages->vmcs_gpa);
@@ -55,27 +85,40 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
        current_evmcs->revision_id = EVMCS_VERSION;
        GUEST_SYNC(6);
 
+       current_evmcs->pin_based_vm_exec_control |=
+               PIN_BASED_NMI_EXITING;
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
-       GUEST_SYNC(9);
+
+       /*
+        * NMI forces an L2->L1 exit; L2 is then resumed in the hope that
+        * EVMCS is up-to-date (RIP points where it should and not at the
+        * beginning of l2_guest_code()). GUEST_SYNC(9) checks that.
+        */
        GUEST_ASSERT(!vmresume());
-       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
        GUEST_SYNC(10);
+
+       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       GUEST_SYNC(11);
+
+       /* Try enlightened vmptrld with an incorrect GPA */
+       evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
+       GUEST_ASSERT(vmlaunch());
+       GUEST_ASSERT(ud_count == 1);
+       GUEST_DONE();
 }
 
-void guest_code(struct vmx_pages *vmx_pages)
+void inject_nmi(struct kvm_vm *vm)
 {
-       GUEST_SYNC(1);
-       GUEST_SYNC(2);
+       struct kvm_vcpu_events events;
 
-       if (vmx_pages)
-               l1_guest_code(vmx_pages);
+       vcpu_events_get(vm, VCPU_ID, &events);
 
-       GUEST_DONE();
+       events.nmi.pending = 1;
+       events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
 
-       /* Try enlightened vmptrld with an incorrect GPA */
-       evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
-       GUEST_ASSERT(vmlaunch());
+       vcpu_events_set(vm, VCPU_ID, &events);
 }
 
 int main(int argc, char *argv[])
@@ -109,6 +152,13 @@ int main(int argc, char *argv[])
        vcpu_alloc_vmx(vm, &vmx_pages_gva);
        vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vm, VCPU_ID);
+       vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
+       vm_handle_exception(vm, NMI_VECTOR, guest_nmi_handler);
+
+       pr_info("Running L1 which uses EVMCS to run L2\n");
+
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -124,7 +174,7 @@ int main(int argc, char *argv[])
                case UCALL_SYNC:
                        break;
                case UCALL_DONE:
-                       goto part1_done;
+                       goto done;
                default:
                        TEST_FAIL("Unknown ucall %lu", uc.cmd);
                }
@@ -154,12 +204,14 @@ int main(int argc, char *argv[])
                TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
                            "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
                            (ulong) regs2.rdi, (ulong) regs2.rsi);
-       }
 
-part1_done:
-       _vcpu_run(vm, VCPU_ID);
-       TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
-                   "Unexpected successful VMEnter with invalid eVMCS pointer!");
+               /* Force immediate L2->L1 exit before resuming */
+               if (stage == 8) {
+                       pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
+                       inject_nmi(vm);
+               }
+       }
 
+done:
        kvm_vm_free(vm);
 }
index 9b78e88896385f399e3cd3ffe7add9ddfcb3a09c..8c77537af5a1c690d14bdb081be8d0093cf6d315 100644 (file)
@@ -19,7 +19,12 @@ struct {
        u32 function;
        u32 index;
 } mangled_cpuids[] = {
+       /*
+        * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
+        * which are not controlled for by this test.
+        */
        {.function = 0xd, .index = 0},
+       {.function = 0xd, .index = 1},
 };
 
 static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
index cb953df4d7d0a6a0f3a90c43ec777ba9ba2911e1..8aed0db1331d11dc141a4dfbedf331c854514804 100644 (file)
@@ -37,9 +37,7 @@ static void test_get_msr_index(void)
        int old_res, res, kvm_fd, r;
        struct kvm_msr_list *list;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        old_res = kvm_num_index_msrs(kvm_fd, 0);
        TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
@@ -101,9 +99,7 @@ static void test_get_msr_feature(void)
        int res, old_res, i, kvm_fd;
        struct kvm_msr_list *feature_list;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        old_res = kvm_num_feature_msrs(kvm_fd, 0);
        TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
diff --git a/tools/testing/selftests/nci/.gitignore b/tools/testing/selftests/nci/.gitignore
new file mode 100644 (file)
index 0000000..448eeb4
--- /dev/null
@@ -0,0 +1 @@
+/nci_dev
index 3c4cb72ed8a4a2e8b67444280e7a910c766fabae..9ca5f1ba461ece4d62c9f93ca883577f7a40787a 100755 (executable)
@@ -501,6 +501,7 @@ do_transfer()
        local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
        local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
        local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+       local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
 
        expect_synrx=$((stat_synrx_last_l))
        expect_ackrx=$((stat_ackrx_last_l))
@@ -518,10 +519,14 @@ do_transfer()
                        "${stat_synrx_now_l}" "${expect_synrx}" 1>&2
                retc=1
        fi
-       if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
-               printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
-                       "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
-               rets=1
+       if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} -a ${stat_ooo_now} -eq 0 ]; then
+               if [ ${stat_ooo_now} -eq 0 ]; then
+                       printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
+                               "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
+                       rets=1
+               else
+                       printf "[ Note ] fallback due to TCP OoO"
+               fi
        fi
 
        if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
index 78ddf5e116254fc685dd508c0a4faa8e5990379c..8e83cf91513a684072bef3caa0161297cae07fa0 100644 (file)
@@ -43,7 +43,7 @@ static struct {
        siginfo_t first_siginfo;        /* First observed siginfo_t. */
 } ctx;
 
-/* Unique value to check si_perf is correctly set from perf_event_attr::sig_data. */
+/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */
 #define TEST_SIG_DATA(addr) (~(unsigned long)(addr))
 
 static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
@@ -164,8 +164,8 @@ TEST_F(sigtrap_threads, enable_event)
        EXPECT_EQ(ctx.signal_count, NUM_THREADS);
        EXPECT_EQ(ctx.tids_want_signal, 0);
        EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
-       EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
-       EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+       EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+       EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
 
        /* Check enabled for parent. */
        ctx.iterate_on = 0;
@@ -183,8 +183,8 @@ TEST_F(sigtrap_threads, modify_and_enable_event)
        EXPECT_EQ(ctx.signal_count, NUM_THREADS);
        EXPECT_EQ(ctx.tids_want_signal, 0);
        EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
-       EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
-       EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+       EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+       EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
 
        /* Check enabled for parent. */
        ctx.iterate_on = 0;
@@ -203,8 +203,8 @@ TEST_F(sigtrap_threads, signal_stress)
        EXPECT_EQ(ctx.signal_count, NUM_THREADS * ctx.iterate_on);
        EXPECT_EQ(ctx.tids_want_signal, 0);
        EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
-       EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT);
-       EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on));
+       EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+       EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
 }
 
 TEST_HARNESS_MAIN
index bed4b5318a86ce3d0495a285187bf26d6fe9a9de..8f3e72e626fa7cd4498ca9636ac43f684612a520 100644 (file)
@@ -10,6 +10,7 @@
 /proc-self-map-files-002
 /proc-self-syscall
 /proc-self-wchan
+/proc-subset-pid
 /proc-uptime-001
 /proc-uptime-002
 /read
index 98c3b647f54dcd982983de4d0ae8ac622d36e8fa..e3d5c77a861213b7628d91957af377f717be8565 100644 (file)
@@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 # define SYSCALL_RET_SET(_regs, _val)                          \
        do {                                                    \
                typeof(_val) _result = (_val);                  \
-               /*                                              \
-                * A syscall error is signaled by CR0 SO bit    \
-                * and the code is stored as a positive value.  \
-                */                                             \
-               if (_result < 0) {                              \
-                       SYSCALL_RET(_regs) = -_result;          \
-                       (_regs).ccr |= 0x10000000;              \
-               } else {                                        \
+               if ((_regs.trap & 0xfff0) == 0x3000) {          \
+                       /*                                      \
+                        * scv 0 system call uses -ve result    \
+                        * for error, so no need to adjust.     \
+                        */                                     \
                        SYSCALL_RET(_regs) = _result;           \
-                       (_regs).ccr &= ~0x10000000;             \
+               } else {                                        \
+                       /*                                      \
+                        * A syscall error is signaled by the   \
+                        * CR0 SO bit and the code is stored as \
+                        * a positive value.                    \
+                        */                                     \
+                       if (_result < 0) {                      \
+                               SYSCALL_RET(_regs) = -_result;  \
+                               (_regs).ccr |= 0x10000000;      \
+                       } else {                                \
+                               SYSCALL_RET(_regs) = _result;   \
+                               (_regs).ccr &= ~0x10000000;     \
+                       }                                       \
                }                                               \
        } while (0)
 # define SYSCALL_RET_SET_ON_PTRACE_EXIT
index 1cda2e11b3ad9958d5564e204ac20ce0f9a1c6c3..773c5027553d2a607787f447b6b7ca4bf5537fe3 100644 (file)
@@ -9,11 +9,11 @@
         "setup": [
             "$IP link add dev $DUMMY type dummy || /bin/true"
         ],
-        "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536",
-        "expExitCode": "2",
+        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_pie flows 65536",
+        "expExitCode": "0",
         "verifyCmd": "$TC qdisc show dev $DUMMY",
-        "matchPattern": "qdisc",
-        "matchCount": "0",
+        "matchPattern": "qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536",
+        "matchCount": "1",
         "teardown": [
             "$IP link del dev $DUMMY"
         ]
index 7ed7cd95e58fee585415f25995014e4f0bbf83ab..ebc4ee0fe179ff1c135602b4cb332c05293dd18b 100755 (executable)
@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
 ip1 -4 route add default dev wg0 table 51820
 ip1 -4 rule add not fwmark 51820 table 51820
 ip1 -4 rule add table main suppress_prefixlength 0
+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
 # Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
 n1 ping -W 1 -c 100 -f 192.168.99.7
 n1 ping -W 1 -c 100 -f abab::1111
index 4eecb432a66c1c2c2c40e01149088a884bfc8679..74db83a0aedd8b67be991e0f856c53ec6ec7c9cd 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
 CONFIG_NETFILTER_XT_NAT=y
 CONFIG_NETFILTER_XT_MATCH_LENGTH=y
 CONFIG_NETFILTER_XT_MARK=y
-CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_NF_NAT_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_FILTER=y
index 2799c6660ccea94e752d2870201d79be1246c68d..6a6bc7af0e28d84d44790faef778558e3671e0e0 100644 (file)
@@ -307,6 +307,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 {
        return kvm_make_all_cpus_request_except(kvm, req, NULL);
 }
+EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
@@ -2893,8 +2894,8 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
        if (val < grow_start)
                val = grow_start;
 
-       if (val > halt_poll_ns)
-               val = halt_poll_ns;
+       if (val > vcpu->kvm->max_halt_poll_ns)
+               val = vcpu->kvm->max_halt_poll_ns;
 
        vcpu->halt_poll_ns = val;
 out:
@@ -2929,6 +2930,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
                goto out;
        if (signal_pending(current))
                goto out;
+       if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
+               goto out;
 
        ret = 0;
 out:
@@ -2973,7 +2976,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                goto out;
                        }
                        poll_end = cur = ktime_get();
-               } while (single_task_running() && ktime_before(cur, stop));
+               } while (kvm_vcpu_can_poll(cur, stop));
        }
 
        prepare_to_rcuwait(&vcpu->wait);
index c9bb3957f58a72eefb3865f07c669be73deb4faa..28fda42e471bbdabfcf3a4ebad772cf9317e320f 100644 (file)
@@ -40,21 +40,17 @@ static int __connect(struct irq_bypass_producer *prod,
        if (prod->add_consumer)
                ret = prod->add_consumer(prod, cons);
 
-       if (ret)
-               goto err_add_consumer;
-
-       ret = cons->add_producer(cons, prod);
-       if (ret)
-               goto err_add_producer;
+       if (!ret) {
+               ret = cons->add_producer(cons, prod);
+               if (ret && prod->del_consumer)
+                       prod->del_consumer(prod, cons);
+       }
 
        if (cons->start)
                cons->start(cons);
        if (prod->start)
                prod->start(prod);
-err_add_producer:
-       if (prod->del_consumer)
-               prod->del_consumer(prod, cons);
-err_add_consumer:
+
        return ret;
 }