Merge tag 'x86_cleanups_for_v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 30 Aug 2021 20:35:36 +0000 (13:35 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 30 Aug 2021 20:35:36 +0000 (13:35 -0700)
Pull x86 cleanups from Borislav Petkov:
 "The usual round of minor cleanups and fixes"

* tag 'x86_cleanups_for_v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kaslr: Have process_mem_region() return a boolean
  x86/power: Fix kernel-doc warnings in cpu.c
  x86/mce/inject: Replace deprecated CPU-hotplug functions.
  x86/microcode: Replace deprecated CPU-hotplug functions.
  x86/mtrr: Replace deprecated CPU-hotplug functions.
  x86/mmiotrace: Replace deprecated CPU-hotplug functions.
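
The four "Replace deprecated CPU-hotplug functions" commits above are mechanical conversions: get_online_cpus()/put_online_cpus() were deprecated in favor of cpus_read_lock()/cpus_read_unlock(), which take the same CPU-hotplug lock for reading under names that match what they actually do. A minimal sketch of the before/after shape (illustrative only; walk_cpus() and its pr_info() body are made up, not taken from these patches):

    #include <linux/cpu.h>

    static void walk_cpus(void)
    {
            int cpu;

            /* Before (deprecated): get_online_cpus(); ... put_online_cpus(); */
            cpus_read_lock();               /* read-acquire the hotplug lock */
            for_each_online_cpu(cpu)
                    pr_info("cpu %d is online\n", cpu);
            cpus_read_unlock();
    }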

1085 files changed:
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
Documentation/RCU/Design/Requirements/Requirements.rst
Documentation/RCU/checklist.rst
Documentation/RCU/rcu_dereference.rst
Documentation/RCU/stallwarn.rst
Documentation/bpf/libbpf/libbpf_naming_convention.rst
Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml
Documentation/devicetree/bindings/iio/st,st-sensors.yaml
Documentation/devicetree/bindings/power/supply/battery.yaml
Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/power/supply/summit,smb347-charger.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-ac-power-supply.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-battery-power-supply.yaml
Documentation/devicetree/bindings/power/supply/x-powers,axp20x-usb-power-supply.yaml
Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/uniphier-regulator.txt [deleted file]
Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
Documentation/devicetree/bindings/spi/omap-spi.txt [deleted file]
Documentation/devicetree/bindings/spi/omap-spi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/spi/rockchip-sfc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-mt65xx.txt
Documentation/devicetree/bindings/spi/spi-sprd-adi.txt [deleted file]
Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml [new file with mode: 0644]
Documentation/filesystems/locking.rst
Documentation/filesystems/mandatory-locking.rst [deleted file]
Documentation/gpu/rfc/i915_gem_lmem.rst
Documentation/i2c/index.rst
Documentation/networking/nf_conntrack-sysctl.rst
Documentation/userspace-api/seccomp_filter.rst
Documentation/virt/kvm/locking.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/checksum.h
arch/arc/include/asm/perf_event.h
arch/arc/kernel/fpu.c
arch/arc/kernel/unwind.c
arch/arc/kernel/vmlinux.lds.S
arch/arm/configs/nhk8815_defconfig
arch/arm/crypto/curve25519-glue.c
arch/arm/include/asm/memory.h
arch/arm/kernel/head.S
arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
arch/arm/mm/mmu.c
arch/arm/mm/pv-fixup-asm.S
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/crypto/Kconfig
arch/arm64/crypto/sm4-ce-glue.c
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/page.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/mm/init.c
arch/m68k/Kconfig.cpu
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/emu/nfeth.c
arch/m68k/include/asm/atomic.h
arch/parisc/include/asm/string.h
arch/parisc/kernel/parisc_ksyms.c
arch/parisc/lib/Makefile
arch/parisc/lib/memset.c [new file with mode: 0644]
arch/parisc/lib/string.S [deleted file]
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/mm/pageattr.c
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/xive/common.c
arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
arch/riscv/kernel/Makefile
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/setup.c
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/boot/Makefile
arch/s390/boot/boot.h
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/decompressor.c
arch/s390/boot/compressed/decompressor.h
arch/s390/boot/compressed/vmlinux.lds.S
arch/s390/boot/head.S
arch/s390/boot/ipl_report.c
arch/s390/boot/kaslr.c
arch/s390/boot/mem_detect.c
arch/s390/boot/pgm_check_info.c
arch/s390/boot/sclp_early_core.c
arch/s390/boot/startup.c
arch/s390/boot/uv.c
arch/s390/boot/uv.h [new file with mode: 0644]
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/hypfs/hypfs_diag0c.c
arch/s390/include/asm/cio.h
arch/s390/include/asm/cpacf.h
arch/s390/include/asm/cpufeature.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/diag.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/extable.h
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/ftrace.lds.h [new file with mode: 0644]
arch/s390/include/asm/ipl.h
arch/s390/include/asm/kfence.h [new file with mode: 0644]
arch/s390/include/asm/kvm_para.h
arch/s390/include/asm/linkage.h
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/module.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pci_dma.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/sclp.h
arch/s390/include/asm/sections.h
arch/s390/include/asm/set_memory.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/uv.h
arch/s390/include/asm/vdso/gettimeofday.h
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/debug.c
arch/s390/kernel/diag.c
arch/s390/kernel/dis.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/ftrace.h [new file with mode: 0644]
arch/s390/kernel/head64.S
arch/s390/kernel/ipl.c
arch/s390/kernel/ipl_vmparm.c
arch/s390/kernel/irq.c
arch/s390/kernel/jump_label.c
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/module.c
arch/s390/kernel/os_info.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/processor.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/text_amode31.S [moved from arch/s390/boot/text_dma.S with 69% similarity]
arch/s390/kernel/topology.c
arch/s390/kernel/traps.c
arch/s390/kernel/uv.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vmlinux.lds.S
arch/s390/lib/delay.c
arch/s390/mm/dump_pagetables.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/s390/mm/kasan_init.c
arch/s390/mm/maccess.c
arch/s390/mm/page-states.c
arch/s390/mm/pageattr.c
arch/s390/mm/vmem.c
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.c
arch/s390/pci/pci_bus.h
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_event.c
arch/s390/pci/pci_sysfs.c
arch/s390/purgatory/Makefile
arch/s390/tools/opcodes.txt
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/compressed/efi_thunk_64.S
arch/x86/boot/compressed/head_64.S
arch/x86/crypto/Makefile
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/sm4-aesni-avx-asm_64.S [new file with mode: 0644]
arch/x86/crypto/sm4-aesni-avx2-asm_64.S [new file with mode: 0644]
arch/x86/crypto/sm4-avx.h [new file with mode: 0644]
arch/x86/crypto/sm4_aesni_avx2_glue.c [new file with mode: 0644]
arch/x86/crypto/sm4_aesni_avx_glue.c [new file with mode: 0644]
arch/x86/events/amd/ibs.c
arch/x86/events/amd/power.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/kfence.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/svm.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/msi.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/hpet.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.h
arch/x86/tools/chkobjdump.awk
arch/x86/tools/relocs.c
arch/x86/tools/relocs.h
block/Kconfig.iosched
block/Makefile
block/blk-cgroup.c
block/blk-core.c
block/blk-flush.c
block/blk-iocost.c
block/blk-mq.c
block/blk.h
block/mq-deadline-cgroup.c [deleted file]
block/mq-deadline-cgroup.h [deleted file]
block/mq-deadline.c [moved from block/mq-deadline-main.c with 91% similarity]
certs/Kconfig
certs/Makefile
crypto/Kconfig
crypto/Makefile
crypto/asymmetric_keys/pkcs7_parser.c
crypto/ecc.h
crypto/sha512_generic.c
crypto/skcipher.c
crypto/sm4_generic.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
crypto/wp512.c
drivers/acpi/nfit/core.c
drivers/acpi/prmt.c
drivers/acpi/x86/s2idle.c
drivers/base/core.c
drivers/base/power/domain.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap.c
drivers/block/Kconfig
drivers/block/cryptoloop.c
drivers/block/nbd.c
drivers/block/paride/pd.c
drivers/block/virtio_blk.c
drivers/bus/mhi/core/internal.h
drivers/bus/mhi/core/main.c
drivers/bus/ti-sysc.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/hw_random/amd-rng.c
drivers/char/hw_random/arm_smccc_trng.c [new file with mode: 0644]
drivers/char/hw_random/geode-rng.c
drivers/char/hw_random/intel-rng.c
drivers/char/hw_random/via-rng.c
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_ibmvtpm.h
drivers/char/tpm/tpm_tis_i2c_cr50.c
drivers/clk/imx/clk-imx6q.c
drivers/clk/qcom/gdsc.c
drivers/clk/renesas/rcar-usb2-clock-sel.c
drivers/cpufreq/armada-37xx-cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/scmi-cpufreq.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-tdes.c
drivers/crypto/ccp/sev-dev.c
drivers/crypto/ccp/sp-pci.c
drivers/crypto/hisilicon/hpre/hpre_main.c
drivers/crypto/hisilicon/qm.c
drivers/crypto/hisilicon/qm.h
drivers/crypto/hisilicon/sec2/sec.h
drivers/crypto/hisilicon/sec2/sec_main.c
drivers/crypto/hisilicon/zip/zip_main.c
drivers/crypto/mxs-dcp.c
drivers/crypto/omap-aes.c
drivers/crypto/omap-crypto.c
drivers/crypto/omap-des.c
drivers/crypto/omap-sham.c
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/crypto/qat/qat_4xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
drivers/crypto/qat/qat_c3xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
drivers/crypto/qat/qat_c62xvf/adf_drv.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_aer.c
drivers/crypto/qat/qat_common/adf_common_drv.h
drivers/crypto/qat/qat_common/adf_init.c
drivers/crypto/qat/qat_common/adf_isr.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
drivers/crypto/qat/qat_common/adf_sriov.c
drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
drivers/crypto/qat/qat_common/adf_vf_isr.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
drivers/crypto/virtio/virtio_crypto_core.c
drivers/dax/super.c
drivers/edac/altera_edac.c
drivers/edac/amd64_edac.c
drivers/edac/edac_mc.c
drivers/edac/i10nm_base.c
drivers/edac/mce_amd.c
drivers/edac/skx_base.c
drivers/edac/skx_common.c
drivers/edac/skx_common.h
drivers/firmware/efi/libstub/arm64-stub.c
drivers/firmware/efi/libstub/randomalloc.c
drivers/firmware/smccc/smccc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/mediatek/mtk_disp_color.c
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/meson/meson_registers.h
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/dispnv50/head.h
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvif/client.h
drivers/gpu/drm/nouveau/include/nvif/driver.h
drivers/gpu/drm/nouveau/include/nvkm/core/client.h
drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_nvif.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nvif/client.c
drivers/gpu/drm/nouveau/nvif/object.c
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/ipu-v3/ipu-cpmem.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/i2c-dev.c
drivers/iio/accel/Kconfig
drivers/iio/accel/fxls8962af-core.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/adc/rn5t618-adc.c
drivers/iio/adc/ti-ads7950.c
drivers/iio/humidity/hdc100x.c
drivers/iio/imu/adis.c
drivers/infiniband/core/uverbs_std_types_mr.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/efa/efa_main.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/irdma/Kconfig
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/sw/rxe/rxe_mcast.c
drivers/infiniband/sw/rxe/rxe_queue.c
drivers/interconnect/qcom/icc-rpmh.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h
drivers/iommu/intel/svm.c
drivers/iommu/iommu.c
drivers/iommu/s390-iommu.c
drivers/ipack/carriers/tpci200.c
drivers/media/pci/intel/ipu3/cio2-bridge.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-msm.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/devices/mchp48l640.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/nand_base.c
drivers/net/bareudp.c
drivers/net/can/m_can/m_can.c
drivers/net/can/usb/esd_usb2.c
drivers/net/dsa/hirschmann/hellcreek.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz8795_reg.h
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/qca/ar9331.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_mdio.c
drivers/net/ethernet/apm/xgene-v2/main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_devlink.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_io.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
drivers/net/ethernet/ti/cpsw_new.c
drivers/net/ethernet/ti/cpsw_priv.h
drivers/net/ethernet/ti/cpsw_switchdev.c
drivers/net/hamradio/6pack.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/mdio/mdio-mux.c
drivers/net/mhi/net.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/mediatek-ge.c
drivers/net/phy/micrel.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/asix.h
drivers/net/usb/asix_common.c
drivers/net/usb/asix_devices.c
drivers/net/usb/pegasus.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
drivers/net/wwan/mhi_wwan_ctrl.c
drivers/net/wwan/wwan_core.c
drivers/nvdimm/namespace_devs.c
drivers/opp/core.c
drivers/opp/of.c
drivers/pci/controller/pci-ixp4xx.c
drivers/pci/msi.c
drivers/pci/pci-sysfs.c
drivers/pci/quirks.c
drivers/pinctrl/intel/pinctrl-tigerlake.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-k210.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/platform/x86/Kconfig
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/dual_accel_detect.h [new file with mode: 0644]
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/pcengines-apuv2.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/reset/Kconfig
drivers/power/reset/Makefile
drivers/power/reset/linkstation-poweroff.c
drivers/power/reset/tps65086-restart.c [new file with mode: 0644]
drivers/power/supply/Kconfig
drivers/power/supply/Makefile
drivers/power/supply/ab8500-bm.h
drivers/power/supply/ab8500_bmdata.c
drivers/power/supply/ab8500_btemp.c
drivers/power/supply/ab8500_chargalg.c [moved from drivers/power/supply/abx500_chargalg.c with 74% similarity]
drivers/power/supply/ab8500_charger.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/axp288_charger.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq24735-charger.c
drivers/power/supply/cros_peripheral_charger.c [new file with mode: 0644]
drivers/power/supply/cw2015_battery.c
drivers/power/supply/max17042_battery.c
drivers/power/supply/mt6360_charger.c [new file with mode: 0644]
drivers/power/supply/power_supply_core.c
drivers/power/supply/qcom_smbb.c
drivers/power/supply/rn5t618_power.c
drivers/power/supply/sbs-battery.c
drivers/power/supply/sc27xx_fuel_gauge.c
drivers/power/supply/smb347-charger.c
drivers/ptp/Kconfig
drivers/ptp/ptp_sysfs.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/bd718x7-regulator.c
drivers/regulator/da9063-regulator.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/devres.c
drivers/regulator/fixed.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/irq_helpers.c
drivers/regulator/mt6358-regulator.c
drivers/regulator/mt6359-regulator.c
drivers/regulator/mt6397-regulator.c
drivers/regulator/rt5033-regulator.c
drivers/regulator/rt6245-regulator.c
drivers/regulator/rtq2134-regulator.c [new file with mode: 0644]
drivers/regulator/rtq6752-regulator.c [new file with mode: 0644]
drivers/regulator/sy7636a-regulator.c
drivers/regulator/sy8824x.c
drivers/regulator/sy8827n.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/vctrl-regulator.c
drivers/reset/Kconfig
drivers/reset/reset-zynqmp.c
drivers/s390/block/dasd_ioctl.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp.h
drivers/s390/char/sclp_cmd.c
drivers/s390/char/sclp_config.c
drivers/s390/char/sclp_early_core.c
drivers/s390/char/zcore.c
drivers/s390/cio/css.c
drivers/s390/cio/qdio.h
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_ccamisc.c
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2c.c
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/scsi/zfcp_qdio.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/storvsc_drv.c
drivers/slimbus/messaging.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/soc/fsl/qe/qe_ic.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-bcm2835aux.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-davinci.c
drivers/spi/spi-ep93xx.c
drivers/spi/spi-fsi.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-imx.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-mxic.c
drivers/spi/spi-orion.c
drivers/spi/spi-pic32.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rockchip-sfc.c [new file with mode: 0644]
drivers/spi/spi-sprd-adi.c
drivers/spi/spi-stm32.c
drivers/spi/spi-tegra114.c
drivers/spi/spi-tegra20-slink.c
drivers/spi/spi-zynq-qspi.c
drivers/spi/spi.c
drivers/staging/media/av7110/av7110.h
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/host/xhci-pci-renesas.c
drivers/usb/serial/ch341.c
drivers/usb/serial/option.c
drivers/usb/typec/tcpm/tcpm.c
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vringh.c
drivers/virtio/virtio.c
drivers/virtio/virtio_mem.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_ring.c
drivers/virtio/virtio_vdpa.c
drivers/xen/events/events_base.c
fs/9p/vfs_file.c
fs/Kconfig
fs/afs/flock.c
fs/btrfs/inode.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mdsmap.c
fs/ceph/snap.c
fs/ceph/super.h
fs/cifs/cifsglob.h
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/configfs/file.c
fs/dax.c
fs/ext2/Kconfig
fs/ext2/ext2.h
fs/ext2/file.c
fs/ext2/inode.c
fs/ext2/super.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/super.c
fs/ext4/truncate.h
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/super.c
fs/fcntl.c
fs/fuse/dax.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/file.c
fs/hpfs/Kconfig
fs/hpfs/file.c
fs/inode.c
fs/io-wq.c
fs/io_uring.c
fs/ioctl.c
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/joliet.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/nfs/file.c
fs/nfsd/nfs4state.c
fs/nfsd/vfs.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fsnotify.c
fs/notify/fsnotify.h
fs/notify/inotify/inotify_user.c
fs/notify/mark.c
fs/ocfs2/locks.c
fs/open.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/readdir.c
fs/pipe.c
fs/read_write.c
fs/remap_range.c
fs/udf/dir.c
fs/udf/ecma_167.h
fs/udf/inode.c
fs/udf/misc.c
fs/udf/namei.c
fs/udf/osta_udf.h
fs/udf/super.c
fs/udf/udf_sb.h
fs/udf/udfdecl.h
fs/udf/unicode.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_super.c
fs/zonefs/super.c
fs/zonefs/zonefs.h
include/asm-generic/vmlinux.lds.h
include/crypto/public_key.h
include/crypto/sm4.h
include/dt-bindings/power/summit,smb347-charger.h
include/linux/bpf-cgroup.h
include/linux/device.h
include/linux/edac.h
include/linux/fanotify.h
include/linux/fiemap.h
include/linux/fs.h
include/linux/fsnotify.h
include/linux/ftrace.h
include/linux/inetdevice.h
include/linux/irq.h
include/linux/kfence.h
include/linux/linear_range.h
include/linux/memcontrol.h
include/linux/mfd/rt5033-private.h
include/linux/mhi.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc_vdpa.h
include/linux/msi.h
include/linux/netfilter/ipset/ip_set.h
include/linux/once.h
include/linux/padata.h
include/linux/pid.h
include/linux/pipe_fs_i.h
include/linux/platform_data/cros_ec_commands.h
include/linux/platform_data/spi-mt65xx.h
include/linux/power/max17042_battery.h
include/linux/power_supply.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/regmap.h
include/linux/regulator/consumer.h
include/linux/regulator/driver.h
include/linux/regulator/machine.h
include/linux/resctrl.h
include/linux/security.h
include/linux/spi/spi.h
include/linux/srcutiny.h
include/linux/vdpa.h
include/linux/virtio.h
include/linux/vringh.h
include/net/flow_offload.h
include/net/ip6_fib.h
include/net/netns/conntrack.h
include/net/psample.h
include/trace/events/mmflags.h
include/uapi/linux/dvb/audio.h [moved from drivers/staging/media/av7110/audio.h with 100% similarity]
include/uapi/linux/dvb/osd.h [moved from drivers/staging/media/av7110/osd.h with 100% similarity]
include/uapi/linux/dvb/video.h [moved from drivers/staging/media/av7110/video.h with 100% similarity]
include/uapi/linux/fanotify.h
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/nfnetlink_hook.h
init/main.c
kernel/bpf/core.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/verifier.c
kernel/cfi.c
kernel/cgroup/rstat.c
kernel/cred.c
kernel/fork.c
kernel/irq/chip.c
kernel/irq/msi.c
kernel/irq/timings.c
kernel/kcsan/debugfs.c
kernel/locking/locktorture.c
kernel/locking/rtmutex.c
kernel/padata.c
kernel/pid.c
kernel/rcu/rcuscale.c
kernel/rcu/rcutorture.c
kernel/rcu/refscale.c
kernel/rcu/srcutiny.c
kernel/rcu/tasks.h
kernel/rcu/tree.c
kernel/rcu/tree_nocb.h [new file with mode: 0644]
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h
kernel/scftorture.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/seccomp.c
kernel/torture.c
kernel/trace/Kconfig
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_hist.c
kernel/trace/trace_osnoise.c
kernel/ucount.c
lib/crypto/Kconfig
lib/crypto/Makefile
lib/crypto/blake2s.c
lib/crypto/chacha20poly1305.c
lib/crypto/curve25519.c
lib/crypto/sm4.c [new file with mode: 0644]
lib/devmem_is_allowed.c
lib/linear_ranges.c
lib/mpi/mpiutil.c
lib/once.c
lib/string.c
mm/filemap.c
mm/gup.c
mm/hugetlb.c
mm/kfence/kfence_test.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mmap.c
mm/nommu.c
mm/page_alloc.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slub.c
mm/swap_state.c
mm/truncate.c
mm/vmscan.c
net/bpf/test_run.c
net/bridge/br.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_private.h
net/bridge/netfilter/nf_conntrack_bridge.c
net/core/link_watch.c
net/core/page_pool.c
net/core/rtnetlink.c
net/dccp/dccp.h
net/dsa/slave.c
net/ieee802154/socket.c
net/ipv4/cipso_ipv4.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/route.c
net/ipv4/tcp_bbr.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/route.c
net/mac80211/main.c
net/mptcp/options.c
net/mptcp/pm_netlink.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nfnetlink_hook.c
net/openvswitch/flow.c
net/openvswitch/vport.c
net/qrtr/mhi.c
net/qrtr/qrtr.c
net/rds/ib_frmr.c
net/sched/act_mirred.c
net/sched/sch_cake.c
net/sched/sch_ets.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_llc.c
net/smc/smc_tx.c
net/smc/smc_wr.c
net/sunrpc/svc_xprt.c
net/tipc/link.c
net/tipc/socket.c
net/vmw_vsock/virtio_transport.c
security/security.c
sound/core/pcm_native.c
sound/firewire/oxfw/oxfw-stream.c
sound/firewire/oxfw/oxfw.c
sound/firewire/oxfw/oxfw.h
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/Kconfig
sound/soc/amd/acp-da7219-max98357a.c
sound/soc/amd/acp-pcm-dma.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/amd/renoir/acp3x-pdm-dma.c
sound/soc/amd/renoir/rn-pci-acp3x.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs42l42.h
sound/soc/codecs/nau8824.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/sof_da7219_max98373.c
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/soc-component.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-ipc.c
sound/soc/sof/intel/hda.c
sound/soc/uniphier/aio-dma.c
sound/soc/xilinx/xlnx_formatter_pcm.c
tools/include/nolibc/nolibc.h
tools/io_uring/io_uring-cp.c
tools/lib/bpf/btf.c
tools/lib/bpf/libbpf_probes.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/verifier/dead_code.c
tools/testing/selftests/rcutorture/bin/jitter.sh
tools/testing/selftests/rcutorture/bin/kcsan-collapse.sh
tools/testing/selftests/rcutorture/bin/kvm-again.sh
tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-scf.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-remote.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/torture.sh
tools/testing/selftests/rcutorture/configs/rcu/RUDE01
tools/testing/selftests/rcutorture/configs/rcu/TASKS01
tools/testing/selftests/rcutorture/configs/rcu/TASKS03
tools/testing/selftests/sgx/sigstruct.c
tools/virtio/Makefile
tools/virtio/linux/spinlock.h [new file with mode: 0644]
tools/virtio/linux/virtio.h

diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
index 11cdab037bff6b00a70745a5e4cebfd16ea86a6b..eeb351296df111f9419b2d19d72f978b78f0efcd 100644 (file)
@@ -112,6 +112,35 @@ on PowerPC.
 The ``smp_mb__after_unlock_lock()`` invocations prevent this
 ``WARN_ON()`` from triggering.
 
++-----------------------------------------------------------------------+
+| **Quick Quiz**:                                                       |
++-----------------------------------------------------------------------+
+| But the chain of rcu_node-structure lock acquisitions guarantees      |
+| that new readers will see all of the updater's pre-grace-period       |
+| accesses and also guarantees that the updater's post-grace-period     |
+| accesses will see all of the old reader's accesses.  So why do we     |
+| need all of those calls to smp_mb__after_unlock_lock()?               |
++-----------------------------------------------------------------------+
+| **Answer**:                                                           |
++-----------------------------------------------------------------------+
+| Because we must provide ordering for RCU's polling grace-period       |
+| primitives, for example, get_state_synchronize_rcu() and              |
+| poll_state_synchronize_rcu().  Consider this code::                   |
+|                                                                       |
+|  CPU 0                                     CPU 1                      |
+|  ----                                      ----                       |
+|  WRITE_ONCE(X, 1)                          WRITE_ONCE(Y, 1)           |
+|  g = get_state_synchronize_rcu()           smp_mb()                   |
+|  while (!poll_state_synchronize_rcu(g))    r1 = READ_ONCE(X)          |
+|          continue;                                                    |
+|  r0 = READ_ONCE(Y)                                                    |
+|                                                                       |
+| RCU guarantees that the outcome r0 == 0 && r1 == 0 will not           |
+| happen, even if CPU 1 is in an RCU extended quiescent state           |
+| (idle or offline) and thus won't interact directly with the RCU       |
+| core processing at all.                                               |
++-----------------------------------------------------------------------+
+
 This approach must be extended to include idle CPUs, which need
 RCU's grace-period memory ordering guarantee to extend to any
 RCU read-side critical sections preceding and following the current
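
The quick quiz added above hinges on RCU's polling grace-period primitives. As a rough rendering of the quiz's CPU 0 column in kernel C (a sketch, not part of this patch; X, Y, and r0 are the quiz's variables, and the real declarations live in include/linux/rcupdate.h):

    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    static int X, Y;        /* the quiz's shared variables */

    static int cpu0_updater(void)
    {
            unsigned long cookie;

            WRITE_ONCE(X, 1);
            cookie = get_state_synchronize_rcu();   /* snapshot grace-period state */
            while (!poll_state_synchronize_rcu(cookie))
                    cond_resched();                 /* wait until a full GP has elapsed */
            /* Per the quiz, this r0 and CPU 1's r1 cannot both be 0. */
            return READ_ONCE(Y);                    /* r0 */
    }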
diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst
index 38a39476fc248af893ab574bd13e4eed92278fdf..45278e2974c04c13df06c63fe3f6deaaf7ac1ccf 100644 (file)
@@ -362,9 +362,8 @@ do_something_gp() uses rcu_dereference() to fetch from ``gp``:
       12 }
 
 The rcu_dereference() uses volatile casts and (for DEC Alpha) memory
-barriers in the Linux kernel. Should a `high-quality implementation of
-C11 ``memory_order_consume``
-[PDF] <http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf>`__
+barriers in the Linux kernel. Should a |high-quality implementation of
+C11 memory_order_consume [PDF]|_
 ever appear, then rcu_dereference() could be implemented as a
 ``memory_order_consume`` load. Regardless of the exact implementation, a
 pointer fetched by rcu_dereference() may not be used outside of the
@@ -374,6 +373,9 @@ element has been passed from RCU to some other synchronization
 mechanism, most commonly locking or `reference
 counting <https://www.kernel.org/doc/Documentation/RCU/rcuref.txt>`__.
 
+.. |high-quality implementation of C11 memory_order_consume [PDF]| replace:: high-quality implementation of C11 ``memory_order_consume`` [PDF]
+.. _high-quality implementation of C11 memory_order_consume [PDF]: http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf
+
 In short, updaters use rcu_assign_pointer() and readers use
 rcu_dereference(), and these two RCU API elements work together to
 ensure that readers have a consistent view of newly added data elements.
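
To make the rcu_assign_pointer()/rcu_dereference() pairing in the hunk above concrete, here is the canonical publish/subscribe shape (a sketch under assumed names: struct foo and gp echo the document's example, while publish() and read_a() are invented):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo { int a; };
    static struct foo __rcu *gp;

    static void publish(void)
    {
            struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

            if (!p)
                    return;
            p->a = 42;
            rcu_assign_pointer(gp, p);      /* orders the init before publication */
    }

    static int read_a(void)
    {
            struct foo *q;
            int val = -1;

            rcu_read_lock();
            q = rcu_dereference(gp);        /* volatile cast; barrier on Alpha */
            if (q)
                    val = q->a;
            rcu_read_unlock();
            return val;
    }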
diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst
index 01cc21f17f7bdbc6d9b06df245cfb560e213bb10..f4545b7c9a63d29c7b2353802e11d2e58659343f 100644 (file)
@@ -37,7 +37,7 @@ over a rather long period of time, but improvements are always welcome!
 
 1.     Does the update code have proper mutual exclusion?
 
-       RCU does allow -readers- to run (almost) naked, but -writers- must
+       RCU does allow *readers* to run (almost) naked, but *writers* must
        still use some sort of mutual exclusion, such as:
 
        a.      locking,
@@ -73,7 +73,7 @@ over a rather long period of time, but improvements are always welcome!
        critical section is every bit as bad as letting them leak out
        from under a lock.  Unless, of course, you have arranged some
        other means of protection, such as a lock or a reference count
-       -before- letting them out of the RCU read-side critical section.
+       *before* letting them out of the RCU read-side critical section.
 
 3.     Does the update code tolerate concurrent accesses?
 
@@ -101,7 +101,7 @@ over a rather long period of time, but improvements are always welcome!
        c.      Make updates appear atomic to readers.  For example,
                pointer updates to properly aligned fields will
                appear atomic, as will individual atomic primitives.
-               Sequences of operations performed under a lock will -not-
+               Sequences of operations performed under a lock will *not*
                appear to be atomic to RCU readers, nor will sequences
                of multiple atomic primitives.
 
@@ -333,7 +333,7 @@ over a rather long period of time, but improvements are always welcome!
        for example) may be omitted.
 
 10.    Conversely, if you are in an RCU read-side critical section,
-       and you don't hold the appropriate update-side lock, you -must-
+       and you don't hold the appropriate update-side lock, you *must*
        use the "_rcu()" variants of the list macros.  Failing to do so
        will break Alpha, cause aggressive compilers to generate bad code,
        and confuse people trying to read your code.
@@ -359,12 +359,12 @@ over a rather long period of time, but improvements are always welcome!
        callback pending, then that RCU callback will execute on some
        surviving CPU.  (If this was not the case, a self-spawning RCU
        callback would prevent the victim CPU from ever going offline.)
-       Furthermore, CPUs designated by rcu_nocbs= might well -always-
+       Furthermore, CPUs designated by rcu_nocbs= might well *always*
        have their RCU callbacks executed on some other CPUs, in fact,
        for some  real-time workloads, this is the whole point of using
        the rcu_nocbs= kernel boot parameter.
 
-13.    Unlike other forms of RCU, it -is- permissible to block in an
+13.    Unlike other forms of RCU, it *is* permissible to block in an
        SRCU read-side critical section (demarked by srcu_read_lock()
        and srcu_read_unlock()), hence the "SRCU": "sleepable RCU".
        Please note that if you don't need to sleep in read-side critical
@@ -411,16 +411,16 @@ over a rather long period of time, but improvements are always welcome!
 14.    The whole point of call_rcu(), synchronize_rcu(), and friends
        is to wait until all pre-existing readers have finished before
        carrying out some otherwise-destructive operation.  It is
-       therefore critically important to -first- remove any path
+       therefore critically important to *first* remove any path
        that readers can follow that could be affected by the
-       destructive operation, and -only- -then- invoke call_rcu(),
+       destructive operation, and *only then* invoke call_rcu(),
        synchronize_rcu(), or friends.
 
        Because these primitives only wait for pre-existing readers, it
        is the caller's responsibility to guarantee that any subsequent
        readers will execute safely.
 
-15.    The various RCU read-side primitives do -not- necessarily contain
+15.    The various RCU read-side primitives do *not* necessarily contain
        memory barriers.  You should therefore plan for the CPU
        and the compiler to freely reorder code into and out of RCU
        read-side critical sections.  It is the responsibility of the
@@ -459,8 +459,8 @@ over a rather long period of time, but improvements are always welcome!
        pass in a function defined within a loadable module, then it is
        necessary to wait for all pending callbacks to be invoked after
        the last invocation and before unloading that module.  Note that
-       it is absolutely -not- sufficient to wait for a grace period!
-       The current (say) synchronize_rcu() implementation is -not-
+       it is absolutely *not* sufficient to wait for a grace period!
+       The current (say) synchronize_rcu() implementation is *not*
        guaranteed to wait for callbacks registered on other CPUs.
        Or even on the current CPU if that CPU recently went offline
        and came back online.
@@ -470,7 +470,7 @@ over a rather long period of time, but improvements are always welcome!
        -       call_rcu() -> rcu_barrier()
        -       call_srcu() -> srcu_barrier()
 
-       However, these barrier functions are absolutely -not- guaranteed
+       However, these barrier functions are absolutely *not* guaranteed
        to wait for a grace period.  In fact, if there are no call_rcu()
        callbacks waiting anywhere in the system, rcu_barrier() is within
        its rights to return immediately.
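
The module-unload rule above (call_rcu() -> rcu_barrier()) is easy to get wrong, so a small sketch of the exit-path ordering may help (assumed code, not from this patch; struct my_obj and my_free_cb() are invented):

    #include <linux/module.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_obj {
            struct rcu_head rh;
            /* ... payload ... */
    };

    static void my_free_cb(struct rcu_head *rhp)
    {
            kfree(container_of(rhp, struct my_obj, rh));
    }

    /* Elsewhere in the module: call_rcu(&obj->rh, my_free_cb); */

    static void __exit my_exit(void)
    {
            /* 1. Unpublish everything so no new callbacks get posted. */
            /* 2. Wait for every already-posted my_free_cb() to run.   */
            rcu_barrier();
            /* 3. Only now may the text of my_free_cb() be unloaded.   */
    }
    module_exit(my_exit);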
diff --git a/Documentation/RCU/rcu_dereference.rst b/Documentation/RCU/rcu_dereference.rst
index f3e587acb4deb732ee77a1c14a1c58a8f9cbeff5..0b418a5b243c535b5de8949b493c4a2892870258 100644 (file)
@@ -43,7 +43,7 @@ Follow these rules to keep your RCU code working properly:
        -       Set bits and clear bits down in the must-be-zero low-order
                bits of that pointer.  This clearly means that the pointer
                must have alignment constraints, for example, this does
-               -not- work in general for char* pointers.
+               *not* work in general for char* pointers.
 
        -       XOR bits to translate pointers, as is done in some
                classic buddy-allocator algorithms.
@@ -174,7 +174,7 @@ Follow these rules to keep your RCU code working properly:
                Please see the "CONTROL DEPENDENCIES" section of
                Documentation/memory-barriers.txt for more details.
 
-       -       The pointers are not equal -and- the compiler does
+       -       The pointers are not equal *and* the compiler does
                not have enough information to deduce the value of the
                pointer.  Note that the volatile cast in rcu_dereference()
                will normally prevent the compiler from knowing too much.
@@ -360,7 +360,7 @@ in turn destroying the ordering between this load and the loads of the
 return values.  This can result in "p->b" returning pre-initialization
 garbage values.
 
-In short, rcu_dereference() is -not- optional when you are going to
+In short, rcu_dereference() is *not* optional when you are going to
 dereference the resulting pointer.
 
 
diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst
index 7148e9be08c34a4148808c8b742225be63a94115..5036df24ae61c56176bebddc8a3cc33d5ec4e92f 100644 (file)
@@ -32,7 +32,7 @@ warnings:
 
 -      Booting Linux using a console connection that is too slow to
        keep up with the boot-time console-message rate.  For example,
-       a 115Kbaud serial console can be -way- too slow to keep up
+       a 115Kbaud serial console can be *way* too slow to keep up
        with boot-time message rates, and will frequently result in
        RCU CPU stall warning messages.  Especially if you have added
        debug printk()s.
@@ -105,7 +105,7 @@ warnings:
        leading to the realization that the CPU had failed.
 
 The RCU, RCU-sched, and RCU-tasks implementations have CPU stall warning.
-Note that SRCU does -not- have CPU stall warnings.  Please note that
+Note that SRCU does *not* have CPU stall warnings.  Please note that
 RCU only detects CPU stalls when there is a grace period in progress.
 No grace period, no CPU stall warnings.
 
@@ -145,7 +145,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
        this parameter is checked only at the beginning of a cycle.
        So if you are 10 seconds into a 40-second stall, setting this
        sysfs parameter to (say) five will shorten the timeout for the
-       -next- stall, or the following warning for the current stall
+       *next* stall, or the following warning for the current stall
        (assuming the stall lasts long enough).  It will not affect the
        timing of the next warning for the current stall.
 
@@ -189,8 +189,8 @@ rcupdate.rcu_task_stall_timeout
 Interpreting RCU's CPU Stall-Detector "Splats"
 ==============================================
 
-For non-RCU-tasks flavors of RCU, when a CPU detects that it is stalling,
-it will print a message similar to the following::
+For non-RCU-tasks flavors of RCU, when a CPU detects that some other
+CPU is stalling, it will print a message similar to the following::
 
        INFO: rcu_sched detected stalls on CPUs/tasks:
        2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
@@ -202,8 +202,10 @@ causing stalls, and that the stall was affecting RCU-sched.  This message
 will normally be followed by stack dumps for each CPU.  Please note that
 PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that
 the tasks will be indicated by PID, for example, "P3421".  It is even
-possible for an rcu_state stall to be caused by both CPUs -and- tasks,
+possible for an rcu_state stall to be caused by both CPUs *and* tasks,
 in which case the offending CPUs and tasks will all be called out in the list.
+In some cases, CPUs will detect themselves stalling, which will result
+in a self-detected stall.
 
 CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with
 the RCU core for the past three grace periods.  In contrast, CPU 16's "(0
@@ -224,7 +226,7 @@ is the number that had executed since boot at the time that this CPU
 last noted the beginning of a grace period, which might be the current
 (stalled) grace period, or it might be some earlier grace period (for
 example, if the CPU might have been in dyntick-idle mode for an extended
-time period.  The number after the "/" is the number that have executed
+time period).  The number after the "/" is the number that have executed
 since boot until the current time.  If this latter number stays constant
 across repeated stall-warning messages, it is possible that RCU's softirq
 handlers are no longer able to execute on this CPU.  This can happen if
@@ -283,7 +285,8 @@ If the relevant grace-period kthread has been unable to run prior to
 the stall warning, as was the case in the "All QSes seen" line above,
 the following additional line is printed::
 
-       kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
+       rcu_sched kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
+       Unless rcu_sched kthread gets sufficient CPU time, OOM is now expected behavior.
 
 Starving the grace-period kthreads of CPU time can of course result
 in RCU CPU stall warnings even when all CPUs and tasks have passed
@@ -313,15 +316,21 @@ is the current ``TIMER_SOFTIRQ`` count on cpu 4.  If this value does not
 change on successive RCU CPU stall warnings, there is further reason to
 suspect a timer problem.
 
+These messages are usually followed by stack dumps of the CPUs and tasks
+involved in the stall.  These stack traces can help you locate the cause
+of the stall, keeping in mind that the CPU detecting the stall will have
+an interrupt frame that is mainly devoted to detecting the stall.
+
 
 Multiple Warnings From One Stall
 ================================
 
-If a stall lasts long enough, multiple stall-warning messages will be
-printed for it.  The second and subsequent messages are printed at
+If a stall lasts long enough, multiple stall-warning messages will
+be printed for it.  The second and subsequent messages are printed at
 longer intervals, so that the time between (say) the first and second
 message will be about three times the interval between the beginning
-of the stall and the first message.
+of the stall and the first message.  It can be helpful to compare the
+stack dumps for the different messages for the same stalled grace period.
 
 
 Stall Warnings for Expedited Grace Periods
diff --git a/Documentation/bpf/libbpf/libbpf_naming_convention.rst b/Documentation/bpf/libbpf/libbpf_naming_convention.rst
index 3de1d51e41da8b4db2864eeff58603779b385807..6bf9c5ac75768e524c9f1479f65246e005ab304a 100644 (file)
@@ -108,7 +108,7 @@ This bump in ABI version is at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                global:
@@ -121,7 +121,7 @@ For example, if current state of ``libbpf.map`` is:
 , and a new symbol ``bpf_func_c`` is being introduced, then
 ``libbpf.map`` should be changed like this:
 
-.. code-block:: c
+.. code-block:: none
 
         LIBBPF_0.0.1 {
                global:
diff --git a/Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml b/Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml
index e425278653f5b3bd4a5372693ed461dc179f1fb4..e2ca0b00047147d398bcf70252ce976354c465d7 100644 (file)
@@ -19,7 +19,6 @@ properties:
   compatible:
     enum:
       - ibm,fsi2spi
-      - ibm,fsi2spi-restricted
 
   reg:
     items:
diff --git a/Documentation/devicetree/bindings/iio/st,st-sensors.yaml b/Documentation/devicetree/bindings/iio/st,st-sensors.yaml
index b2a1e42c56faec0e871c56ada8b79a31573a6bc0..71de5631ebaef276b07cb853e40037b22781ba32 100644 (file)
@@ -152,47 +152,6 @@ allOf:
           maxItems: 1
         st,drdy-int-pin: false
 
-  - if:
-      properties:
-        compatible:
-          enum:
-            # Two inertial interrupts i.e. accelerometer/gyro interrupts
-            - st,h3lis331dl-accel
-            - st,l3g4200d-gyro
-            - st,l3g4is-gyro
-            - st,l3gd20-gyro
-            - st,l3gd20h-gyro
-            - st,lis2de12
-            - st,lis2dw12
-            - st,lis2hh12
-            - st,lis2dh12-accel
-            - st,lis331dl-accel
-            - st,lis331dlh-accel
-            - st,lis3de
-            - st,lis3dh-accel
-            - st,lis3dhh
-            - st,lis3mdl-magn
-            - st,lng2dm-accel
-            - st,lps331ap-press
-            - st,lsm303agr-accel
-            - st,lsm303dlh-accel
-            - st,lsm303dlhc-accel
-            - st,lsm303dlm-accel
-            - st,lsm330-accel
-            - st,lsm330-gyro
-            - st,lsm330d-accel
-            - st,lsm330d-gyro
-            - st,lsm330dl-accel
-            - st,lsm330dl-gyro
-            - st,lsm330dlc-accel
-            - st,lsm330dlc-gyro
-            - st,lsm9ds0-gyro
-            - st,lsm9ds1-magn
-    then:
-      properties:
-        interrupts:
-          maxItems: 2
-
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/power/supply/battery.yaml b/Documentation/devicetree/bindings/power/supply/battery.yaml
index c3b4b754359186ab8dae6c1bc685926b34592ec8..d56ac484fec50bc35e70e54e74691afb7fbaaa86 100644 (file)
@@ -31,6 +31,20 @@ properties:
   compatible:
     const: simple-battery
 
+  device-chemistry:
+    description: This describes the chemical technology of the battery.
+    oneOf:
+      - const: nickel-cadmium
+      - const: nickel-metal-hydride
+      - const: lithium-ion
+        description: This is a blanket type for all lithium-ion batteries,
+          including those below. If possible, a precise compatible string
+          from below should be used, but sometimes it is unknown which specific
+          lithium ion battery is employed and this wide compatible can be used.
+      - const: lithium-ion-polymer
+      - const: lithium-ion-iron-phosphate
+      - const: lithium-ion-manganese-oxide
+
   over-voltage-threshold-microvolt:
     description: battery over-voltage limit
 
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17042.yaml
index c70f05ea6d277951590bc449db41caca77843c7f..971b53c58cc6affe25c40122e2267f9bb41d77d1 100644 (file)
@@ -19,12 +19,15 @@ properties:
       - maxim,max17047
       - maxim,max17050
       - maxim,max17055
+      - maxim,max77849-battery
 
   reg:
     maxItems: 1
 
   interrupts:
     maxItems: 1
+    description: |
+      The ALRT pin, an open-drain interrupt.
 
   maxim,rsns-microohm:
     $ref: /schemas/types.yaml#/definitions/uint32
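A hedged sketch of a node using the new compatible and the ALRT interrupt described above (the I2C address, interrupt parent and line number are assumptions for illustration):

        fuel-gauge@36 {
                compatible = "maxim,max77849-battery";
                reg = <0x36>;
                interrupt-parent = <&gpio2>;
                interrupts = <3 IRQ_TYPE_EDGE_FALLING>; /* ALRT, open-drain */
                maxim,rsns-microohm = <10000>;
        };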
diff --git a/Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml b/Documentation/devicetree/bindings/power/supply/mt6360_charger.yaml
new file mode 100644 (file)
index 0000000..b89b15a
--- /dev/null
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/mt6360_charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Battery charger driver for MT6360 PMIC from MediaTek Integrated.
+
+maintainers:
+  - Gene Chen <gene_chen@richtek.com>
+
+description: |
+  This module is part of the MT6360 MFD device.
+  It provides a battery charger, a boost supply for OTG devices, and BC1.2 detection.
+
+properties:
+  compatible:
+    const: mediatek,mt6360-chg
+
+  richtek,vinovp-microvolt:
+    description: Maximum CHGIN regulation voltage in uV.
+    enum: [ 5500000, 6500000, 11000000, 14500000 ]
+
+  usb-otg-vbus-regulator:
+    type: object
+    description: OTG boost regulator.
+    $ref: /schemas/regulator/regulator.yaml#
+
+required:
+  - compatible
+
+additionalProperties: false
+
+examples:
+  - |
+    mt6360_charger: charger {
+      compatible = "mediatek,mt6360-chg";
+      richtek,vinovp-microvolt = <14500000>;
+
+      otg_vbus_regulator: usb-otg-vbus-regulator {
+        regulator-compatible = "usb-otg-vbus";
+        regulator-name = "usb-otg-vbus";
+        regulator-min-microvolt = <4425000>;
+        regulator-max-microvolt = <5825000>;
+      };
+    };
+...
index 983fc215c1e51ecfd778b21051eff3dd618d50c8..20862cdfc1163ac52222d5b003e16f8c98d57b32 100644 (file)
@@ -73,6 +73,26 @@ properties:
       - 1 # SMB3XX_SOFT_TEMP_COMPENSATE_CURRENT Current compensation
       - 2 # SMB3XX_SOFT_TEMP_COMPENSATE_VOLTAGE Voltage compensation
 
+  summit,inok-polarity:
+    description: |
+      Polarity of INOK signal indicating presence of external power supply.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum:
+      - 0 # SMB3XX_SYSOK_INOK_ACTIVE_LOW
+      - 1 # SMB3XX_SYSOK_INOK_ACTIVE_HIGH
+
+  usb-vbus:
+    $ref: "../../regulator/regulator.yaml#"
+    type: object
+
+    properties:
+      summit,needs-inok-toggle:
+        type: boolean
+        description: INOK signal is fixed and polarity needs to be toggled
+                     in order to enable/disable output mode.
+
+    unevaluatedProperties: false
+
 allOf:
   - if:
       properties:
@@ -134,6 +154,7 @@ examples:
             reg = <0x7f>;
 
             summit,enable-charge-control = <SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH>;
+            summit,inok-polarity = <SMB3XX_SYSOK_INOK_ACTIVE_LOW>;
             summit,chip-temperature-threshold-celsius = <110>;
             summit,mains-current-limit-microamp = <2000000>;
             summit,usb-current-limit-microamp = <500000>;
@@ -141,6 +162,15 @@ examples:
             summit,enable-mains-charging;
 
             monitored-battery = <&battery>;
+
+            usb-vbus {
+                regulator-name = "usb_vbus";
+                regulator-min-microvolt = <5000000>;
+                regulator-max-microvolt = <5000000>;
+                regulator-min-microamp = <750000>;
+                regulator-max-microamp = <750000>;
+                summit,needs-inok-toggle;
+            };
         };
     };
 
index dcda6660b8ed50b7a347c7dff6983f5200223eea..de6a23aee9773668466ad5c11620431f4b69bd16 100644 (file)
@@ -21,10 +21,13 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp202-ac-power-supply
-      - x-powers,axp221-ac-power-supply
-      - x-powers,axp813-ac-power-supply
+    oneOf:
+      - const: x-powers,axp202-ac-power-supply
+      - const: x-powers,axp221-ac-power-supply
+      - items:
+          - const: x-powers,axp803-ac-power-supply
+          - const: x-powers,axp813-ac-power-supply
+      - const: x-powers,axp813-ac-power-supply
 
 required:
   - compatible
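The oneOf above lets an AXP803 claim the AXP813 entry as a fallback; a hypothetical node using that pairing would be:

        ac_power_supply: ac-power-supply {
                compatible = "x-powers,axp803-ac-power-supply",
                             "x-powers,axp813-ac-power-supply";
        };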
index 86e8a713d4e2f9f1ddf51775ea83fad439a78f7d..d055428ae39f3619aa889776421dcb0ed1ce4a2d 100644 (file)
@@ -19,10 +19,14 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp209-battery-power-supply
-      - x-powers,axp221-battery-power-supply
-      - x-powers,axp813-battery-power-supply
+    oneOf:
+      - const: x-powers,axp202-battery-power-supply
+      - const: x-powers,axp209-battery-power-supply
+      - const: x-powers,axp221-battery-power-supply
+      - items:
+          - const: x-powers,axp803-battery-power-supply
+          - const: x-powers,axp813-battery-power-supply
+      - const: x-powers,axp813-battery-power-supply
 
 required:
   - compatible
index 61f1b320c1572b4bb23207aa4afd4efca5d4541e..0c371b55c9e1beec8d5c4034a802c7ece1e9eaa8 100644 (file)
@@ -20,11 +20,15 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - x-powers,axp202-usb-power-supply
-      - x-powers,axp221-usb-power-supply
-      - x-powers,axp223-usb-power-supply
-      - x-powers,axp813-usb-power-supply
+    oneOf:
+      - enum:
+          - x-powers,axp202-usb-power-supply
+          - x-powers,axp221-usb-power-supply
+          - x-powers,axp223-usb-power-supply
+          - x-powers,axp813-usb-power-supply
+      - items:
+          - const: x-powers,axp803-usb-power-supply
+          - const: x-powers,axp813-usb-power-supply
 
 
 required:
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq2134-regulator.yaml
new file mode 100644 (file)
index 0000000..3f47e8e
--- /dev/null
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rtq2134-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RTQ2134 SubPMIC Regulator
+
+maintainers:
+  - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+  The RTQ2134 is a multi-phase, programmable power management IC that
+  integrates four high-efficiency, synchronous step-down converter cores.
+
+  Datasheet is available at
+  https://www.richtek.com/assets/product_file/RTQ2134-QA/DSQ2134-QA-01.pdf
+
+properties:
+  compatible:
+    enum:
+      - richtek,rtq2134
+
+  reg:
+    maxItems: 1
+
+  regulators:
+    type: object
+
+    patternProperties:
+      "^buck[1-3]$":
+        type: object
+        $ref: regulator.yaml#
+        description: |
+          regulator description for buck[1-3].
+
+        properties:
+          richtek,use-vsel-dvs:
+            type: boolean
+            description: |
+              If specified, the buck listens to the 'vsel' pin for its DVS
+              configuration. Otherwise, the dvs0 voltage is used by default.
+
+          richtek,uv-shutdown:
+            type: boolean
+            description: |
+              If specified, use shutdown as the undervoltage action; otherwise hiccup mode by default.
+
+        unevaluatedProperties: false
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - regulators
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      rtq2134@18 {
+        compatible = "richtek,rtq2134";
+        reg = <0x18>;
+
+        regulators {
+          buck1 {
+            regulator-name = "rtq2134-buck1";
+            regulator-min-microvolt = <300000>;
+            regulator-max-microvolt = <1850000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <550000>;
+              regulator-suspend-max-microvolt = <550000>;
+            };
+          };
+          buck2 {
+            regulator-name = "rtq2134-buck2";
+            regulator-min-microvolt = <1120000>;
+            regulator-max-microvolt = <1120000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <1120000>;
+              regulator-suspend-max-microvolt = <1120000>;
+            };
+          };
+          buck3 {
+            regulator-name = "rtq2134-buck3";
+            regulator-min-microvolt = <600000>;
+            regulator-max-microvolt = <600000>;
+            regulator-always-on;
+            richtek,use-vsel-dvs;
+            regulator-state-mem {
+              regulator-suspend-min-microvolt = <600000>;
+              regulator-suspend-max-microvolt = <600000>;
+            };
+          };
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml
new file mode 100644 (file)
index 0000000..e6e5a9a
--- /dev/null
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rtq6752-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RTQ6752 TFT LCD Voltage Regulator
+
+maintainers:
+  - ChiYuan Huang <cy_huang@richtek.com>
+
+description: |
+  The RTQ6752 is an I2C-programmable power management IC. It includes two
+  synchronous boost converters for PAVDD, and one synchronous buck-boost for
+  NAVDD. The device is suitable for automotive TFT-LCD panels.
+
+properties:
+  compatible:
+    enum:
+      - richtek,rtq6752
+
+  reg:
+    maxItems: 1
+
+  enable-gpios:
+    description: |
+      The GPIO connected to the chip's 'enable' line. If not provided, the
+      line is treated as externally pulled up.
+    maxItems: 1
+
+  regulators:
+    type: object
+
+    patternProperties:
+      "^(p|n)avdd$":
+        type: object
+        $ref: regulator.yaml#
+        description: |
+          regulator description for pavdd and navdd.
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - regulators
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      rtq6752@6b {
+        compatible = "richtek,rtq6752";
+        reg = <0x6b>;
+        enable-gpios = <&gpio26 2 0>;
+
+        regulators {
+          pavdd {
+            regulator-name = "rtq6752-pavdd";
+            regulator-min-microvolt = <5000000>;
+            regulator-max-microvolt = <7300000>;
+            regulator-boot-on;
+          };
+          navdd {
+            regulator-name = "rtq6752-navdd";
+            regulator-min-microvolt = <5000000>;
+            regulator-max-microvolt = <7300000>;
+            regulator-boot-on;
+          };
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml b/Documentation/devicetree/bindings/regulator/socionext,uniphier-regulator.yaml
new file mode 100644 (file)
index 0000000..861d5f3
--- /dev/null
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/socionext,uniphier-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier regulator controller
+
+description: |
+  This regulator controls VBUS and belongs to the USB3 glue layer. Before
+  using the regulator, it is necessary to control the clocks and resets to
+  enable this layer. These clocks and resets should be described in the
+  corresponding properties.
+
+maintainers:
+  - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+allOf:
+  - $ref: "regulator.yaml#"
+
+# USB3 Controller
+
+properties:
+  compatible:
+    enum:
+      - socionext,uniphier-pro4-usb3-regulator
+      - socionext,uniphier-pro5-usb3-regulator
+      - socionext,uniphier-pxs2-usb3-regulator
+      - socionext,uniphier-ld20-usb3-regulator
+      - socionext,uniphier-pxs3-usb3-regulator
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    oneOf:
+      - items:          # for Pro4, Pro5
+          - const: gio
+          - const: link
+      - items:          # for others
+          - const: link
+
+  resets:
+    minItems: 1
+    maxItems: 2
+
+  reset-names:
+    oneOf:
+      - items:          # for Pro4, Pro5
+          - const: gio
+          - const: link
+      - items:          # for others
+          - const: link
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+
+examples:
+  - |
+    usb-glue@65b00000 {
+        compatible = "simple-mfd";
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges = <0 0x65b00000 0x400>;
+
+        usb_vbus0: regulators@100 {
+            compatible = "socionext,uniphier-ld20-usb3-regulator";
+            reg = <0x100 0x10>;
+            clock-names = "link";
+            clocks = <&sys_clk 14>;
+            reset-names = "link";
+            resets = <&sys_rst 14>;
+        };
+    };
+
diff --git a/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt b/Documentation/devicetree/bindings/regulator/uniphier-regulator.txt
deleted file mode 100644 (file)
index 94fd38b..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-Socionext UniPhier Regulator Controller
-
-This describes the devicetree bindings for regulator controller implemented
-on Socionext UniPhier SoCs.
-
-USB3 Controller
----------------
-
-This regulator controls VBUS and belongs to USB3 glue layer. Before using
-the regulator, it is necessary to control the clocks and resets to enable
-this layer. These clocks and resets should be described in each property.
-
-Required properties:
-- compatible: Should be
-    "socionext,uniphier-pro4-usb3-regulator" - for Pro4 SoC
-    "socionext,uniphier-pro5-usb3-regulator" - for Pro5 SoC
-    "socionext,uniphier-pxs2-usb3-regulator" - for PXs2 SoC
-    "socionext,uniphier-ld20-usb3-regulator" - for LD20 SoC
-    "socionext,uniphier-pxs3-usb3-regulator" - for PXs3 SoC
-- reg: Specifies offset and length of the register set for the device.
-- clocks: A list of phandles to the clock gate for USB3 glue layer.
-       According to the clock-names, appropriate clocks are required.
-- clock-names: Should contain
-    "gio", "link" - for Pro4 and Pro5 SoCs
-    "link"        - for others
-- resets: A list of phandles to the reset control for USB3 glue layer.
-       According to the reset-names, appropriate resets are required.
-- reset-names: Should contain
-    "gio", "link" - for Pro4 and Pro5 SoCs
-    "link"        - for others
-
-See Documentation/devicetree/bindings/regulator/regulator.txt
-for more details about the regulator properties.
-
-Example:
-
-       usb-glue@65b00000 {
-               compatible = "socionext,uniphier-ld20-dwc3-glue",
-                            "simple-mfd";
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges = <0 0x65b00000 0x400>;
-
-               usb_vbus0: regulators@100 {
-                       compatible = "socionext,uniphier-ld20-usb3-regulator";
-                       reg = <0x100 0x10>;
-                       clock-names = "link";
-                       clocks = <&sys_clk 14>;
-                       reset-names = "link";
-                       resets = <&sys_rst 14>;
-               };
-
-               phy {
-                       ...
-                       phy-supply = <&usb_vbus0>;
-               };
-               ...
-       };
index 1d38ff76d18fd53334bf9acdb5c45ca0ccd53a56..2b1f9160389721fdcc9db79d96459fb86ef1df5d 100644 (file)
@@ -24,10 +24,10 @@ allOf:
 select:
   properties:
     compatible:
-      items:
-        enum:
-            - sifive,fu540-c000-ccache
-            - sifive,fu740-c000-ccache
+      contains:
+        enum:
+          - sifive,fu540-c000-ccache
+          - sifive,fu740-c000-ccache
 
   required:
     - compatible
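Switching select from items to contains means the schema is also selected for nodes whose compatible list carries additional entries, e.g. a hypothetical node such as:

        ccache: cache-controller@2010000 {
                compatible = "sifive,fu740-c000-ccache", "cache";
                reg = <0x2010000 0x1000>; /* illustrative */
        };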
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
deleted file mode 100644 (file)
index 487208c..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-OMAP2+ McSPI device
-
-Required properties:
-- compatible :
-  - "ti,am654-mcspi" for AM654.
-  - "ti,omap2-mcspi" for OMAP2 & OMAP3.
-  - "ti,omap4-mcspi" for OMAP4+.
-- ti,spi-num-cs : Number of chipselect supported  by the instance.
-- ti,hwmods: Name of the hwmod associated to the McSPI
-- ti,pindir-d0-out-d1-in: Select the D0 pin as output and D1 as
-                         input. The default is D0 as input and
-                         D1 as output.
-
-Optional properties:
-- dmas: List of DMA specifiers with the controller specific format
-       as described in the generic DMA client binding. A tx and rx
-       specifier is required for each chip select.
-- dma-names: List of DMA request names. These strings correspond
-       1:1 with the DMA specifiers listed in dmas. The string naming
-       is to be "rxN" and "txN" for RX and TX requests,
-       respectively, where N equals the chip select number.
-
-Examples:
-
-[hwmod populated DMA resources]
-
-mcspi1: mcspi@1 {
-    #address-cells = <1>;
-    #size-cells = <0>;
-    compatible = "ti,omap4-mcspi";
-    ti,hwmods = "mcspi1";
-    ti,spi-num-cs = <4>;
-};
-
-[generic DMA request binding]
-
-mcspi1: mcspi@1 {
-    #address-cells = <1>;
-    #size-cells = <0>;
-    compatible = "ti,omap4-mcspi";
-    ti,hwmods = "mcspi1";
-    ti,spi-num-cs = <2>;
-    dmas = <&edma 42
-           &edma 43
-           &edma 44
-           &edma 45>;
-    dma-names = "tx0", "rx0", "tx1", "rx1";
-};
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.yaml b/Documentation/devicetree/bindings/spi/omap-spi.yaml
new file mode 100644 (file)
index 0000000..e555381
--- /dev/null
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/omap-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI controller bindings for OMAP and K3 SoCs
+
+maintainers:
+  - Aswath Govindraju <a-govindraju@ti.com>
+
+allOf:
+  - $ref: spi-controller.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - ti,am654-mcspi
+              - ti,am4372-mcspi
+          - const: ti,omap4-mcspi
+      - items:
+          - enum:
+              - ti,omap2-mcspi
+              - ti,omap4-mcspi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  ti,spi-num-cs:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Number of chipselects supported by the instance.
+    minimum: 1
+    maximum: 4
+
+  ti,hwmods:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      Must be "mcspi<n>", n being the instance number (1-based).
+      This property is applicable only on legacy platforms mainly omap2/3
+      and ti81xx and should not be used on other platforms.
+    deprecated: true
+
+  ti,pindir-d0-out-d1-in:
+    description:
+      Select the D0 pin as output and D1 as input. The default is D0
+      as input and D1 as output.
+    type: boolean
+
+  dmas:
+    description:
+      List of DMA specifiers with the controller specific format as
+      described in the generic DMA client binding. A tx and rx
+      specifier is required for each chip select.
+    minItems: 1
+    maxItems: 8
+
+  dma-names:
+    description:
+      List of DMA request names. These strings correspond 1:1 with
+      the DMA specifiers listed in dmas. The strings are to be named
+      "rxN" and "txN" for RX and TX requests, respectively, where N
+      is the chip select number.
+    minItems: 1
+    maxItems: 8
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+unevaluatedProperties: false
+
+if:
+  properties:
+    compatible:
+      oneOf:
+        - const: ti,omap2-mcspi
+        - const: ti,omap4-mcspi
+
+then:
+  properties:
+    ti,hwmods:
+      items:
+        - pattern: "^mcspi([1-9])$"
+
+else:
+  properties:
+    ti,hwmods: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+    spi@2100000 {
+      compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+      reg = <0x2100000 0x400>;
+      interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&k3_clks 137 1>;
+      power-domains = <&k3_pds 137 TI_SCI_PD_EXCLUSIVE>;
+      #address-cells = <1>;
+      #size-cells = <0>;
+      dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
+      dma-names = "tx0", "rx0";
+    };
diff --git a/Documentation/devicetree/bindings/spi/rockchip-sfc.yaml b/Documentation/devicetree/bindings/spi/rockchip-sfc.yaml
new file mode 100644 (file)
index 0000000..339fb39
--- /dev/null
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/rockchip-sfc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Serial Flash Controller (SFC)
+
+maintainers:
+  - Heiko Stuebner <heiko@sntech.de>
+  - Chris Morgan <macromorgan@hotmail.com>
+
+allOf:
+  - $ref: spi-controller.yaml#
+
+properties:
+  compatible:
+    const: rockchip,sfc
+    description:
+      The Rockchip SFC controller is a standalone IP with a version register,
+      and the driver can handle all feature differences inside the IP
+      based on that version register.
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus Clock
+      - description: Module Clock
+
+  clock-names:
+    items:
+      - const: clk_sfc
+      - const: hclk_sfc
+
+  power-domains:
+    maxItems: 1
+
+  rockchip,sfc-no-dma:
+    description: Disable DMA and utilize FIFO mode only
+    type: boolean
+
+patternProperties:
+  "^flash@[0-3]$":
+    type: object
+    properties:
+      reg:
+        minimum: 0
+        maximum: 3
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/px30-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/px30-power.h>
+
+    sfc: spi@ff3a0000 {
+        compatible = "rockchip,sfc";
+        reg = <0xff3a0000 0x4000>;
+        interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
+        clock-names = "clk_sfc", "hclk_sfc";
+        pinctrl-0 = <&sfc_clk &sfc_cs &sfc_bus2>;
+        pinctrl-names = "default";
+        power-domains = <&power PX30_PD_MMC_NAND>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        flash@0 {
+            compatible = "jedec,spi-nor";
+            reg = <0>;
+            spi-max-frequency = <108000000>;
+            spi-rx-bus-width = <2>;
+            spi-tx-bus-width = <2>;
+        };
+    };
+
+...
index 4d0e4c15c4ea79a2ec83fa04f19a560df6a47988..2a24969159cc11a2ff1120c358f03fe5ab36b861 100644 (file)
@@ -11,6 +11,7 @@ Required properties:
     - mediatek,mt8135-spi: for mt8135 platforms
     - mediatek,mt8173-spi: for mt8173 platforms
     - mediatek,mt8183-spi: for mt8183 platforms
+    - mediatek,mt6893-spi: for mt6893 platforms
     - "mediatek,mt8192-spi", "mediatek,mt6765-spi": for mt8192 platforms
     - "mediatek,mt8195-spi", "mediatek,mt6765-spi": for mt8195 platforms
     - "mediatek,mt8516-spi", "mediatek,mt2712-spi": for mt8516 platforms
diff --git a/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt b/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
deleted file mode 100644 (file)
index 2567c82..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-Spreadtrum ADI controller
-
-ADI is the abbreviation of Anolog-Digital interface, which is used to access
-analog chip (such as PMIC) from digital chip. ADI controller follows the SPI
-framework for its hardware implementation is alike to SPI bus and its timing
-is compatile to SPI timing.
-
-ADI controller has 50 channels including 2 software read/write channels and
-48 hardware channels to access analog chip. For 2 software read/write channels,
-users should set ADI registers to access analog chip. For hardware channels,
-we can configure them to allow other hardware components to use it independently,
-which means we can just link one analog chip address to one hardware channel,
-then users can access the mapped analog chip address by this hardware channel
-triggered by hardware components instead of ADI software channels.
-
-Thus we introduce one property named "sprd,hw-channels" to configure hardware
-channels, the first value specifies the hardware channel id which is used to
-transfer data triggered by hardware automatically, and the second value specifies
-the analog chip address where user want to access by hardware components.
-
-Since we have multi-subsystems will use unique ADI to access analog chip, when
-one system is reading/writing data by ADI software channels, that should be under
-one hardware spinlock protection to prevent other systems from reading/writing
-data by ADI software channels at the same time, or two parallel routine of setting
-ADI registers will make ADI controller registers chaos to lead incorrect results.
-Then we need one hardware spinlock to synchronize between the multiple subsystems.
-
-The new version ADI controller supplies multiple master channels for different
-subsystem accessing, that means no need to add hardware spinlock to synchronize,
-thus change the hardware spinlock support to be optional to keep backward
-compatibility.
-
-Required properties:
-- compatible: Should be "sprd,sc9860-adi".
-- reg: Offset and length of ADI-SPI controller register space.
-- #address-cells: Number of cells required to define a chip select address
-       on the ADI-SPI bus. Should be set to 1.
-- #size-cells: Size of cells required to define a chip select address size
-       on the ADI-SPI bus. Should be set to 0.
-
-Optional properties:
-- hwlocks: Reference to a phandle of a hwlock provider node.
-- hwlock-names: Reference to hwlock name strings defined in the same order
-       as the hwlocks, should be "adi".
-- sprd,hw-channels: This is an array of channel values up to 49 channels.
-       The first value specifies the hardware channel id which is used to
-       transfer data triggered by hardware automatically, and the second
-       value specifies the analog chip address where user want to access
-       by hardware components.
-
-SPI slave nodes must be children of the SPI controller node and can contain
-properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
-
-Example:
-       adi_bus: spi@40030000 {
-               compatible = "sprd,sc9860-adi";
-               reg = <0 0x40030000 0 0x10000>;
-               hwlocks = <&hwlock1 0>;
-               hwlock-names = "adi";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               sprd,hw-channels = <30 0x8c20>;
-       };
diff --git a/Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml b/Documentation/devicetree/bindings/spi/sprd,spi-adi.yaml
new file mode 100644 (file)
index 0000000..fe01402
--- /dev/null
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/spi/sprd,spi-adi.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Spreadtrum ADI controller
+
+maintainers:
+  - Orson Zhai <orsonzhai@gmail.com>
+  - Baolin Wang <baolin.wang7@gmail.com>
+  - Chunyan Zhang <zhang.lyra@gmail.com>
+
+description: |
+  ADI is the abbreviation of Analog-Digital Interface, which is used to access
+  an analog chip (such as a PMIC) from a digital chip. The ADI controller
+  follows the SPI framework, since its hardware implementation is similar to
+  an SPI bus and its timing is compatible with SPI timing.
+
+  The ADI controller has 50 channels to access the analog chip: 2 software
+  read/write channels and 48 hardware channels. For the 2 software read/write
+  channels, users set ADI registers to access the analog chip. The hardware
+  channels can be configured so that other hardware components use them
+  independently: one analog chip address is linked to one hardware channel,
+  and the mapped address is then accessed through that hardware channel,
+  triggered by the hardware components instead of by the ADI software channels.
+
+  Thus the property named "sprd,hw-channels" configures the hardware channels:
+  the first value specifies the hardware channel id that is used to transfer
+  data triggered by hardware automatically, and the second value specifies
+  the analog chip address that the hardware components access.
+
+  Since multiple subsystems use the single ADI to access the analog chip,
+  reads and writes through the ADI software channels must be protected by one
+  hardware spinlock; otherwise two parallel routines setting ADI registers
+  would leave the controller registers in a chaotic state and produce
+  incorrect results. The hardware spinlock therefore synchronizes the
+  multiple subsystems.
+
+  The new version of the ADI controller supplies multiple master channels for
+  different subsystems to access, so no hardware spinlock is needed for
+  synchronization; the hardware spinlock support is therefore optional, kept
+  for backward compatibility.
+
+allOf:
+  - $ref: /spi/spi-controller.yaml#
+
+properties:
+  compatible:
+    enum:
+      - sprd,sc9860-adi
+      - sprd,sc9863-adi
+      - sprd,ums512-adi
+
+  reg:
+    maxItems: 1
+
+  hwlocks:
+    maxItems: 1
+
+  hwlock-names:
+    const: adi
+
+  sprd,hw-channels:
+    $ref: /schemas/types.yaml#/definitions/uint32-matrix
+    description: A list of hardware channels
+    minItems: 1
+    maxItems: 48
+    items:
+      items:
+        - description: The hardware channel id which is used to transfer data
+            triggered by hardware automatically; channel ids 0-1 are for
+            software use, 2-49 are hardware channels.
+          minimum: 2
+          maximum: 49
+        - description: The analog chip address that the hardware components
+            access.
+
+required:
+  - compatible
+  - reg
+  - '#address-cells'
+  - '#size-cells'
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    aon {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        adi_bus: spi@40030000 {
+            compatible = "sprd,sc9860-adi";
+            reg = <0 0x40030000 0 0x10000>;
+            hwlocks = <&hwlock1 0>;
+            hwlock-names = "adi";
+            #address-cells = <1>;
+            #size-cells = <0>;
+            sprd,hw-channels = <30 0x8c20>;
+        };
+    };
+...
index 2183fd8cc350457a0cff3a42d15cdccba8594a36..2a75dd5da7b5ae246623de0e04da7be60b59cbca 100644 (file)
@@ -271,19 +271,19 @@ prototypes::
 locking rules:
        All except set_page_dirty and freepage may block
 
-====================== ======================== =========
-ops                    PageLocked(page)         i_rwsem
-====================== ======================== =========
+====================== ======================== =========      ===============
+ops                    PageLocked(page)         i_rwsem        invalidate_lock
+====================== ======================== =========      ===============
 writepage:             yes, unlocks (see below)
-readpage:              yes, unlocks
+readpage:              yes, unlocks                            shared
 writepages:
 set_page_dirty         no
-readahead:             yes, unlocks
-readpages:             no
+readahead:             yes, unlocks                            shared
+readpages:             no                                      shared
 write_begin:           locks the page           exclusive
 write_end:             yes, unlocks             exclusive
 bmap:
-invalidatepage:                yes
+invalidatepage:                yes                                     exclusive
 releasepage:           yes
 freepage:              yes
 direct_IO:
@@ -295,7 +295,7 @@ is_partially_uptodate:      yes
 error_remove_page:     yes
 swap_activate:         no
 swap_deactivate:       no
-====================== ======================== =========
+====================== ======================== =========      ===============
 
 ->write_begin(), ->write_end() and ->readpage() may be called from
 the request handler (/dev/loop).
@@ -378,7 +378,10 @@ keep it that way and don't breed new callers.
 ->invalidatepage() is called when the filesystem must attempt to drop
 some or all of the buffers from the page when it is being truncated. It
 returns zero on success. If ->invalidatepage is zero, the kernel uses
-block_invalidatepage() instead.
+block_invalidatepage() instead. The filesystem must exclusively acquire
+invalidate_lock before invalidating page cache in truncate / hole punch path
+(and thus calling into ->invalidatepage) to block races between page cache
+invalidation and page cache filling functions (fault, read, ...).
 
 ->releasepage() is called when the kernel is about to try to drop the
 buffers from the page in preparation for freeing it.  It returns zero to
@@ -506,6 +509,7 @@ prototypes::
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
+       int (*iopoll) (struct kiocb *kiocb, bool spin);
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
@@ -518,12 +522,6 @@ prototypes::
        int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
        int (*fasync) (int, struct file *, int);
        int (*lock) (struct file *, int, struct file_lock *);
-       ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
-                       loff_t *);
-       ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
-                       loff_t *);
-       ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
-                       void __user *);
        ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
                        loff_t *, int);
        unsigned long (*get_unmapped_area)(struct file *, unsigned long,
@@ -536,6 +534,14 @@ prototypes::
                        size_t, unsigned int);
        int (*setlease)(struct file *, long, struct file_lock **, void **);
        long (*fallocate)(struct file *, int, loff_t, loff_t);
+       void (*show_fdinfo)(struct seq_file *m, struct file *f);
+       unsigned (*mmap_capabilities)(struct file *);
+       ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
+                       loff_t, size_t, unsigned int);
+       loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+                       struct file *file_out, loff_t pos_out,
+                       loff_t len, unsigned int remap_flags);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 
 locking rules:
        All may block.
@@ -570,6 +576,25 @@ in sys_read() and friends.
 the lease within the individual filesystem to record the result of the
 operation
 
+->fallocate implementation must be really careful to maintain page cache
+consistency when punching holes or performing other operations that invalidate
+page cache contents. Usually the filesystem needs to call
+truncate_inode_pages_range() to invalidate relevant range of the page cache.
+However the filesystem usually also needs to update its internal (and on disk)
+view of file offset -> disk block mapping. Until this update is finished, the
+filesystem needs to block page faults and reads from reloading now-stale page
+cache contents from the disk. Since VFS acquires mapping->invalidate_lock in
+shared mode when loading pages from disk (filemap_fault(), filemap_read(),
+readahead paths), the fallocate implementation must take the invalidate_lock to
+prevent reloading.
+
+->copy_file_range and ->remap_file_range implementations need to serialize
+against modifications of file data while the operation is running. For
+blocking changes through write(2) and similar operations inode->i_rwsem can be
+used. To block changes to file contents via a memory mapping during the
+operation, the filesystem must take mapping->invalidate_lock to coordinate
+with ->page_mkwrite.
+
 dquot_operations
 ================
 
@@ -627,11 +652,11 @@ pfn_mkwrite:      yes
 access:                yes
 =============  =========       ===========================
 
-->fault() is called when a previously not present pte is about
-to be faulted in. The filesystem must find and return the page associated
-with the passed in "pgoff" in the vm_fault structure. If it is possible that
-the page may be truncated and/or invalidated, then the filesystem must lock
-the page, then ensure it is not already truncated (the page lock will block
+->fault() is called when a previously not present pte is about to be faulted
+in. The filesystem must find and return the page associated with the passed in
+"pgoff" in the vm_fault structure. If it is possible that the page may be
+truncated and/or invalidated, then the filesystem must lock invalidate_lock,
+then ensure the page is not already truncated (invalidate_lock will block
 subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
 locked. The VM will unlock the page.
 
@@ -644,12 +669,14 @@ page table entry. Pointer to entry associated with the page is passed in
 "pte" field in vm_fault structure. Pointers to entries for other offsets
 should be calculated relative to "pte".
 
-->page_mkwrite() is called when a previously read-only pte is
-about to become writeable. The filesystem again must ensure that there are
-no truncate/invalidate races, and then return with the page locked. If
-the page has been truncated, the filesystem should not look up a new page
-like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
-will cause the VM to retry the fault.
+->page_mkwrite() is called when a previously read-only pte is about to become
+writeable. The filesystem again must ensure that there are no
+truncate/invalidate races or races with operations such as ->remap_file_range
+or ->copy_file_range, and then return with the page locked. Usually
+mapping->invalidate_lock is suitable for proper serialization. If the page has
+been truncated, the filesystem should not look up a new page like the ->fault()
+handler, but simply return with VM_FAULT_NOPAGE, which will cause the VM to
+retry the fault.
 
 ->pfn_mkwrite() is the same as page_mkwrite but when the pte is
 VM_PFNMAP or VM_MIXEDMAP with a page-less entry. Expected return is
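To make the invalidate_lock ordering added above concrete, here is a minimal, hedged sketch of a hole-punch path; my_fs_remove_blocks() is a hypothetical filesystem helper, while the filemap_invalidate_lock()/filemap_invalidate_unlock() and truncate_inode_pages_range() calls are the interfaces this text refers to:

        static long my_fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        {
                struct address_space *mapping = inode->i_mapping;
                long ret;

                /* Block faults, reads and readahead from refilling the range. */
                filemap_invalidate_lock(mapping);

                /* Drop the now-stale page cache for the punched range. */
                truncate_inode_pages_range(mapping, offset, offset + len - 1);

                /* Update the offset -> block mapping while refills are blocked. */
                ret = my_fs_remove_blocks(inode, offset, len);

                filemap_invalidate_unlock(mapping);
                return ret;
        }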
diff --git a/Documentation/filesystems/mandatory-locking.rst b/Documentation/filesystems/mandatory-locking.rst
deleted file mode 100644 (file)
index 9ce7354..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=====================================================
-Mandatory File Locking For The Linux Operating System
-=====================================================
-
-               Andy Walker <andy@lysaker.kvaerner.no>
-
-                          15 April 1996
-
-                    (Updated September 2007)
-
-0. Why you should avoid mandatory locking
------------------------------------------
-
-The Linux implementation is prey to a number of difficult-to-fix race
-conditions which in practice make it not dependable:
-
-       - The write system call checks for a mandatory lock only once
-         at its start.  It is therefore possible for a lock request to
-         be granted after this check but before the data is modified.
-         A process may then see file data change even while a mandatory
-         lock was held.
-       - Similarly, an exclusive lock may be granted on a file after
-         the kernel has decided to proceed with a read, but before the
-         read has actually completed, and the reading process may see
-         the file data in a state which should not have been visible
-         to it.
-       - Similar races make the claimed mutual exclusion between lock
-         and mmap similarly unreliable.
-
-1. What is  mandatory locking?
-------------------------------
-
-Mandatory locking is kernel enforced file locking, as opposed to the more usual
-cooperative file locking used to guarantee sequential access to files among
-processes. File locks are applied using the flock() and fcntl() system calls
-(and the lockf() library routine which is a wrapper around fcntl().) It is
-normally a process' responsibility to check for locks on a file it wishes to
-update, before applying its own lock, updating the file and unlocking it again.
-The most commonly used example of this (and in the case of sendmail, the most
-troublesome) is access to a user's mailbox. The mail user agent and the mail
-transfer agent must guard against updating the mailbox at the same time, and
-prevent reading the mailbox while it is being updated.
-
-In a perfect world all processes would use and honour a cooperative, or
-"advisory" locking scheme. However, the world isn't perfect, and there's
-a lot of poorly written code out there.
-
-In trying to address this problem, the designers of System V UNIX came up
-with a "mandatory" locking scheme, whereby the operating system kernel would
-block attempts by a process to write to a file that another process holds a
-"read" -or- "shared" lock on, and block attempts to both read and write to a 
-file that a process holds a "write " -or- "exclusive" lock on.
-
-The System V mandatory locking scheme was intended to have as little impact as
-possible on existing user code. The scheme is based on marking individual files
-as candidates for mandatory locking, and using the existing fcntl()/lockf()
-interface for applying locks just as if they were normal, advisory locks.
-
-.. Note::
-
-   1. In saying "file" in the paragraphs above I am actually not telling
-      the whole truth. System V locking is based on fcntl(). The granularity of
-      fcntl() is such that it allows the locking of byte ranges in files, in
-      addition to entire files, so the mandatory locking rules also have byte
-      level granularity.
-
-   2. POSIX.1 does not specify any scheme for mandatory locking, despite
-      borrowing the fcntl() locking scheme from System V. The mandatory locking
-      scheme is defined by the System V Interface Definition (SVID) Version 3.
-
-2. Marking a file for mandatory locking
----------------------------------------
-
-A file is marked as a candidate for mandatory locking by setting the group-id
-bit in its file mode but removing the group-execute bit. This is an otherwise
-meaningless combination, and was chosen by the System V implementors so as not
-to break existing user programs.
-
-Note that the group-id bit is usually automatically cleared by the kernel when
-a setgid file is written to. This is a security measure. The kernel has been
-modified to recognize the special case of a mandatory lock candidate and to
-refrain from clearing this bit. Similarly the kernel has been modified not
-to run mandatory lock candidates with setgid privileges.
-
-3. Available implementations
-----------------------------
-
-I have considered the implementations of mandatory locking available with
-SunOS 4.1.x, Solaris 2.x and HP-UX 9.x.
-
-Generally I have tried to make the most sense out of the behaviour exhibited
-by these three reference systems. There are many anomalies.
-
-All the reference systems reject all calls to open() for a file on which
-another process has outstanding mandatory locks. This is in direct
-contravention of SVID 3, which states that only calls to open() with the
-O_TRUNC flag set should be rejected. The Linux implementation follows the SVID
-definition, which is the "Right Thing", since only calls with O_TRUNC can
-modify the contents of the file.
-
-HP-UX even disallows open() with O_TRUNC for a file with advisory locks, not
-just mandatory locks. That would appear to contravene POSIX.1.
-
-mmap() is another interesting case. All the operating systems mentioned
-prevent mandatory locks from being applied to an mmap()'ed file, but  HP-UX
-also disallows advisory locks for such a file. SVID actually specifies the
-paranoid HP-UX behaviour.
-
-In my opinion only MAP_SHARED mappings should be immune from locking, and then
-only from mandatory locks - that is what is currently implemented.
-
-SunOS is so hopeless that it doesn't even honour the O_NONBLOCK flag for
-mandatory locks, so reads and writes to locked files always block when they
-should return EAGAIN.
-
-I'm afraid that this is such an esoteric area that the semantics described
-below are just as valid as any others, so long as the main points seem to
-agree. 
-
-4. Semantics
-------------
-
-1. Mandatory locks can only be applied via the fcntl()/lockf() locking
-   interface - in other words the System V/POSIX interface. BSD style
-   locks using flock() never result in a mandatory lock.
-
-2. If a process has locked a region of a file with a mandatory read lock, then
-   other processes are permitted to read from that region. If any of these
-   processes attempts to write to the region it will block until the lock is
-   released, unless the process has opened the file with the O_NONBLOCK
-   flag in which case the system call will return immediately with the error
-   status EAGAIN.
-
-3. If a process has locked a region of a file with a mandatory write lock, all
-   attempts to read or write to that region block until the lock is released,
-   unless a process has opened the file with the O_NONBLOCK flag in which case
-   the system call will return immediately with the error status EAGAIN.
-
-4. Calls to open() with O_TRUNC, or to creat(), on a existing file that has
-   any mandatory locks owned by other processes will be rejected with the
-   error status EAGAIN.
-
-5. Attempts to apply a mandatory lock to a file that is memory mapped and
-   shared (via mmap() with MAP_SHARED) will be rejected with the error status
-   EAGAIN.
-
-6. Attempts to create a shared memory map of a file (via mmap() with MAP_SHARED)
-   that has any mandatory locks in effect will be rejected with the error status
-   EAGAIN.
-
-5. Which system calls are affected?
------------------------------------
-
-Those which modify a file's contents, not just the inode. That gives read(),
-write(), readv(), writev(), open(), creat(), mmap(), truncate() and
-ftruncate(). truncate() and ftruncate() are considered to be "write" actions
-for the purposes of mandatory locking.
-
-The affected region is usually defined as stretching from the current position
-for the total number of bytes read or written. For the truncate calls it is
-defined as the bytes of a file removed or added (we must also consider bytes
-added, as a lock can specify just "the whole file", rather than a specific
-range of bytes.)
-
-Note 3: I may have overlooked some system calls that need mandatory lock
-checking in my eagerness to get this code out the door. Please let me know, or
-better still fix the system calls yourself and submit a patch to me or Linus.
-
-6. Warning!
------------
-
-Not even root can override a mandatory lock, so runaway processes can wreak
-havoc if they lock crucial files. The way around it is to change the file
-permissions (remove the setgid bit) before trying to read or write to it.
-Of course, that might be a bit tricky if the system is hung :-(
-
-7. The "mand" mount option
---------------------------
-Mandatory locking is disabled on all filesystems by default, and must be
-administratively enabled by mounting with "-o mand". That mount option
-is only allowed if the mounting task has the CAP_SYS_ADMIN capability.
-
-Since kernel v4.5, it is possible to disable mandatory locking
-altogether by setting CONFIG_MANDATORY_FILE_LOCKING to "n". A kernel
-with this disabled will reject attempts to mount filesystems with the
-"mand" mount option with the error status EPERM.
index 675ba8620d664d9ff629f3bf09fdcbc7f81fb98f..b421a3c1806ecd0052ebdb5ed35dfdbe08937487 100644 (file)
@@ -18,114 +18,5 @@ real, with all the uAPI bits is:
         * Route shmem backend over to TTM SYSTEM for discrete
         * TTM purgeable object support
         * Move i915 buddy allocator over to TTM
-        * MMAP ioctl mode(see `I915 MMAP`_)
-        * SET/GET ioctl caching(see `I915 SET/GET CACHING`_)
 * Send RFC(with mesa-dev on cc) for final sign off on the uAPI
 * Add pciid for DG1 and turn on uAPI for real
-
-New object placement and region query uAPI
-==========================================
-Starting from DG1 we need to give userspace the ability to allocate buffers from
-device local-memory. Currently the driver supports gem_create, which can place
-buffers in system memory via shmem, and the usual assortment of other
-interfaces, like dumb buffers and userptr.
-
-To support this new capability, while also providing a uAPI which will work
-beyond just DG1, we propose to offer three new bits of uAPI:
-
-DRM_I915_QUERY_MEMORY_REGIONS
------------------------------
-New query ID which allows userspace to discover the list of supported memory
-regions(like system-memory and local-memory) for a given device. We identify
-each region with a class and instance pair, which should be unique. The class
-here would be DEVICE or SYSTEM, and the instance would be zero, on platforms
-like DG1.
-
-Side note: The class/instance design is borrowed from our existing engine uAPI,
-where we describe every physical engine in terms of its class, and the
-particular instance, since we can have more than one per class.
-
-In the future we also want to expose more information which can further
-describe the capabilities of a region.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_memory_class drm_i915_gem_memory_class_instance drm_i915_memory_region_info drm_i915_query_memory_regions
-
-GEM_CREATE_EXT
---------------
-New ioctl which is basically just gem_create but now allows userspace to provide
-a chain of possible extensions. Note that if we don't provide any extensions and
-set flags=0 then we get the exact same behaviour as gem_create.
-
-Side note: We also need to support PXP[1] in the near future, which is also
-applicable to integrated platforms, and adds its own gem_create_ext extension,
-which basically lets userspace mark a buffer as "protected".
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext
-
-I915_GEM_CREATE_EXT_MEMORY_REGIONS
-----------------------------------
-Implemented as an extension for gem_create_ext, we would now allow userspace to
-optionally provide an immutable list of preferred placements at creation time,
-in priority order, for a given buffer object.  For the placements we expect
-them each to use the class/instance encoding, as per the output of the regions
-query. Having the list in priority order will be useful in the future when
-placing an object, say during eviction.
-
-.. kernel-doc:: include/uapi/drm/i915_drm.h
-        :functions: drm_i915_gem_create_ext_memory_regions
-
-One fair criticism here is that this seems a little over-engineered[2]. If we
-just consider DG1 then yes, a simple gem_create.flags or something is totally
-all that's needed to tell the kernel to allocate the buffer in local-memory or
-whatever. However looking to the future we need uAPI which can also support
-upcoming Xe HP multi-tile architecture in a sane way, where there can be
-multiple local-memory instances for a given device, and so using both class and
-instance in our uAPI to describe regions is desirable, although specifically
-for DG1 it's uninteresting, since we only have a single local-memory instance.
-
-Existing uAPI issues
-====================
-Some potential issues we still need to resolve.
-
-I915 MMAP
----------
-In i915 there are multiple ways to MMAP GEM object, including mapping the same
-object using different mapping types(WC vs WB), i.e multiple active mmaps per
-object. TTM expects one MMAP at most for the lifetime of the object. If it
-turns out that we have to backpedal here, there might be some potential
-userspace fallout.
-
-I915 SET/GET CACHING
---------------------
-In i915 we have set/get_caching ioctl. TTM doesn't let us to change this, but
-DG1 doesn't support non-snooped pcie transactions, so we can just always
-allocate as WB for smem-only buffers.  If/when our hw gains support for
-non-snooped pcie transactions then we must fix this mode at allocation time as
-a new GEM extension.
-
-This is related to the mmap problem, because in general (meaning, when we're
-not running on intel cpus) the cpu mmap must not, ever, be inconsistent with
-allocation mode.
-
-Possible idea is to let the kernel picks the mmap mode for userspace from the
-following table:
-
-smem-only: WB. Userspace does not need to call clflush.
-
-smem+lmem: We only ever allow a single mode, so simply allocate this as uncached
-memory, and always give userspace a WC mapping. GPU still does snooped access
-here(assuming we can't turn it off like on DG1), which is a bit inefficient.
-
-lmem only: always WC
-
-This means on discrete you only get a single mmap mode, all others must be
-rejected. That's probably going to be a new default mode or something like
-that.
-
-Links
-=====
-[1] https://patchwork.freedesktop.org/series/86798/
-
-[2] https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5599#note_553791
index 8b76217e370aa6c46f14b8ddb2b2a6b63d447793..6270f1fd7d4eda95fd89f4b8834160d98428ea90 100644 (file)
@@ -17,6 +17,7 @@ Introduction
    busses/index
    i2c-topology
    muxes/i2c-mux-gpio
+   i2c-sysfs
 
 Writing device drivers
 ======================
index d31ed6c1cb0d12a50e30b4adc43f93bac8586a3b..024d784157c8f40b058f388fab5192274a66ce5a 100644 (file)
@@ -191,19 +191,9 @@ nf_flowtable_tcp_timeout - INTEGER (seconds)
         TCP connections may be offloaded from nf conntrack to nf flow table.
         Once aged, the connection is returned to nf conntrack with tcp pickup timeout.
 
-nf_flowtable_tcp_pickup - INTEGER (seconds)
-        default 120
-
-        TCP connection timeout after being aged from nf flow table offload.
-
 nf_flowtable_udp_timeout - INTEGER (seconds)
         default 30
 
         Control offload timeout for udp connections.
         UDP connections may be offloaded from nf conntrack to nf flow table.
         Once aged, the connection is returned to nf conntrack with udp pickup timeout.
-
-nf_flowtable_udp_pickup - INTEGER (seconds)
-        default 30
-
-        UDP connection timeout after being aged from nf flow table offload.
index d61219889e4940706a77170932d3c13e56e0564f..539e9d4a4860c2405fac7cd63341a2c148c643be 100644 (file)
@@ -263,7 +263,7 @@ Userspace can also add file descriptors to the notifying process via
 ``ioctl(SECCOMP_IOCTL_NOTIF_ADDFD)``. The ``id`` member of
 ``struct seccomp_notif_addfd`` should be the same ``id`` as in
 ``struct seccomp_notif``. The ``newfd_flags`` flag may be used to set flags
-like O_EXEC on the file descriptor in the notifying process. If the supervisor
+like O_CLOEXEC on the file descriptor in the notifying process. If the supervisor
 wants to inject the file descriptor with a specific number, the
 ``SECCOMP_ADDFD_FLAG_SETFD`` flag can be used, and set the ``newfd`` member to
 the specific number to use. If that file descriptor is already open in the
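Putting the addfd pieces together, a hedged userspace sketch of a supervisor injecting a descriptor might read as follows (notify_fd and id come from the supervisor's earlier notification handling; the descriptor number 100 is an arbitrary illustration):

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/seccomp.h>

        static int inject_fd(int notify_fd, __u64 id, int srcfd)
        {
                struct seccomp_notif_addfd addfd = {
                        .id = id,                          /* same id as in struct seccomp_notif */
                        .flags = SECCOMP_ADDFD_FLAG_SETFD, /* install at a specific number */
                        .srcfd = srcfd,                    /* descriptor in the supervisor */
                        .newfd = 100,                      /* number to use in the target */
                        .newfd_flags = O_CLOEXEC,          /* flags set on the injected fd */
                };

                return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
        }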
index 35eca377543dfd19e4f0b1ad8632399061094b22..88fa495abbaca445a15258fb3e2c4a8d2da10dfb 100644 (file)
@@ -25,10 +25,10 @@ On x86:
 
 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
 
-- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock is
-  taken inside kvm->arch.mmu_lock, and cannot be taken without already
-  holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
-  there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
+  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+  cannot be taken without already holding kvm->arch.mmu_lock (typically with
+  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
 
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
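A pseudo-C sketch of the nesting rule stated above (TDP MMU path; not a complete function):

        read_lock(&kvm->mmu_lock);                /* rwlock, shared mode for the TDP MMU */
        spin_lock(&kvm->arch.tdp_mmu_pages_lock); /* only ever taken inside mmu_lock */
        /* ... mutate TDP MMU page bookkeeping ... */
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
        read_unlock(&kvm->mmu_lock);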
index c9467d2839f5e894adc11c06949a842a0a9bd39f..d7b4f32875a94d385b5670d9498ca7e3673bd5e4 100644 (file)
@@ -3866,6 +3866,16 @@ L:       bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mtd/nand/raw/brcmnand/
 
+BROADCOM STB PCIE DRIVER
+M:     Jim Quinlan <jim2101024@gmail.com>
+M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
+M:     bcm-kernel-feedback-list@broadcom.com
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+F:     drivers/pci/controller/pcie-brcmstb.c
+
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     bcm-kernel-feedback-list@broadcom.com
@@ -4498,7 +4508,7 @@ L:        clang-built-linux@googlegroups.com
 S:     Supported
 W:     https://clangbuiltlinux.github.io/
 B:     https://github.com/ClangBuiltLinux/linux/issues
-C:     irc://chat.freenode.net/clangbuiltlinux
+C:     irc://irc.libera.chat/clangbuiltlinux
 F:     Documentation/kbuild/llvm.rst
 F:     include/linux/compiler-clang.h
 F:     scripts/clang-tools/
@@ -6945,7 +6955,7 @@ F:        include/uapi/linux/mdio.h
 F:     include/uapi/linux/mii.h
 
 EXFAT FILE SYSTEM
-M:     Namjae Jeon <namjae.jeon@samsung.com>
+M:     Namjae Jeon <linkinjeon@kernel.org>
 M:     Sungjong Seo <sj1557.seo@samsung.com>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
@@ -11327,7 +11337,7 @@ W:      https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-maxiradio*
 
-MCAB MICROCHIP CAN BUS ANALYZER TOOL DRIVER
+MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
 R:     Yasushi SHOJI <yashi@spacecubics.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
@@ -14430,6 +14440,13 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
 F:     drivers/pci/controller/dwc/pcie-histb.c
 
+PCIE DRIVER FOR INTEL LGM GW SOC
+M:     Rahul Tanwar <rtanwar@maxlinear.com>
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+F:     drivers/pci/controller/dwc/pcie-intel-gw.c
+
 PCIE DRIVER FOR MEDIATEK
 M:     Ryder Lee <ryder.lee@mediatek.com>
 M:     Jianjun Wang <jianjun.wang@mediatek.com>
@@ -15803,7 +15820,7 @@ F:      Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml
 F:     drivers/i2c/busses/i2c-emev2.c
 
 RENESAS ETHERNET DRIVERS
-R:     Sergei Shtylyov <sergei.shtylyov@gmail.com>
+R:     Sergey Shtylyov <s.shtylyov@omp.ru>
 L:     netdev@vger.kernel.org
 L:     linux-renesas-soc@vger.kernel.org
 F:     Documentation/devicetree/bindings/net/renesas,*.yaml
@@ -17815,7 +17832,7 @@ F:      include/linux/sync_file.h
 F:     include/uapi/linux/sync_file.h
 
 SYNOPSYS ARC ARCHITECTURE
-M:     Vineet Gupta <vgupta@synopsys.com>
+M:     Vineet Gupta <vgupta@kernel.org>
 L:     linux-snps-arc@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
@@ -20017,7 +20034,8 @@ F:      Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
 F:     Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
 F:     Documentation/devicetree/bindings/mfd/wm831x.txt
 F:     Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
-F:     Documentation/devicetree/bindings/sound/wlf,arizona.yaml
+F:     Documentation/devicetree/bindings/sound/wlf,*.yaml
+F:     Documentation/devicetree/bindings/sound/wm*
 F:     Documentation/hwmon/wm83??.rst
 F:     arch/arm/mach-s3c/mach-crag6410*
 F:     drivers/clk/clk-wm83*.c
index eae1314a5b86b127bb36d500ca69aaf0c387ef36..61741e9d9c6e61ffb234a5f19de2ed56fc36c03d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
index d8f51eb8963bb13ae53b609903d51bb0e0cf9f83..b5bf68e747320bdbaa981da441e699c318e0c721 100644 (file)
@@ -409,7 +409,7 @@ choice
        help
          Depending on the configuration, CPU can contain DSP registers
          (ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_CTRL, DSP_FFT_CTRL).
-         Bellow is options describing how to handle these registers in
+         Below are options describing how to handle these registers in
          interrupt entry / exit and in context switch.
 
 config ARC_DSP_NONE
index 69debd77cd046eacf9df1799a30641a034546181..0b485800a392e0e1d12684700ce4dc5017895431 100644 (file)
@@ -24,7 +24,7 @@
  */
 static inline __sum16 csum_fold(__wsum s)
 {
-       unsigned r = s << 16 | s >> 16; /* ror */
+       unsigned int r = s << 16 | s >> 16;     /* ror */
        s = ~s;
        s -= r;
        return s >> 16;
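
The rotate-based fold can be checked in isolation. A stand-alone user-space
sketch (illustrative only), folding 0xffff0001: the halves sum to 0x10000,
the carry wraps around to give 0x0001, and the one's complement is 0xfffe:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t fold(uint32_t s)
	{
		uint32_t r = s << 16 | s >> 16;	/* rotate halves */

		s = ~s;
		s -= r;
		return s >> 16;
	}

	int main(void)
	{
		printf("%#x\n", fold(0xffff0001));	/* prints 0xfffe */
		return 0;
	}
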
index 30b9ae511ea9e70ae397808eaa8df8d959b7a51e..e1971d34ef30e8f3f9904bfbadd77642f605a05e 100644 (file)
@@ -123,7 +123,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
 #define C(_x)                  PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED   0xffff
 
-static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = PERF_COUNT_ARC_LDC,
index c67c0f0f5f7781a3bc555cf807f5c8953f154b4b..ec640219d989f3d01722ccb8831c810d5837fa09 100644 (file)
@@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 
 void fpu_init_task(struct pt_regs *regs)
 {
+       const unsigned int fwe = 0x80000000;
+
        /* default rounding mode */
        write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
 
-       /* set "Write enable" to allow explicit write to exception flags */
-       write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+       /* Initialize to zero: setting requires FWE be set */
+       write_aux_reg(ARC_REG_FPU_STATUS, fwe);
 }
 
 void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 {
        struct arc_fpu *save = &prev->thread.fpu;
        struct arc_fpu *restore = &next->thread.fpu;
+       const unsigned int fwe = 0x80000000;
 
        save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
        save->status = read_aux_reg(ARC_REG_FPU_STATUS);
 
        write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
-       write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+       write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
 }
 
 #endif
index 47bab67f8649b53f0fffddca05ea5d87983ec37d..9e28058cdba85930d0697056b3017a1900f75016 100644 (file)
@@ -260,7 +260,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 {
        const u8 *ptr;
        unsigned long tableSize = table->size, hdrSize;
-       unsigned n;
+       unsigned int n;
        const u32 *fde;
        struct {
                u8 version;
@@ -462,7 +462,7 @@ static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        uleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -483,7 +483,7 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
 {
        const u8 *cur = *pcur;
        sleb128_t value;
-       unsigned shift;
+       unsigned int shift;
 
        for (shift = 0, value = 0; cur < end; shift += 7) {
                if (shift + 7 > 8 * sizeof(value)
@@ -609,7 +609,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
 static signed fde_pointer_type(const u32 *cie)
 {
        const u8 *ptr = (const u8 *)(cie + 2);
-       unsigned version = *ptr;
+       unsigned int version = *ptr;
 
        if (*++ptr) {
                const char *aug;
@@ -904,7 +904,7 @@ int arc_unwind(struct unwind_frame_info *frame)
        const u8 *ptr = NULL, *end = NULL;
        unsigned long pc = UNW_PC(frame) - frame->call_frame;
        unsigned long startLoc = 0, endLoc = 0, cfa;
-       unsigned i;
+       unsigned int i;
        signed ptrType = -1;
        uleb128_t retAddrReg = 0;
        const struct unwind_table *table;
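
The LEB128 decoders touched above accumulate 7 payload bits per byte, least
significant group first, with the top bit marking continuation. A simplified
stand-alone sketch (illustrative; it omits the bounds checks the kernel
version performs), decoding DWARF's classic example 0xe5 0x8e 0x26 = 624485:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t uleb128(const uint8_t **p)
	{
		uint64_t value = 0;
		unsigned int shift = 0;
		uint8_t byte;

		do {
			byte = *(*p)++;
			value |= (uint64_t)(byte & 0x7f) << shift;
			shift += 7;
		} while (byte & 0x80);
		return value;
	}

	int main(void)
	{
		const uint8_t buf[] = { 0xe5, 0x8e, 0x26 };
		const uint8_t *p = buf;

		printf("%llu\n", (unsigned long long)uleb128(&p));	/* 624485 */
		return 0;
	}
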
index e2146a8da1953a2c6c22f203b22b74c96a7c6294..529ae50f9fe23ced655e9dbf72a4914761ef02d9 100644 (file)
@@ -88,6 +88,8 @@ SECTIONS
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               IRQENTRY_TEXT
+               SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
        }
index 3f35761dc9ff287a7a190b44d595081c8735719a..23595fc5a29a932250a4ff43cd4aacd37c16d66a 100644 (file)
@@ -15,8 +15,6 @@ CONFIG_SLAB=y
 CONFIG_ARCH_NOMADIK=y
 CONFIG_MACH_NOMADIK_8815NHK=y
 CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_GENERIC=y
-CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_MTD_RAW_NAND=y
 CONFIG_MTD_NAND_FSMC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
@@ -97,6 +95,7 @@ CONFIG_REGULATOR=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_TPO_TPG110=y
 CONFIG_DRM_PL111=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_SHA1=y
 CONFIG_CRYPTO_DES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
index 31eb75b6002fbd5bccc6bd8b86a9256bbd157115..9bdafd57888c859f580ffbcadfd3b63070e8c780 100644 (file)
@@ -112,7 +112,7 @@ static struct kpp_alg curve25519_alg = {
        .max_size               = curve25519_max_size,
 };
 
-static int __init mod_init(void)
+static int __init arm_curve25519_init(void)
 {
        if (elf_hwcap & HWCAP_NEON) {
                static_branch_enable(&have_neon);
@@ -122,14 +122,14 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit arm_curve25519_exit(void)
 {
        if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON)
                crypto_unregister_kpp(&curve25519_alg);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(arm_curve25519_init);
+module_exit(arm_curve25519_exit);
 
 MODULE_ALIAS_CRYPTO("curve25519");
 MODULE_ALIAS_CRYPTO("curve25519-neon");
index cfc9dfd70aad29ec0b10cc591fafe0be97688c08..f673e13e0f942279a2fc4b58d71400bbebf14838 100644 (file)
@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
 
 /*
  * Physical start and end address of the kernel sections. These addresses are
- * 2MB-aligned to match the section mappings placed over the kernel.
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
  */
-extern u32 kernel_sec_start;
-extern u32 kernel_sec_end;
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
 
 /*
  * Physical vs virtual RAM address space conversion.  These are
index 9eb0b4dbcc12ce39b66280e6628897e6ed8d7ba2..29070eb8df7d9d89c9e1b96f6d96b70514430665 100644 (file)
@@ -49,7 +49,8 @@
 
        /*
         * This needs to be assigned at runtime when the linker symbols are
-        * resolved.
+	 * resolved. These are really unsigned 64-bit values, but in this
+	 * assembly code we store them as two 32-bit words.
         */
        .pushsection .data
        .align  2
@@ -57,7 +58,9 @@
        .globl  kernel_sec_end
 kernel_sec_start:
        .long   0
+       .long   0
 kernel_sec_end:
+       .long   0
        .long   0
        .popsection
 
@@ -250,7 +253,11 @@ __create_page_tables:
        add     r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
        ldr     r6, =(_end - 1)
        adr_l   r5, kernel_sec_start            @ _pa(kernel_sec_start)
-       str     r8, [r5]                        @ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r8, [r5, #4]                    @ Save physical start of kernel (BE)
+#else
+       str     r8, [r5]                        @ Save physical start of kernel (LE)
+#endif
        orr     r3, r8, r7                      @ Add the MMU flags
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:     str     r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
        bls     1b
        eor     r3, r3, r7                      @ Remove the MMU flags
        adr_l   r5, kernel_sec_end              @ _pa(kernel_sec_end)
-       str     r3, [r5]                        @ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r3, [r5, #4]                    @ Save physical end of kernel (BE)
+#else
+       str     r3, [r5]                        @ Save physical end of kernel (LE)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
        /*
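
The #ifdef exists because a 32-bit str only fills half of the now 64-bit
kernel_sec_start/kernel_sec_end slots, and the byte offset of the low half
depends on endianness. A user-space sketch of the layout (illustrative only):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t slot = 0x40008000ULL;	/* stands in for kernel_sec_start */
		uint32_t low;
		size_t off = (htons(1) == 1) ? 4 : 0;	/* big-endian: low half at +4 */

		memcpy(&low, (uint8_t *)&slot + off, sizeof(low));
		printf("%#x\n", low);		/* 0x40008000 either way */
		return 0;
	}
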
index abb07f10551534cb3a5e257f1fe9b6a550544ce3..74e63d4531aaec9e60f2c8bcd8220ba4784e68cf 100644 (file)
 /*
  * PCI Control/Status Registers
  */
-#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
-
-#define PCI_NP_AD               IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
-#define PCI_NP_CBE              IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
-#define PCI_NP_WDATA            IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
-#define PCI_NP_RDATA            IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
-#define PCI_CRP_AD_CBE          IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
-#define PCI_CRP_WDATA           IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
-#define PCI_CRP_RDATA           IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
-#define PCI_CSR                 IXP4XX_PCI_CSR(PCI_CSR_OFFSET) 
-#define PCI_ISR                 IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
-#define PCI_INTEN               IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
-#define PCI_DMACTRL             IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
-#define PCI_AHBMEMBASE          IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
-#define PCI_AHBIOBASE           IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
-#define PCI_PCIMEMBASE          IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
-#define PCI_AHBDOORBELL         IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
-#define PCI_PCIDOORBELL         IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
-#define PCI_ATPDMA0_AHBADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
-#define PCI_ATPDMA0_PCIADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
-#define PCI_ATPDMA0_LENADDR     IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
-#define PCI_ATPDMA1_AHBADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
-#define PCI_ATPDMA1_PCIADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
-#define PCI_ATPDMA1_LENADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
+#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x)))
+
+#define PCI_NP_AD               _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET)
+#define PCI_NP_CBE              _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET)
+#define PCI_NP_WDATA            _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET)
+#define PCI_NP_RDATA            _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET)
+#define PCI_CRP_AD_CBE          _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET)
+#define PCI_CRP_WDATA           _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET)
+#define PCI_CRP_RDATA           _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET)
+#define PCI_CSR                 _IXP4XX_PCI_CSR(PCI_CSR_OFFSET) 
+#define PCI_ISR                 _IXP4XX_PCI_CSR(PCI_ISR_OFFSET)
+#define PCI_INTEN               _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET)
+#define PCI_DMACTRL             _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET)
+#define PCI_AHBMEMBASE          _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET)
+#define PCI_AHBIOBASE           _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET)
+#define PCI_PCIMEMBASE          _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET)
+#define PCI_AHBDOORBELL         _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET)
+#define PCI_PCIDOORBELL         _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET)
+#define PCI_ATPDMA0_AHBADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET)
+#define PCI_ATPDMA0_PCIADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET)
+#define PCI_ATPDMA0_LENADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET)
+#define PCI_ATPDMA1_AHBADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
+#define PCI_ATPDMA1_PCIADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
+#define PCI_ATPDMA1_LENADDR     _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
 
 /*
  * PCI register values and bit definitions 
index 7583bda5ea7dba7cb7f88991cc09dd641294e758..a4e0060051070f5dec2c910c7d7f708794313d09 100644 (file)
@@ -1608,6 +1608,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
        if (offset == 0)
                return;
 
+       /*
+        * Offset the kernel section physical offsets so that the kernel
+        * mapping will work out later on.
+        */
+       kernel_sec_start += offset;
+       kernel_sec_end += offset;
+
        /*
         * Get the address of the remap function in the 1:1 identity
         * mapping setup by the early page table assembly code.  We
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
        void *zero_page;
 
-       pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+       pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
                 kernel_sec_start, kernel_sec_end);
 
        prepare_page_table();
index 5c5e1952000ab49aeeb03fecafe9a5e032ee0935..f8e11f7c78807d3dbdfc1d5738a76b995ee53a52 100644 (file)
@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
        ldr     r6, =(_end - 1)
        add     r7, r2, #0x1000
        add     r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
-       add     r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+       add     r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
 1:     ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
index fdcd54d39c1efcd789e7860b81d1033f2104f4b9..62c3c1d2190f6a3a66c9b1ebf58ec0018a83c875 100644 (file)
@@ -156,6 +156,7 @@ config ARM64
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+       select HAVE_ARCH_PFN_VALID
        select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
index 7b668db432612941375bd315215e1195a1445513..1110d386f3b49254b6c817657b87a4c5fa5106ff 100644 (file)
@@ -183,6 +183,8 @@ endif
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
+       $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso
+       $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32
 
 ifeq ($(KBUILD_EXTMOD),)
 # We need to generate vdso-offsets.h before compiling certain files in kernel/.
index 23cdcc9f7c72577aaf2a6193b2dedb9cc54c67ac..1ccca83292ac9ee3a4d1bcd2b5486b893b9d7d61 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, LGE Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
@@ -9,6 +10,9 @@
 #include "pm8994.dtsi"
 #include "pmi8994.dtsi"
 
+/* cont_splash_mem has different memory mapping */
+/delete-node/ &cont_splash_mem;
+
 / {
        model = "LG Nexus 5X";
        compatible = "lg,bullhead", "qcom,msm8992";
@@ -17,6 +21,9 @@
        qcom,board-id = <0xb64 0>;
        qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
 
+       /* Bullhead firmware doesn't support PSCI */
+       /delete-node/ psci;
+
        aliases {
                serial0 = &blsp1_uart2;
        };
                        ftrace-size = <0x10000>;
                        pmsg-size = <0x20000>;
                };
+
+               cont_splash_mem: memory@3400000 {
+                       reg = <0 0x03400000 0 0x1200000>;
+                       no-map;
+               };
        };
 };
 
index ffe1a9bd8f705368888b548d56faeeca5169a278..c096b7758aa0e43bd66e1f0886ead25f54541639 100644 (file)
@@ -1,12 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015, Huawei Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
  */
 
 /dts-v1/;
 
 #include "msm8994.dtsi"
 
+/* Angler's firmware does not report where the memory is allocated */
+/delete-node/ &cont_splash_mem;
+
 / {
        model = "Huawei Nexus 6P";
        compatible = "huawei,angler", "qcom,msm8994";
index a8c274ad74c47b55a2e4f4751503cdd697ee0be7..188c5768a55ae076a7d58444bc68efdc574c9092 100644 (file)
                                           &BIG_CPU_SLEEP_1
                                           &CLUSTER_SLEEP_0>;
                        next-level-cache = <&L2_700>;
-                       qcom,freq-domain = <&cpufreq_hw 1>;
+                       qcom,freq-domain = <&cpufreq_hw 2>;
                        #cooling-cells = <2>;
                        L2_700: l2-cache {
                                compatible = "cache";
index 4d052e39b34891bbfac21a34580042c89de98311..eb6b1d15293dfc110f3839028d8918ec81373523 100644 (file)
@@ -69,7 +69,7 @@
                };
                rmtfs_upper_guard: memory@f5d01000 {
                        no-map;
-                       reg = <0 0xf5d01000 0 0x2000>;
+                       reg = <0 0xf5d01000 0 0x1000>;
                };
 
                /*
@@ -78,7 +78,7 @@
                 */
                removed_region: memory@88f00000 {
                        no-map;
-                       reg = <0 0x88f00000 0 0x200000>;
+                       reg = <0 0x88f00000 0 0x1c00000>;
                };
 
                ramoops: ramoops@ac300000 {
index c2a709a384e9ee833627bc1c34e69a65958e61d4..d7591a4621a2fd56a0e248af7887beb772040017 100644 (file)
                left_spkr: wsa8810-left{
                        compatible = "sdw10217211000";
                        reg = <0 3>;
-                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrLeft";
                        #sound-dai-cells = <0>;
 
                right_spkr: wsa8810-right{
                        compatible = "sdw10217211000";
-                       powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
                        reg = <0 4>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrRight";
index b8eb0453123d1a34c293b67ede4bf931baa454e6..55f19450091b20de917ec929b695a6f18e02d178 100644 (file)
@@ -51,7 +51,7 @@ config CRYPTO_SM4_ARM64_CE
        tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
        depends on KERNEL_MODE_NEON
        select CRYPTO_ALGAPI
-       select CRYPTO_SM4
+       select CRYPTO_LIB_SM4
 
 config CRYPTO_GHASH_ARM64_CE
        tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
index 2754c875d39c3ed7bb85da2e6084128e9c865584..9c93cfc4841bc3527eef0371fa0f579cb49a1842 100644 (file)
@@ -17,12 +17,20 @@ MODULE_LICENSE("GPL v2");
 
 asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in);
 
+static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key,
+                      unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
 static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
        if (!crypto_simd_usable()) {
-               crypto_sm4_encrypt(tfm, out, in);
+               sm4_crypt_block(ctx->rkey_enc, out, in);
        } else {
                kernel_neon_begin();
                sm4_ce_do_crypt(ctx->rkey_enc, out, in);
@@ -32,10 +40,10 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
        if (!crypto_simd_usable()) {
-               crypto_sm4_decrypt(tfm, out, in);
+               sm4_crypt_block(ctx->rkey_dec, out, in);
        } else {
                kernel_neon_begin();
                sm4_ce_do_crypt(ctx->rkey_dec, out, in);
@@ -49,12 +57,12 @@ static struct crypto_alg sm4_ce_alg = {
        .cra_priority                   = 200,
        .cra_flags                      = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize                  = SM4_BLOCK_SIZE,
-       .cra_ctxsize                    = sizeof(struct crypto_sm4_ctx),
+       .cra_ctxsize                    = sizeof(struct sm4_ctx),
        .cra_module                     = THIS_MODULE,
        .cra_u.cipher = {
                .cia_min_keysize        = SM4_KEY_SIZE,
                .cia_max_keysize        = SM4_KEY_SIZE,
-               .cia_setkey             = crypto_sm4_set_key,
+               .cia_setkey             = sm4_ce_setkey,
                .cia_encrypt            = sm4_ce_encrypt,
                .cia_decrypt            = sm4_ce_decrypt
        }
index 21fa330f498dd7214985aab691d33126f4cabb68..b83fb24954b77b6cce9467107cff019eebb5cf5b 100644 (file)
@@ -33,8 +33,7 @@
  * EL2.
  */
 .macro __init_el2_timers
-       mrs     x0, cnthctl_el2
-       orr     x0, x0, #3                      // Enable EL1 physical timers
+       mov     x0, #3                          // Enable EL1 physical timers
        msr     cnthctl_el2, x0
        msr     cntvoff_el2, xzr                // Clear virtual offset
 .endm
index 993a27ea6f543c3087f2f28c7990db54dca3ca4f..f98c91bbd7c17e5014a4d8cef74fb537dabea73e 100644 (file)
@@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to);
 
 typedef struct page *pgtable_t;
 
+int pfn_valid(unsigned long pfn);
 int pfn_is_map_memory(unsigned long pfn);
 
 #include <asm/memory.h>
index e9a2b8f277922f85ed33ac4c09eefb75f7c5613c..0ca72f5cda41bbe7b52434606359c2e41bf4c95f 100644 (file)
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                kvm->arch.return_nisv_io_abort_to_user = true;
                break;
        case KVM_CAP_ARM_MTE:
-               if (!system_supports_mte() || kvm->created_vcpus)
-                       return -EINVAL;
-               r = 0;
-               kvm->arch.mte_enabled = true;
+               mutex_lock(&kvm->lock);
+               if (!system_supports_mte() || kvm->created_vcpus) {
+                       r = -EINVAL;
+               } else {
+                       r = 0;
+                       kvm->arch.mte_enabled = true;
+               }
+               mutex_unlock(&kvm->lock);
                break;
        default:
                r = -EINVAL;
index d938ce95d3bdf255251cc0c69722c89275832749..a6ce991b146793d6dbd18f708763cd6f04e98bd4 100644 (file)
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
        struct kvm_mem_range r1, r2;
 
-       if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+       if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
                return false;
        if (r1.start != r2.start)
                return false;
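
The fix reflects that the range is half-open: its last byte sits at end - 1,
so probing end itself could land in the following region and wrongly reject a
range that is entirely memory. A stand-alone illustration (hypothetical
helper, not the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct range { unsigned long start, end; };	/* half-open [start, end) */

	static bool contains(struct range r, unsigned long addr)
	{
		return addr >= r.start && addr < r.end;
	}

	int main(void)
	{
		struct range mem = { 0x1000, 0x2000 };

		/* All of [0x1000, 0x2000) lies in mem, yet probing the
		 * exclusive bound 0x2000 itself would have rejected it. */
		printf("%d %d\n", contains(mem, 0x2000 - 1), contains(mem, 0x2000));
		/* prints: 1 0 */
		return 0;
	}
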
index 8490ed2917ff2430bc3f17b743355a89bed038f5..1fdb7bb7c19845fdc5a1a6ad218be1d8219f88f6 100644 (file)
@@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
        free_area_init(max_zone_pfns);
 }
 
+int pfn_valid(unsigned long pfn)
+{
+       phys_addr_t addr = PFN_PHYS(pfn);
+       struct mem_section *ms;
+
+       /*
+        * Ensure the upper PAGE_SHIFT bits are clear in the
+        * pfn. Else it might lead to false positives when
+        * some of the upper bits are set, but the lower bits
+        * match a valid pfn.
+        */
+       if (PHYS_PFN(addr) != pfn)
+               return 0;
+
+       if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+               return 0;
+
+       ms = __pfn_to_section(pfn);
+       if (!valid_section(ms))
+               return 0;
+
+       /*
+	 * ZONE_DEVICE memory does not have memblock entries, so a
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Normal hotplugged memory will
+	 * likewise never have the MEMBLOCK_NOMAP flag set in its
+	 * memblock entries. Hence skip the memblock search for all
+	 * non-early memory sections, which covers all of hotplug
+	 * memory, both normal and ZONE_DEVICE based.
+        */
+       if (!early_section(ms))
+               return pfn_section_valid(ms, pfn);
+
+       return memblock_is_memory(addr);
+}
+EXPORT_SYMBOL(pfn_valid);
+
 int pfn_is_map_memory(unsigned long pfn)
 {
        phys_addr_t addr = PFN_PHYS(pfn);
index 29e946394fdb401753eae664c0de65362083d796..277d61a094637ce32b1966dac259ad9ca361d3b0 100644 (file)
@@ -26,6 +26,7 @@ config COLDFIRE
        bool "Coldfire CPU family support"
        select ARCH_HAVE_CUSTOM_GPIO_H
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_MULDIV64
        select GENERIC_CSUM
        select GPIOLIB
@@ -39,6 +40,7 @@ config M68000
        bool
        depends on !MMU
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_MULDIV64
        select CPU_HAS_NO_UNALIGNED
        select GENERIC_CSUM
@@ -54,6 +56,7 @@ config M68000
 config MCPU32
        bool
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_CAS
        select CPU_HAS_NO_UNALIGNED
        select CPU_NO_EFFICIENT_FFS
        help
@@ -383,7 +386,7 @@ config ADVANCED
 
 config RMW_INSNS
        bool "Use read-modify-write instructions"
-       depends on ADVANCED
+       depends on ADVANCED && !CPU_HAS_NO_CAS
        help
          This allows to use certain instructions that work with indivisible
          read-modify-write bus cycles. While this is faster than the
@@ -450,6 +453,9 @@ config M68K_L2_CACHE
 config CPU_HAS_NO_BITFIELDS
        bool
 
+config CPU_HAS_NO_CAS
+       bool
+
 config CPU_HAS_NO_MULDIV64
        bool
 
index 0a2cacf7be08255ca8351113089983ee41930d8e..5f536286f5fce88daceb06d28609a64902b54730 100644 (file)
@@ -84,6 +84,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -323,7 +324,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -502,6 +502,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -616,6 +617,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -624,7 +626,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -636,6 +637,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 4dc6dcfaf28ab323458532e2e94a6987168aacfb..d9568644051adb242675ec2c5e0a7a8ab7da7516 100644 (file)
@@ -80,6 +80,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -458,6 +459,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -580,7 +582,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -592,6 +593,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 23d910a692ab7d5efb34a2059fdef9e60246a431..dbf1960c666972c780b9275f5266ed76ab66aa7b 100644 (file)
@@ -87,6 +87,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -324,7 +325,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -480,6 +480,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -594,6 +595,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -602,7 +604,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -614,6 +615,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 2c3f428338469686598f4431422b89dbf5c24d41..7620db3e33e7fabc2b5ac6067460fb9402740eda 100644 (file)
@@ -77,6 +77,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -451,6 +452,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -573,7 +575,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -585,6 +586,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 5b1898d4b249a467ebd31093625fa7f6692de5ab..113a02d47ebbfae9db962874cb63350693cba56d 100644 (file)
@@ -79,6 +79,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -460,6 +461,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -582,7 +584,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -594,6 +595,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 9606ccd8dafa895688033c3d8f81cb2707b4f462..a8e006e8da6688fcca9cf3da85941ce173d6b90c 100644 (file)
@@ -78,6 +78,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -315,7 +316,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -482,6 +482,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -596,6 +597,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -604,7 +606,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -616,6 +617,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 3175ba5007e1f5d2513f7b668f4d054ed32c7e6d..b6655907a1f3cc3f0c1f744c21d72103c8d483f8 100644 (file)
@@ -98,6 +98,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -344,7 +345,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -567,6 +567,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -681,6 +682,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -689,7 +691,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -701,6 +702,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 793085f00c99f551a5b6b584d785ebd605cc7f40..563ba47db8c68dd0b10a91230f4cb874aa2b0a19 100644 (file)
@@ -76,6 +76,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -450,6 +451,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -572,7 +574,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -584,6 +585,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 56fbac7943b2ee8c6ad10ba673178b78e8d2817d..9f1b44de4706e1b09cfd8b4815da0721ab071097 100644 (file)
@@ -77,6 +77,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -451,6 +452,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -573,7 +575,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -585,6 +586,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 0e15431b65e2ac44a549ade1fb7fc62de096c8a8..1993433d08406eeb792ecb36e440b87651f062f6 100644 (file)
@@ -78,6 +78,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -314,7 +315,6 @@ CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -469,6 +469,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -583,6 +584,7 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC32_SELFTEST=m
 CONFIG_CRC64=m
 CONFIG_XZ_DEC_TEST=m
+CONFIG_GLOB_SELFTEST=m
 CONFIG_STRING_SELFTEST=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
@@ -591,7 +593,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -603,6 +604,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 3490a05f29b82d404b67501dc0c496d16c8349aa..56dbc63cef5bc48b8215a5299b588c211316ce3a 100644 (file)
@@ -74,6 +74,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -453,6 +454,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -574,7 +576,6 @@ CONFIG_TEST_LOCKUP=m
 CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -586,6 +587,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index 4e92c8c332fc5f979e140facc08b9814270abd5e..6bd1bba81ac328760ff1439fee41ebca0cfef78e 100644 (file)
@@ -74,6 +74,7 @@ CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_ZONES=y
 # CONFIG_NF_CONNTRACK_PROCFS is not set
@@ -452,6 +453,7 @@ CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
@@ -574,7 +576,6 @@ CONFIG_WW_MUTEX_SELFTEST=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_KUNIT=m
 CONFIG_KUNIT_ALL_TESTS=m
-CONFIG_TEST_LIST_SORT=m
 CONFIG_TEST_MIN_HEAP=m
 CONFIG_TEST_SORT=m
 CONFIG_TEST_DIV64=m
@@ -586,6 +587,7 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_STRSCPY=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
 CONFIG_TEST_XARRAY=m
index d2875e32abfca96f9f043d366b568e2061a5fb99..79e55421cfb18a8499f4f547df3d0cd924355984 100644 (file)
@@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
 
        for (i = 0; i < MAX_UNIT; i++) {
                if (nfeth_dev[i]) {
-                       unregister_netdev(nfeth_dev[0]);
-                       free_netdev(nfeth_dev[0]);
+                       unregister_netdev(nfeth_dev[i]);
+                       free_netdev(nfeth_dev[i]);
                }
        }
        free_irq(nfEtherIRQ, nfeth_interrupt);
index 8637bf8a2f6520094420bc98fcc886671369c438..cfba83d230fdec37b34685e914be435e0eafec5d 100644 (file)
@@ -48,7 +48,7 @@ static inline int arch_atomic_##op##_return(int i, atomic_t *v)               \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (arch_atomic_read(v)));          \
+                       : "di" (i), "2" (arch_atomic_read(v)));         \
        return t;                                                       \
 }
 
@@ -63,7 +63,7 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v)          \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (arch_atomic_read(v)));          \
+                       : "di" (i), "2" (arch_atomic_read(v)));         \
        return tmp;                                                     \
 }
 
index 4a0c9dbd62fd0cf56744490d069697f7062c9aa7..f6e1132f4e352d40ab5c5454f3be06aa016b6211 100644 (file)
@@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
 #define __HAVE_ARCH_MEMCPY
 void * memcpy(void * dest,const void *src,size_t count);
 
-#define __HAVE_ARCH_STRLEN
-extern size_t strlen(const char *s);
-
-#define __HAVE_ARCH_STRCPY
-extern char *strcpy(char *dest, const char *src);
-
-#define __HAVE_ARCH_STRNCPY
-extern char *strncpy(char *dest, const char *src, size_t count);
-
-#define __HAVE_ARCH_STRCAT
-extern char *strcat(char *dest, const char *src);
-
-#define __HAVE_ARCH_MEMSET
-extern void *memset(void *, int, size_t);
-
 #endif
index 8ed409ecec9335e186a5d2684e3e10ad79facba4..e8a6a751dfd8eec60f337035d36feef0afe0007f 100644 (file)
 
 #include <linux/string.h>
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
 
 #include <linux/atomic.h>
 EXPORT_SYMBOL(__xchg8);
index 2d7a9974dbaef3cdd84be3daf739589f18f8376a..7b197667faf6cda53fa8ccafdd0e3ebddf3c22f3 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y  := lusercopy.o bitops.o checksum.o io.o memcpy.o \
-          ucmpdi2.o delay.o string.o
+lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
+          ucmpdi2.o delay.o
 
 obj-y  := iomap.o
diff --git a/arch/parisc/lib/memset.c b/arch/parisc/lib/memset.c
new file mode 100644 (file)
index 0000000..133e480
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <linux/types.h>
+#include <asm/string.h>
+
+#define OPSIZ (BITS_PER_LONG/8)
+typedef unsigned long op_t;
+
+void *
+memset (void *dstpp, int sc, size_t len)
+{
+  unsigned int c = sc;
+  long int dstp = (long int) dstpp;
+
+  if (len >= 8)
+    {
+      size_t xlen;
+      op_t cccc;
+
+      cccc = (unsigned char) c;
+      cccc |= cccc << 8;
+      cccc |= cccc << 16;
+      if (OPSIZ > 4)
+       /* Do the shift in two steps to avoid warning if long has 32 bits.  */
+       cccc |= (cccc << 16) << 16;
+
+      /* There are at least some bytes to set.
+        No need to test for LEN == 0 in this alignment loop.  */
+      while (dstp % OPSIZ != 0)
+       {
+         ((unsigned char *) dstp)[0] = c;
+         dstp += 1;
+         len -= 1;
+       }
+
+      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain.  */
+      xlen = len / (OPSIZ * 8);
+      while (xlen > 0)
+       {
+         ((op_t *) dstp)[0] = cccc;
+         ((op_t *) dstp)[1] = cccc;
+         ((op_t *) dstp)[2] = cccc;
+         ((op_t *) dstp)[3] = cccc;
+         ((op_t *) dstp)[4] = cccc;
+         ((op_t *) dstp)[5] = cccc;
+         ((op_t *) dstp)[6] = cccc;
+         ((op_t *) dstp)[7] = cccc;
+         dstp += 8 * OPSIZ;
+         xlen -= 1;
+       }
+      len %= OPSIZ * 8;
+
+      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain.  */
+      xlen = len / OPSIZ;
+      while (xlen > 0)
+       {
+         ((op_t *) dstp)[0] = cccc;
+         dstp += OPSIZ;
+         xlen -= 1;
+       }
+      len %= OPSIZ;
+    }
+
+  /* Write the last few bytes.  */
+  while (len > 0)
+    {
+      ((unsigned char *) dstp)[0] = c;
+      dstp += 1;
+      len -= 1;
+    }
+
+  return dstpp;
+}
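
The byte-replication step above can be checked on its own. A quick
stand-alone sketch (illustrative only) spreading 0x5a across an op_t:

	#include <stdio.h>

	int main(void)
	{
		unsigned long cccc = (unsigned char)0x5a;

		cccc |= cccc << 8;
		cccc |= cccc << 16;
		if (sizeof(cccc) > 4)		/* two steps, as in the code above */
			cccc |= (cccc << 16) << 16;
		printf("%#lx\n", cccc);		/* 0x5a5a5a5a(5a5a5a5a) */
		return 0;
	}
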
diff --git a/arch/parisc/lib/string.S b/arch/parisc/lib/string.S
deleted file mode 100644 (file)
index 4a64264..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *    PA-RISC assembly string functions
- *
- *    Copyright (C) 2019 Helge Deller <deller@gmx.de>
- */
-
-#include <asm/assembly.h>
-#include <linux/linkage.h>
-
-       .section .text.hot
-       .level PA_ASM_LEVEL
-
-       t0 = r20
-       t1 = r21
-       t2 = r22
-
-ENTRY_CFI(strlen, frame=0,no_calls)
-       or,COND(<>) arg0,r0,ret0
-       b,l,n   .Lstrlen_null_ptr,r0
-       depwi   0,31,2,ret0
-       cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
-       ldw,ma  4(ret0),t0
-       cmpib,tr 0,r0,.Lstrlen_loop
-       uxor,nbz r0,t0,r0
-.Lstrlen_not_aligned:
-       uaddcm  arg0,ret0,t1
-       shladd  t1,3,r0,t1
-       mtsar   t1
-       depwi   -1,%sar,32,t0
-       uxor,nbz r0,t0,r0
-.Lstrlen_loop:
-       b,l,n   .Lstrlen_end_loop,r0
-       ldw,ma  4(ret0),t0
-       cmpib,tr 0,r0,.Lstrlen_loop
-       uxor,nbz r0,t0,r0
-.Lstrlen_end_loop:
-       extrw,u,<> t0,7,8,r0
-       addib,tr,n -3,ret0,.Lstrlen_out
-       extrw,u,<> t0,15,8,r0
-       addib,tr,n -2,ret0,.Lstrlen_out
-       extrw,u,<> t0,23,8,r0
-       addi    -1,ret0,ret0
-.Lstrlen_out:
-       bv r0(rp)
-       uaddcm ret0,arg0,ret0
-.Lstrlen_null_ptr:
-       bv,n r0(rp)
-ENDPROC_CFI(strlen)
-
-
-ENTRY_CFI(strcpy, frame=0,no_calls)
-       ldb     0(arg1),t0
-       stb     t0,0(arg0)
-       ldo     0(arg0),ret0
-       ldo     1(arg1),t1
-       cmpb,=  r0,t0,2f
-       ldo     1(arg0),t2
-1:     ldb     0(t1),arg1
-       stb     arg1,0(t2)
-       ldo     1(t1),t1
-       cmpb,<> r0,arg1,1b
-       ldo     1(t2),t2
-2:     bv,n    r0(rp)
-ENDPROC_CFI(strcpy)
-
-
-ENTRY_CFI(strncpy, frame=0,no_calls)
-       ldb     0(arg1),t0
-       stb     t0,0(arg0)
-       ldo     1(arg1),t1
-       ldo     0(arg0),ret0
-       cmpb,=  r0,t0,2f
-       ldo     1(arg0),arg1
-1:     ldo     -1(arg2),arg2
-       cmpb,COND(=),n r0,arg2,2f
-       ldb     0(t1),arg0
-       stb     arg0,0(arg1)
-       ldo     1(t1),t1
-       cmpb,<> r0,arg0,1b
-       ldo     1(arg1),arg1
-2:     bv,n    r0(rp)
-ENDPROC_CFI(strncpy)
-
-
-ENTRY_CFI(strcat, frame=0,no_calls)
-       ldb     0(arg0),t0
-       cmpb,=  t0,r0,2f
-       ldo     0(arg0),ret0
-       ldo     1(arg0),arg0
-1:     ldb     0(arg0),t1
-       cmpb,<>,n r0,t1,1b
-       ldo     1(arg0),arg0
-2:     ldb     0(arg1),t2
-       stb     t2,0(arg0)
-       ldo     1(arg0),arg0
-       ldb     0(arg1),t0
-       cmpb,<> r0,t0,2b
-       ldo     1(arg1),arg1
-       bv,n    r0(rp)
-ENDPROC_CFI(strcat)
-
-
-ENTRY_CFI(memset, frame=0,no_calls)
-       copy    arg0,ret0
-       cmpb,COND(=) r0,arg0,4f
-       copy    arg0,t2
-       cmpb,COND(=) r0,arg2,4f
-       ldo     -1(arg2),arg3
-       subi    -1,arg3,t0
-       subi    0,t0,t1
-       cmpiclr,COND(>=) 0,t1,arg2
-       ldo     -1(t1),arg2
-       extru arg2,31,2,arg0
-2:     stb     arg1,0(t2)
-       ldo     1(t2),t2
-       addib,>= -1,arg0,2b
-       ldo     -1(arg3),arg3
-       cmpiclr,COND(<=) 4,arg2,r0
-       b,l,n   4f,r0
-#ifdef CONFIG_64BIT
-       depd,*  r0,63,2,arg2
-#else
-       depw    r0,31,2,arg2
-#endif
-       ldo     1(t2),t2
-3:     stb     arg1,-1(t2)
-       stb     arg1,0(t2)
-       stb     arg1,1(t2)
-       stb     arg1,2(t2)
-       addib,COND(>) -4,arg2,3b
-       ldo     4(t2),t2
-4:     bv,n    r0(rp)
-ENDPROC_CFI(memset)
-
-       .end
index 64201125a287bd22a5ff3fbbee9b23309f00a52e..d4b145b279f6c51bc2d644720af92db03e488350 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <asm/bug.h>
 #include <asm/book3s/32/mmu-hash.h>
+#include <asm/mmu.h>
+#include <asm/synch.h>
 
 #ifndef __ASSEMBLY__
 
@@ -28,6 +30,15 @@ static inline void kuep_lock(void)
                return;
 
        update_user_segments(mfsr(0) | SR_NX);
+       /*
+	 * This isync() shouldn't be necessary as the kernel is not expected to
+	 * run any instruction in userspace soon after the update of segments,
+	 * but hash based cores (at least G3) seem to exhibit random behaviour
+	 * when the 'isync' is not there. 603 cores don't have this behaviour,
+	 * so skip the 'isync' there and save several CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 static inline void kuep_unlock(void)
@@ -36,6 +47,15 @@ static inline void kuep_unlock(void)
                return;
 
        update_user_segments(mfsr(0) & ~SR_NX);
+       /*
+	 * This isync() shouldn't be necessary as an 'rfi' will soon be executed
+	 * to return to userspace, but hash based cores (at least G3) seem to
+	 * exhibit random behaviour when the 'isync' is not there. 603 cores
+	 * don't have this behaviour, so skip the 'isync' there and save
+	 * several CPU cycles.
+        */
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               isync();        /* Context sync required after mtsr() */
 }
 
 #ifdef CONFIG_PPC_KUAP
index d4bdf7d274ac1492e703370ad20b64c6ce7a8a2d..6b800d3e2681f6082a163b24d63a2be06f2f275d 100644 (file)
@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
 
 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
 
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);
 
 void replay_system_reset(void);
index 4982f3711fc3f75919a03472b6e4891ffa49ac57..2b3278534bc1495c20a5c041fb4f54554c625a5b 100644 (file)
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
 
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
 
index 3e5d470a6155a7226e456fa7bed81c23cb6a892e..14422e85149461ac3b8a55fae85dce7ce280c6e1 100644 (file)
@@ -70,6 +70,22 @@ struct pt_regs
                unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
        };
 #endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       struct { /* Must be a multiple of 16 bytes */
+               unsigned long mas0;
+               unsigned long mas1;
+               unsigned long mas2;
+               unsigned long mas3;
+               unsigned long mas6;
+               unsigned long mas7;
+               unsigned long srr0;
+               unsigned long srr1;
+               unsigned long csrr0;
+               unsigned long csrr1;
+               unsigned long dsrr0;
+               unsigned long dsrr1;
+       };
+#endif
 };
 #endif
 
index a47eefa09bcb2fb56b65263885bb59647cf9639c..5bee245d832b10f50718fa0e5a9e4fb2a81a7fd0 100644 (file)
@@ -309,24 +309,21 @@ int main(void)
        STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
 #endif
 
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-       DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-       DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+       STACK_PT_REGS_OFFSET(MAS0, mas0);
        /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-       DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-       DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-       DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-       DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-       DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-       DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-       DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-       DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-       DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-       DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-       DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-       DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+       STACK_PT_REGS_OFFSET(MMUCR, mas0);
+       STACK_PT_REGS_OFFSET(MAS1, mas1);
+       STACK_PT_REGS_OFFSET(MAS2, mas2);
+       STACK_PT_REGS_OFFSET(MAS3, mas3);
+       STACK_PT_REGS_OFFSET(MAS6, mas6);
+       STACK_PT_REGS_OFFSET(MAS7, mas7);
+       STACK_PT_REGS_OFFSET(_SRR0, srr0);
+       STACK_PT_REGS_OFFSET(_SRR1, srr1);
+       STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+       STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+       STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+       STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif
 
        /* About the CPU features table */
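The two hunks above fold the Book3E exception-save registers into struct pt_regs and switch asm-offsets.c from hand-built DEFINE(... offsetof ...) entries to STACK_PT_REGS_OFFSET(). For readers unfamiliar with the asm-offsets mechanism, here is a hypothetical host-side sketch of what those macros compute; struct pt_regs_sketch and the printf output are illustrative stand-ins — the real asm-offsets.c emits the values through a compile-time asm trick, and the kernel's macro also adds the stack-frame overhead to each offset:

    /* Hypothetical sketch of the offsets STACK_PT_REGS_OFFSET() generates. */
    #include <stddef.h>
    #include <stdio.h>

    struct pt_regs_sketch {             /* stand-in for struct pt_regs */
            unsigned long gpr[32];
            unsigned long mas0;         /* newly embedded Book3E slots */
            unsigned long mas1;
    };

    #define STACK_PT_REGS_OFFSET(sym, mem) \
            printf("#define %-6s %zu\n", #sym, offsetof(struct pt_regs_sketch, mem))

    int main(void)
    {
            STACK_PT_REGS_OFFSET(MAS0, mas0);
            STACK_PT_REGS_OFFSET(MAS1, mas1);
            return 0;
    }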
index 4aec59a77d4c540add7c3d923091fa23ab5ee4c2..37859e62a8dcba2af7ecebc5f158127121844063 100644 (file)
@@ -812,7 +812,6 @@ __start_interrupts:
  * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
  */
 EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
-1:
        /* SCV 0 */
        mr      r9,r13
        GET_PACA(r13)
@@ -842,10 +841,12 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
        b       system_call_vectored_sigill
 #endif
        .endr
-2:
 EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
 
-SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+// Treat scv vectors as soft-masked, see comment above.
+// Use absolute values rather than labels here, so they don't get relocated,
+// because this code runs unrelocated.
+SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
 
 #ifdef CONFIG_RELOCATABLE
 TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
index 764edd860ed46cc8f8a5307f36ae804e999ec082..68e5c0a7e99d178a4f65775a7047c6651bbc2d14 100644 (file)
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
        EXCEPTION_PROLOG_1
        EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
        prepare_transfer_to_handler
-       lwz     r5, _DSISR(r11)
+       lwz     r5, _DSISR(r1)
        andis.  r0, r5, DSISR_DABRMATCH@h
        bne-    1f
        bl      do_page_fault
index 87b806e8eded959e1c235868c86be76645cfbc42..e5503420b6c6da576ec6ce7fa994bb6ffc4c5b87 100644 (file)
@@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 /* only on e500mc */
 #define DBG_STACK_BASE         dbgirq_ctx
 
-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)              \
        mfspr   r8,SPRN_PIR;                            \
        slwi    r8,r8,2;                                \
        addis   r8,r8,level##_STACK_BASE@ha;            \
        lwz     r8,level##_STACK_BASE@l(r8);            \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)              \
        lis     r8,level##_STACK_BASE@ha;               \
        lwz     r8,level##_STACK_BASE@l(r8);            \
-       addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+       addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif
 
 /*
@@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
        mtmsr   r11;                                                    \
        mfspr   r11,SPRN_SPRG_THREAD;   /* if from user, start at top of   */\
        lwz     r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-       addi    r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame    */\
+       addi    r11,r11,THREAD_SIZE - INT_FRAME_SIZE;   /* allocate stack frame    */\
        beq     1f;                                                          \
        /* COMING FROM USER MODE */                                          \
        stw     r9,_CCR(r11);           /* save CR                         */\
@@ -516,24 +514,5 @@ label:
        bl      kernel_fp_unavailable_exception;                              \
        b       interrupt_return
 
-#else /* __ASSEMBLY__ */
-struct exception_regs {
-       unsigned long mas0;
-       unsigned long mas1;
-       unsigned long mas2;
-       unsigned long mas3;
-       unsigned long mas6;
-       unsigned long mas7;
-       unsigned long srr0;
-       unsigned long srr1;
-       unsigned long csrr0;
-       unsigned long csrr1;
-       unsigned long dsrr0;
-       unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE       ALIGN(sizeof (struct exception_regs), 16)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */
index 91e63eac4e8fac47e6cf6cad85e93c9a53d016d6..551b653228c47fff328a35a0cc24eba8973899df 100644 (file)
@@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
        trace_irq_exit(regs);
 }
 
-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;
@@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
        set_irq_regs(old_regs);
 }
 
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+       __do_IRQ(regs);
+}
+
 static void *__init alloc_vm_stack(void)
 {
        return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
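This irq.c hunk splits the raw handler body out as __do_IRQ() so that callers which have already performed interrupt entry (see the timer_interrupt() hunk further below) can invoke it directly, while the DEFINE_INTERRUPT_HANDLER_ASYNC wrapper keeps the entry/exit bookkeeping on the real interrupt path; the single_step_exception split later in this series follows the same shape. A hypothetical userspace sketch of that wrapper/body split — the entry/exit comments stand in for what the kernel macro generates:

    #include <stdio.h>

    struct pt_regs { unsigned long nip; };      /* minimal stand-in */

    /* Raw body: safe to call from code that already did interrupt entry. */
    static void __do_IRQ_sketch(struct pt_regs *regs)
    {
            printf("handling irq, nip=%#lx\n", regs->nip);
    }

    /* Wrapped handler: the entry point for real asynchronous interrupts. */
    static void do_IRQ_sketch(struct pt_regs *regs)
    {
            /* ... interrupt entry accounting would run here ... */
            __do_IRQ_sketch(regs);
            /* ... interrupt exit accounting would run here ... */
    }

    int main(void)
    {
            struct pt_regs regs = { .nip = 0x1234UL };
            do_IRQ_sketch(&regs);       /* normal interrupt path       */
            __do_IRQ_sketch(&regs);     /* replay path, entry done     */
            return 0;
    }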
index cbc28d1a2e1b1a767955ee3f0f77196aa50bee73..7a7cd6bda53ead1c783f778bde2bc7cbb502a4ad 100644 (file)
@@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
        if (user_mode(regs))
                return 0;
 
-       if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+       if (!IS_ENABLED(CONFIG_BOOKE) &&
+           (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
                return 0;
 
        /*
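The kprobes hunk above uses IS_ENABLED() so the MSR_IR/MSR_DR test compiles away on BOOKE, where those MSR bits do not exist. A simplified, self-contained sketch of the preprocessor trick behind IS_ENABLED() — the kernel's version additionally treats CONFIG_FOO_MODULE as enabled:

    #include <stdio.h>

    /* CONFIG_* macros are defined to 1 when set, left undefined otherwise. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_ENABLED(option) __is_defined(option)

    #define CONFIG_BOOKE 1              /* pretend the option is set */

    int main(void)
    {
            if (!IS_ENABLED(CONFIG_BOOKE))
                    printf("would check MSR_IR/MSR_DR\n");
            else
                    printf("check compiled out on BOOKE\n");
            return 0;
    }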
index 5ff0e55d0db1008d3f629b798c89c3d5bcc184de..defecb3b1b15c95b958b6d29b6da6ec982912196 100644 (file)
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
                 * CPU.  For instance, the boot cpu might never be valid
                 * for hotplugging.
                 */
-               if (smp_ops->cpu_offline_self)
+               if (smp_ops && smp_ops->cpu_offline_self)
                        c->hotpluggable = 1;
 #endif
 
index e45ce427bffb1f51ab52c801cf51184378a3e1ef..c487ba5a6e11c386f4e7fc755e679bf1776b47d1 100644 (file)
@@ -586,7 +586,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 
 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
-               do_IRQ(regs);
+               __do_IRQ(regs);
 #endif
 
        old_regs = set_irq_regs(regs);
index dfbce527c98ed2632252a2e7b453c6d006109ca5..d56254f05e174be819e9b746260481c55ec8f1ef 100644 (file)
@@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
        _exception(SIGTRAP, regs, TRAP_UNK, 0);
 }
 
-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
        clear_single_step(regs);
        clear_br_trace(regs);
@@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }
 
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+       __single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,
@@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 static void emulate_single_step(struct pt_regs *regs)
 {
        if (single_stepping(regs))
-               single_step_exception(regs);
+               __single_step_exception(regs);
 }
 
 static inline int __parse_fpscr(unsigned long fpscr)
index 0876216ceee671af764dc8f0602f8f8ef8d6e861..edea388e9d3fbbf4076c113ac8e14179dce18cac 100644 (file)
 /*
  * Updates the attributes of a page in three steps:
  *
- * 1. invalidate the page table entry
- * 2. flush the TLB
- * 3. install the new entry with the updated attributes
- *
- * Invalidating the pte means there are situations where this will not work
- * when in theory it should.
- * For example:
- * - removing write from page whilst it is being executed
- * - setting a page read-only whilst it is being read by another CPU
+ * 1. take the page_table_lock
+ * 2. install the new entry with the updated attributes
+ * 3. flush the TLB
  *
+ * This sequence is safe against concurrent updates, and also allows updating the
+ * attributes of a page currently being executed or accessed.
  */
 static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 {
@@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 
        spin_lock(&init_mm.page_table_lock);
 
-       /* invalidate the PTE so it's safe to modify */
-       pte = ptep_get_and_clear(&init_mm, addr, ptep);
-       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+       pte = ptep_get(ptep);
 
        /* modify the PTE bits as desired, then apply */
        switch (action) {
@@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
                break;
        }
 
-       set_pte_at(&init_mm, addr, ptep, pte);
+       pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);
 
        /* See ptesync comment in radix__set_pte_at() */
        if (radix_enabled())
                asm volatile("ptesync": : :"memory");
+
+       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
        spin_unlock(&init_mm.page_table_lock);
 
        return 0;
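The pageattr.c hunk replaces the invalidate-then-repopulate sequence with lock, install, flush, which keeps the PTE valid for the whole update. A hypothetical, self-contained sketch of that ordering; the types and helpers here are stand-ins, not kernel API:

    #include <pthread.h>
    #include <stdio.h>

    typedef unsigned long pte_t;

    static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void install_pte(pte_t *ptep, pte_t new_pte) { *ptep = new_pte; }
    static void flush_tlb_page(unsigned long addr) { printf("flush %#lx\n", addr); }

    static void change_page_attr_sketch(pte_t *ptep, unsigned long addr,
                                        pte_t new_pte)
    {
            pthread_mutex_lock(&page_table_lock);  /* 1. serialize updaters  */
            install_pte(ptep, new_pte);            /* 2. old entry stays valid
                                                    *    until the new lands */
            flush_tlb_page(addr);                  /* 3. drop stale entries  */
            pthread_mutex_unlock(&page_table_lock);
    }

    int main(void)
    {
            pte_t pte = 0x1;            /* made-up "present, read-only" */
            change_page_attr_sketch(&pte, 0x1000, 0x3);
            return 0;
    }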
index 6794145603de43289d88e0bf6ab6ea27e8c94b52..a208997ade88be2f26a237ab94ecf2ce5e5c510a 100644 (file)
@@ -98,7 +98,7 @@ config PPC_BOOK3S_64
        select PPC_HAVE_PMU_SUPPORT
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
-       select ARCH_ENABLE_PMD_SPLIT_PTLOCK
+       select ARCH_ENABLE_SPLIT_PMD_PTLOCK
        select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
        select ARCH_SUPPORTS_HUGETLBFS
        select ARCH_SUPPORTS_NUMA_BALANCING
index 6b08866684650844c5db2b7ea776fdd8dbb4364e..0dfaa6ab44cc51294c137653143497ab308756a5 100644 (file)
@@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
         * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
         * H_CPU_BEHAV_FAVOUR_SECURITY is.
         */
-       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+       if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
                security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-       else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+               pseries_security_flavor = 0;
+       } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
                pseries_security_flavor = 1;
        else
                pseries_security_flavor = 2;
index dbdbbc2f1dc518094674f43e9239bef7f9553193..8183ca343675a4c3c90ca806fddd6d3fc5d2c6dd 100644 (file)
@@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
 static struct xive_ipi_desc {
        unsigned int irq;
        char name[16];
+       atomic_t started;
 } *xive_ipis;
 
 /*
@@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
        .alloc  = xive_ipi_irq_domain_alloc,
 };
 
-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
        struct fwnode_handle *fwnode;
        struct irq_domain *ipi_domain;
@@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void)
                struct xive_ipi_desc *xid = &xive_ipis[node];
                struct xive_ipi_alloc_info info = { node };
 
-               /* Skip nodes without CPUs */
-               if (cpumask_empty(cpumask_of_node(node)))
-                       continue;
-
                /*
                 * Map one IPI interrupt per node for all cpus of that node.
                 * Since the HW interrupt number doesn't have any meaning,
@@ -1159,11 +1156,6 @@ static int __init xive_request_ipi(void)
                xid->irq = ret;
 
                snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-               ret = request_irq(xid->irq, xive_muxed_ipi_action,
-                                 IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
-               WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
        }
 
        return ret;
@@ -1178,6 +1170,22 @@ out:
        return ret;
 }
 
+static int xive_request_ipi(unsigned int cpu)
+{
+       struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+       int ret;
+
+       if (atomic_inc_return(&xid->started) > 1)
+               return 0;
+
+       ret = request_irq(xid->irq, xive_muxed_ipi_action,
+                         IRQF_PERCPU | IRQF_NO_THREAD,
+                         xid->name, NULL);
+
+       WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+       return ret;
+}
+
 static int xive_setup_cpu_ipi(unsigned int cpu)
 {
        unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
@@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
        if (xc->hw_ipi != XIVE_BAD_IRQ)
                return 0;
 
+       /* Register the IPI */
+       xive_request_ipi(cpu);
+
        /* Grab an IPI from the backend, this will populate xc->hw_ipi */
        if (xive_ops->get_ipi(cpu, xc))
                return -EIO;
@@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
        if (xc->hw_ipi == XIVE_BAD_IRQ)
                return;
 
+       /* TODO: clear IPI mapping */
+
        /* Mask the IPI */
        xive_do_source_set_mask(&xc->ipi_data, true);
 
@@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void)
        smp_ops->cause_ipi = xive_cause_ipi;
 
        /* Register the IPI */
-       xive_request_ipi();
+       xive_init_ipis();
 
        /* Allocate and setup IPI for the boot CPU */
        xive_setup_cpu_ipi(smp_processor_id());
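The xive rework above defers request_irq() until the first CPU of each node comes online, guarded by the new atomic 'started' counter so that exactly one caller per node registers the handler. A minimal sketch of that first-caller-wins pattern, assuming C11 atomics; the printf stands in for request_irq():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int started;          /* one per node in the real code */

    static int xive_request_ipi_sketch(void)
    {
            /* atomic_fetch_add returns the old value: only the first
             * caller (old value 0) performs the registration. */
            if (atomic_fetch_add(&started, 1) > 0)
                    return 0;
            printf("registering muxed IPI handler\n");
            return 0;
    }

    int main(void)
    {
            xive_request_ipi_sketch();  /* first CPU of the node: registers */
            xive_request_ipi_sketch();  /* later CPUs: no-op                */
            return 0;
    }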
index ec79944065c98ef2d14f7cdb15e0044088daf07e..baea7d204639a18539e31c59b44a951ae00bdc4e 100644 (file)
        model = "Microchip PolarFire-SoC Icicle Kit";
        compatible = "microchip,mpfs-icicle-kit";
 
+       aliases {
+               ethernet0 = &emac1;
+       };
+
        chosen {
                stdout-path = &serial0;
        };
index b9819570a7d170e9c03807ed29bcdfd20d2b0b2f..9d2fbbc1f7778f19db82a3dd63e11bfc101d39ef 100644 (file)
                        reg = <0x0 0x20112000 0x0 0x2000>;
                        interrupt-parent = <&plic>;
                        interrupts = <70 71 72 73>;
-                       mac-address = [00 00 00 00 00 00];
+                       local-mac-address = [00 00 00 00 00 00];
                        clocks = <&clkcfg 5>, <&clkcfg 2>;
                        status = "disabled";
                        clock-names = "pclk", "hclk";
index d3081e4d96006d3ea33ec21f50da493025d36c2e..3397ddac1a30caeab19cc7ee7fc99ff58a52fd7a 100644 (file)
@@ -11,7 +11,7 @@ endif
 CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
 
 ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
 endif
 
 extra-y += head.o
index 1a85305720e848033fcbd0fb7e4c7b45883e5d52..9c0511119bad9bada528a858ea5e82dc1453b060 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/ptrace.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
+#include <asm/switch_to.h>
 #include <linux/audit.h>
 #include <linux/ptrace.h>
 #include <linux/elf.h>
@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_struct *target,
 {
        struct __riscv_d_ext_state *fstate = &target->thread.fstate;
 
+       if (target == current)
+               fstate_save(current, task_pt_regs(current));
+
        membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
        membuf_store(&to, fstate->fcsr);
        return membuf_zero(&to, 4);     // explicitly pad
index 18bd0e4bc36cf2905f8e9652fca076c8202062fe..120b2f6f71bc097362c97ed4081eee4bc2575683 100644 (file)
@@ -229,8 +229,8 @@ static void __init init_resources(void)
        }
 
        /* Clean-up any unused pre-allocated resources */
-       mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-       memblock_free(__pa(mem_res), mem_res_sz);
+       if (res_idx >= 0)
+               memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
        return;
 
  error:
index 88134cc288d9a60b1a881a04315c413a0b7e1988..7cb4f391d106f4561250648144fb773584a70037 100644 (file)
@@ -197,7 +197,7 @@ static void __init setup_bootmem(void)
         * if end of dram is equal to maximum addressable memory.  For 64-bit
         * kernel, this problem can't happen here as the end of the virtual
         * address space is occupied by the kernel mapping; in that case this check must
-        * be done in create_kernel_page_table.
+        * be done as soon as the kernel mapping base address is determined.
         */
        max_mapped_addr = __pa(~(ulong)0);
        if (max_mapped_addr == (phys_ram_end - 1))
index a0e2130f0100ca4ad8420d6cfbc826d724716517..92c0a1b4c528244c3f6be4e4962b7f5c540ea4e9 100644 (file)
@@ -138,6 +138,8 @@ config S390
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_VMALLOC
+       select HAVE_ARCH_KCSAN
+       select HAVE_ARCH_KFENCE
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
index 1e3172877982e1ceb6b73cdcd9c87f56a217ab13..17dc4f1ac4faee5d55fdbd69f4f48d7de2350dc2 100644 (file)
@@ -142,7 +142,8 @@ all: bzImage
 KBUILD_IMAGE   := $(boot)/bzImage
 
 install:
-       $(Q)$(MAKE) $(build)=$(boot) $@
+       sh -x $(srctree)/$(boot)/install.sh $(KERNELRELEASE) $(KBUILD_IMAGE) \
+             System.map "$(INSTALL_PATH)"
 
 bzImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
index 41a64b8dce252531c0cb5bed6c1c5fddb916885b..0ba646899131625ab29ab3afdf84336957870353 100644 (file)
@@ -7,6 +7,7 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
@@ -36,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
 obj-y  := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
 obj-y  += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y  += version.o pgm_check_info.o ctype.o text_dma.o
+obj-y  += version.o pgm_check_info.o ctype.o
 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))  += uv.o
 obj-$(CONFIG_RELOCATABLE)      += machine_kexec_reloc.o
 obj-$(CONFIG_RANDOMIZE_BASE)   += kaslr.o
@@ -69,7 +70,3 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
 
 $(obj)/startup.a: $(OBJECTS) FORCE
        $(call if_changed,ar)
-
-install:
-       sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
-             System.map "$(INSTALL_PATH)"
index ae04e1c93764cb7bf343c23442909de6061dd5df..641ce0fc5c3eb0d2028028bbdad48c18f8e7c5c3 100644 (file)
@@ -2,14 +2,9 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
+#include <asm/extable.h>
 #include <linux/types.h>
 
-#define BOOT_STACK_OFFSET 0x8000
-
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-
 void startup_kernel(void);
 unsigned long detect_memory(void);
 bool is_ipl_block_dump(void);
@@ -18,17 +13,22 @@ void setup_boot_command_line(void);
 void parse_boot_command_line(void);
 void verify_facilities(void);
 void print_missing_facilities(void);
+void sclp_early_setup_buffer(void);
 void print_pgm_check_info(void);
 unsigned long get_random_base(unsigned long safe_addr);
 void __printf(1, 2) decompressor_printk(const char *fmt, ...);
 
+/* Symbols defined by linker scripts */
 extern const char kernel_version[];
 extern unsigned long memory_limit;
 extern unsigned long vmalloc_size;
 extern int vmalloc_size_set;
 extern int kaslr_enabled;
+extern char __boot_data_start[], __boot_data_end[];
+extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+extern char _decompressor_syms_start[], _decompressor_syms_end[];
+extern char _stack_start[], _stack_end[];
 
 unsigned long read_ipl_report(unsigned long safe_offset);
 
-#endif /* __ASSEMBLY__ */
 #endif /* BOOT_BOOT_H */
index e30d3fdbbc7816dd5b098c39d4aebeaecaea183f..3b860061e84d0ebcb1456a85bfd8b25449480a68 100644 (file)
@@ -9,6 +9,7 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 obj-y  := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
 obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
index 37a4a8d33c6c5b89a301f08015874a2347773c6b..e27c2140d620689eb8270695059f6815b38b981e 100644 (file)
 #define memmove memmove
 #define memzero(s, n) memset((s), 0, (n))
 
-/* Symbols defined by linker scripts */
-extern char _end[];
-extern unsigned char _compressed_start[];
-extern unsigned char _compressed_end[];
-
 #ifdef CONFIG_KERNEL_BZIP2
 #define BOOT_HEAP_SIZE 0x400000
 #elif CONFIG_KERNEL_ZSTD
index 41f0ad97a4db269716569689bdc82546238ff2a4..a59f75c5b04903828ad83165932080931fd2fdc1 100644 (file)
@@ -26,7 +26,12 @@ struct vmlinux_info {
        unsigned long rela_dyn_end;
 };
 
+/* Symbols defined by linker scripts */
+extern char _end[];
+extern unsigned char _compressed_start[];
+extern unsigned char _compressed_end[];
 extern char _vmlinux_info[];
+
 #define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
 
 #endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
index 27a09c1c78f6a701fac12b0f9d9150a389675ad6..918e05137d4c6ca935cbb0e4cb510e24e86aa52a 100644 (file)
@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/sclp.h>
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
@@ -34,27 +37,6 @@ SECTIONS
                *(.data.*)
                _edata = . ;
        }
-       /*
-       * .dma section for code, data, ex_table that need to stay below 2 GB,
-       * even when the kernel is relocated above 2 GB.
-       */
-       . = ALIGN(PAGE_SIZE);
-       _sdma = .;
-       .dma.text : {
-               _stext_dma = .;
-               *(.dma.text)
-               . = ALIGN(PAGE_SIZE);
-               _etext_dma = .;
-       }
-       . = ALIGN(16);
-       .dma.ex_table : {
-               _start_dma_ex_table = .;
-               KEEP(*(.dma.ex_table))
-               _stop_dma_ex_table = .;
-       }
-       .dma.data : { *(.dma.data) }
-       . = ALIGN(PAGE_SIZE);
-       _edma = .;
 
        BOOT_DATA
        BOOT_DATA_PRESERVED
@@ -69,6 +51,17 @@ SECTIONS
                *(.bss)
                *(.bss.*)
                *(COMMON)
+               /*
+                * Stacks for the decompressor
+                */
+               . = ALIGN(PAGE_SIZE);
+               _dump_info_stack_start = .;
+               . += PAGE_SIZE;
+               _dump_info_stack_end = .;
+               . = ALIGN(PAGE_SIZE);
+               _stack_start = .;
+               . += BOOT_STACK_SIZE;
+               _stack_end = .;
                _ebss = .;
        }
 
index 51693cfb65c2c3b35c9fcd87d6e19e6404876462..40f4cff538b8d830b9912c92037feef8c8acee88 100644 (file)
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include "boot.h"
+#include <asm/sclp.h>
 
 #define ARCH_OFFSET    4
 
+#define EP_OFFSET      0x10008
+#define EP_STRING      "S390EP"
+
 __HEAD
 
 #define IPL_BS 0x730
@@ -275,11 +277,11 @@ iplstart:
 .Lcpuid:.fill  8,1,0
 
 #
-# startup-code at 0x10000, running in absolute addressing mode
+# normal startup-code, running in absolute addressing mode
 # this is called either by the ipl loader or directly by PSW restart
 # or linload or SALIPL
 #
-       .org    0x10000
+       .org    STARTUP_NORMAL_OFFSET
 SYM_CODE_START(startup)
        j       startup_normal
        .org    EP_OFFSET
@@ -292,9 +294,9 @@ SYM_CODE_START(startup)
        .ascii  EP_STRING
        .byte   0x00,0x01
 #
-# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
+# kdump startup-code, running in 64 bit absolute addressing mode
 #
-       .org    0x10010
+       .org    STARTUP_KDUMP_OFFSET
        j       startup_kdump
 SYM_CODE_END(startup)
 SYM_CODE_START_LOCAL(startup_normal)
@@ -315,18 +317,16 @@ SYM_CODE_START_LOCAL(startup_normal)
        xc      0x300(256),0x300
        xc      0xe00(256),0xe00
        xc      0xf00(256),0xf00
-       lctlg   %c0,%c15,.Lctl-.LPG0(%r13)      # load control registers
        stcke   __LC_BOOT_CLOCK
        mvc     __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
        spt     6f-.LPG0(%r13)
        mvc     __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
-       l       %r15,.Lstack-.LPG0(%r13)
+       larl    %r15,_stack_end-STACK_FRAME_OVERHEAD
+       brasl   %r14,sclp_early_setup_buffer
        brasl   %r14,verify_facilities
        brasl   %r14,startup_kernel
 SYM_CODE_END(startup_normal)
 
-.Lstack:
-       .long   BOOT_STACK_OFFSET + BOOT_STACK_SIZE - STACK_FRAME_OVERHEAD
        .align  8
 6:     .long   0x7fffffff,0xffffffff
 .Lext_new_psw:
@@ -335,35 +335,6 @@ SYM_CODE_END(startup_normal)
        .quad   0x0000000180000000,startup_pgm_check_handler
 .Lio_new_psw:
        .quad   0x0002000180000000,0x1f0        # disabled wait
-.Lctl: .quad   0x04040000              # cr0: AFP registers & secondary space
-       .quad   0                       # cr1: primary space segment table
-       .quad   .Lduct                  # cr2: dispatchable unit control table
-       .quad   0                       # cr3: instruction authorization
-       .quad   0xffff                  # cr4: instruction authorization
-       .quad   .Lduct                  # cr5: primary-aste origin
-       .quad   0                       # cr6:  I/O interrupts
-       .quad   0                       # cr7:  secondary space segment table
-       .quad   0x0000000000008000      # cr8:  access registers translation
-       .quad   0                       # cr9:  tracing off
-       .quad   0                       # cr10: tracing off
-       .quad   0                       # cr11: tracing off
-       .quad   0                       # cr12: tracing off
-       .quad   0                       # cr13: home space segment table
-       .quad   0xc0000000              # cr14: machine check handling off
-       .quad   .Llinkage_stack         # cr15: linkage stack operations
-
-       .section .dma.data,"aw",@progbits
-.Lduct: .long  0,.Laste,.Laste,0,.Lduald,0,0,0
-       .long   0,0,0,0,0,0,0,0
-.Llinkage_stack:
-       .long   0,0,0x89000000,0,0,0,0x8a000000,0
-       .align 64
-.Laste:        .quad   0,0xffffffffffffffff,0,0,0,0,0,0
-       .align  128
-.Lduald:.rept  8
-       .long   0x80000000,0,0,0        # invalid access-list entries
-       .endr
-       .previous
 
 #include "head_kdump.S"
 
@@ -386,15 +357,13 @@ SYM_CODE_START_LOCAL(startup_pgm_check_handler)
        oi      __LC_RETURN_PSW+1,0x2   # set wait state bit
        larl    %r9,.Lold_psw_disabled_wait
        stg     %r9,__LC_PGM_NEW_PSW+8
-       l       %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
+       larl    %r15,_dump_info_stack_end-STACK_FRAME_OVERHEAD
        brasl   %r14,print_pgm_check_info
 .Lold_psw_disabled_wait:
        la      %r8,4095
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
        lpswe   __LC_RETURN_PSW         # disabled wait
 SYM_CODE_END(startup_pgm_check_handler)
-.Ldump_info_stack:
-       .long   0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
 
 #
 # params at 10400 (setup.h)
@@ -415,7 +384,4 @@ SYM_DATA_START(parmarea)
        .org    PARMAREA+__PARMAREA_SIZE
 SYM_DATA_END(parmarea)
 
-       .org    EARLY_SCCB_OFFSET
-       .fill   4096
-
        .org    HEAD_END
index 0b4965573656f668fe84e56f0df27e2a536414d5..9b14045065b6e1e4bfbfcfa6c714a1d5d1e08a6e 100644 (file)
@@ -54,9 +54,9 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
         * not overlap with any component or any certificate.
         */
 repeat:
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
-           intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
-               safe_addr = INITRD_START + INITRD_SIZE;
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+           intersects(initrd_data.start, initrd_data.size, safe_addr, size))
+               safe_addr = initrd_data.start + initrd_data.size;
        for_each_rb_entry(comp, comps)
                if (intersects(safe_addr, size, comp->addr, comp->len)) {
                        safe_addr = comp->addr + comp->len;
index 0dd48fbdbaa48499e18c8a304f70fddaae568fc5..d8984462071ffc8d6f09622eafa85d7c9ab4c303 100644 (file)
@@ -186,9 +186,9 @@ unsigned long get_random_base(unsigned long safe_addr)
         */
        memory_limit -= kasan_estimate_memory_needs(memory_limit);
 
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
-               if (safe_addr < INITRD_START + INITRD_SIZE)
-                       safe_addr = INITRD_START + INITRD_SIZE;
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
+               if (safe_addr < initrd_data.start + initrd_data.size)
+                       safe_addr = initrd_data.start + initrd_data.size;
        }
        safe_addr = ALIGN(safe_addr, THREAD_SIZE);
 
index 4e17adbde4954d3ea177e293aa49f1ee963b63ac..2f949cd9076b81dbc4b1536d25229e49e7515a8f 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
 #include <asm/sclp.h>
 #include <asm/sections.h>
 #include <asm/mem_detect.h>
@@ -24,9 +26,9 @@ static void *mem_detect_alloc_extended(void)
 {
        unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
 
-       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
-           INITRD_START < offset + ENTRIES_EXTENDED_MAX)
-               offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+           initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
+               offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
 
        return (void *)offset;
 }
index 3a46abed2549a259aa05e8fc7b15502335c385e9..209f6ae5a1972b5f7cf74dca8ab63c22b7b38995 100644 (file)
@@ -29,7 +29,6 @@ static char *symstart(char *p)
        return p + 1;
 }
 
-extern char _decompressor_syms_start[], _decompressor_syms_end[];
 static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
 {
        /* symbol entries are in a form "10000 c4 startup\0" */
@@ -126,8 +125,8 @@ out:
 
 static noinline void print_stacktrace(void)
 {
-       struct stack_info boot_stack = { STACK_TYPE_TASK, BOOT_STACK_OFFSET,
-                                        BOOT_STACK_OFFSET + BOOT_STACK_SIZE };
+       struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
+                                        (unsigned long)_stack_end };
        unsigned long sp = S390_lowcore.gpregs_save_area[15];
        bool first = true;
 
index 5a19fd7020b5e88a2434112218947d92c3b44673..6f30646afbd0915749be6ba62866cb6b1bcb2841 100644 (file)
@@ -1,2 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
+#include "boot.h"
 #include "../../../drivers/s390/char/sclp_early_core.c"
+
+/* SCLP early buffer must stay page-aligned and below 2 GB */
+static char __sclp_early_sccb[EXT_SCCB_READ_SCP] __aligned(PAGE_SIZE);
+
+void sclp_early_setup_buffer(void)
+{
+       sclp_early_set_buffer(&__sclp_early_sccb);
+}
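The new __sclp_early_sccb above relies on the kernel's __aligned() wrapper around the GCC alignment attribute to get a page-aligned static object. A small userspace sketch of the same attribute; PAGE_SIZE here is a hypothetical 4 KiB stand-in:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static char early_buf[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

    int main(void)
    {
            printf("buf at %p, page aligned: %s\n", (void *)early_buf,
                   ((unsigned long)early_buf % PAGE_SIZE) == 0 ? "yes" : "no");
            return 0;
    }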
index d0cf21641e3a9dd13ecbde5201ae98fb5687f224..6dc8d0a53864005800f7fdac618f1f879803d689 100644 (file)
@@ -12,9 +12,8 @@
 #include <asm/uv.h>
 #include "compressed/decompressor.h"
 #include "boot.h"
+#include "uv.h"
 
-extern char __boot_data_start[], __boot_data_end[];
-extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
 unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned long __bootdata_preserved(VMALLOC_START);
 unsigned long __bootdata_preserved(VMALLOC_END);
@@ -24,44 +23,11 @@ unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
 unsigned long __bootdata(ident_map_size);
 int __bootdata(is_full_image) = 1;
+struct initrd_data __bootdata(initrd_data);
 
 u64 __bootdata_preserved(stfle_fac_list[16]);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
-
-/*
- * Some code and data needs to stay below 2 GB, even when the kernel would be
- * relocated above 2 GB, because it has to use 31 bit addresses.
- * Such code and data is part of the .dma section, and its location is passed
- * over to the decompressed / relocated kernel via the .boot.preserved.data
- * section.
- */
-extern char _sdma[], _edma[];
-extern char _stext_dma[], _etext_dma[];
-extern struct exception_table_entry _start_dma_ex_table[];
-extern struct exception_table_entry _stop_dma_ex_table[];
-unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
-unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
-unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
-unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
-struct exception_table_entry *
-       __bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
-struct exception_table_entry *
-       __bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
-
-int _diag210_dma(struct diag210 *addr);
-int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
-int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
-void _diag0c_dma(struct hypfs_diag0c_entry *entry);
-void _diag308_reset_dma(void);
-struct diag_ops __bootdata_preserved(diag_dma_ops) = {
-       .diag210 = _diag210_dma,
-       .diag26c = _diag26c_dma,
-       .diag14 = _diag14_dma,
-       .diag0c = _diag0c_dma,
-       .diag308_reset = _diag308_reset_dma
-};
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
-struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
+struct oldmem_data __bootdata_preserved(oldmem_data);
 
 void error(char *x)
 {
@@ -91,12 +57,12 @@ static void rescue_initrd(unsigned long addr)
 {
        if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
                return;
-       if (!INITRD_START || !INITRD_SIZE)
+       if (!initrd_data.start || !initrd_data.size)
                return;
-       if (addr <= INITRD_START)
+       if (addr <= initrd_data.start)
                return;
-       memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
-       INITRD_START = addr;
+       memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
+       initrd_data.start = addr;
 }
 
 static void copy_bootdata(void)
@@ -169,9 +135,9 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
        ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
 
 #ifdef CONFIG_CRASH_DUMP
-       if (OLDMEM_BASE) {
+       if (oldmem_data.start) {
                kaslr_enabled = 0;
-               ident_map_size = min(ident_map_size, OLDMEM_SIZE);
+               ident_map_size = min(ident_map_size, oldmem_data.size);
        } else if (ipl_block_valid && is_ipl_block_dump()) {
                kaslr_enabled = 0;
                if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
@@ -282,12 +248,28 @@ static void setup_vmalloc_size(void)
        vmalloc_size = max(size, vmalloc_size);
 }
 
+static void offset_vmlinux_info(unsigned long offset)
+{
+       vmlinux.default_lma += offset;
+       *(unsigned long *)(&vmlinux.entry) += offset;
+       vmlinux.bootdata_off += offset;
+       vmlinux.bootdata_preserved_off += offset;
+       vmlinux.rela_dyn_start += offset;
+       vmlinux.rela_dyn_end += offset;
+       vmlinux.dynsym_start += offset;
+}
+
 void startup_kernel(void)
 {
        unsigned long random_lma;
        unsigned long safe_addr;
        void *img;
 
+       initrd_data.start = parmarea.initrd_start;
+       initrd_data.size = parmarea.initrd_size;
+       oldmem_data.start = parmarea.oldmem_base;
+       oldmem_data.size = parmarea.oldmem_size;
+
        setup_lpp();
        store_ipl_parmblock();
        safe_addr = mem_safe_offset();
@@ -297,23 +279,17 @@ void startup_kernel(void)
        sclp_early_read_info();
        setup_boot_command_line();
        parse_boot_command_line();
+       sanitize_prot_virt_host();
        setup_ident_map_size(detect_memory());
        setup_vmalloc_size();
        setup_kernel_memory_layout();
 
-       random_lma = __kaslr_offset = 0;
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
                random_lma = get_random_base(safe_addr);
                if (random_lma) {
                        __kaslr_offset = random_lma - vmlinux.default_lma;
                        img = (void *)vmlinux.default_lma;
-                       vmlinux.default_lma += __kaslr_offset;
-                       vmlinux.entry += __kaslr_offset;
-                       vmlinux.bootdata_off += __kaslr_offset;
-                       vmlinux.bootdata_preserved_off += __kaslr_offset;
-                       vmlinux.rela_dyn_start += __kaslr_offset;
-                       vmlinux.rela_dyn_end += __kaslr_offset;
-                       vmlinux.dynsym_start += __kaslr_offset;
+                       offset_vmlinux_info(__kaslr_offset);
                }
        }
 
index f6b0c4f43c99e78237927fe1b9b4463db880529f..e6be155ab2e52a4dc82dfd5d702349eb85a7248c 100644 (file)
@@ -1,8 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <asm/uv.h>
+#include <asm/boot_data.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
 
+#include "boot.h"
+#include "uv.h"
+
 /* will be used in arch/s390/kernel/uv.c */
 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
 int __bootdata_preserved(prot_virt_guest);
@@ -47,26 +51,34 @@ void uv_query_info(void)
 }
 
 #if IS_ENABLED(CONFIG_KVM)
-static bool has_uv_sec_stor_limit(void)
+void adjust_to_uv_max(unsigned long *vmax)
 {
-       /*
-        * keep these conditions in line with setup_uv()
-        */
-       if (!is_prot_virt_host())
-               return false;
+       if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
+               *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+}
 
+static int is_prot_virt_host_capable(void)
+{
+       /* disable if no prot_virt=1 given on command-line */
+       if (!is_prot_virt_host())
+               return 0;
+       /* disable if protected guest virtualization is enabled */
        if (is_prot_virt_guest())
-               return false;
-
+               return 0;
+       /* disable if no hardware support */
        if (!test_facility(158))
-               return false;
-
-       return !!uv_info.max_sec_stor_addr;
+               return 0;
+       /* disable if kdump */
+       if (oldmem_data.start)
+               return 0;
+       /* disable if stand-alone dump */
+       if (ipl_block_valid && is_ipl_block_dump())
+               return 0;
+       return 1;
 }
 
-void adjust_to_uv_max(unsigned long *vmax)
+void sanitize_prot_virt_host(void)
 {
-       if (has_uv_sec_stor_limit())
-               *vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
+       prot_virt_host = is_prot_virt_host_capable();
 }
 #endif
diff --git a/arch/s390/boot/uv.h b/arch/s390/boot/uv.h
new file mode 100644 (file)
index 0000000..690ce01
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_UV_H
+#define BOOT_UV_H
+
+#if IS_ENABLED(CONFIG_KVM)
+void adjust_to_uv_max(unsigned long *vmax);
+void sanitize_prot_virt_host(void);
+#else
+static inline void adjust_to_uv_max(unsigned long *vmax) {}
+static inline void sanitize_prot_virt_host(void) {}
+#endif
+
+#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
+void uv_query_info(void);
+#else
+static inline void uv_query_info(void) {}
+#endif
+
+#endif /* BOOT_UV_H */
index b88184019af9c04b403b930b4bb6bfb10ed92324..11ffc7c37ada71148b33b92737e1506e2c59cee9 100644 (file)
@@ -10,7 +10,6 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
-CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -75,7 +74,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_MODULE_SIG_SHA256=y
-CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -466,6 +464,7 @@ CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
 CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
index 1667a3cdcf0a168d0a83d2c67ef064aa802e9514..e1642d2cba599ecafac5020268790fd7d097665c 100644 (file)
@@ -8,7 +8,6 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
-CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
index 6c43d2ba207974617b604ed3108801dfb4921565..9a2786079e3a791f151900f0fb80aaaafefc9e97 100644 (file)
@@ -21,7 +21,7 @@
 static void diag0c_fn(void *data)
 {
        diag_stat_inc(DIAG_STAT_X00C);
-       diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
+       diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
 }
 
 /*
@@ -33,12 +33,12 @@ static void *diag0c_store(unsigned int *count)
        unsigned int cpu_count, cpu, i;
        void **cpu_vec;
 
-       get_online_cpus();
+       cpus_read_lock();
        cpu_count = num_online_cpus();
        cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
                                GFP_KERNEL);
        if (!cpu_vec)
-               goto fail_put_online_cpus;
+               goto fail_unlock_cpus;
        /* Note: Diag 0c needs 8 byte alignment and real storage */
        diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
                              GFP_KERNEL | GFP_DMA);
@@ -54,13 +54,13 @@ static void *diag0c_store(unsigned int *count)
        on_each_cpu(diag0c_fn, cpu_vec, 1);
        *count = cpu_count;
        kfree(cpu_vec);
-       put_online_cpus();
+       cpus_read_unlock();
        return diag0c_data;
 
 fail_kfree_cpu_vec:
        kfree(cpu_vec);
-fail_put_online_cpus:
-       put_online_cpus();
+fail_unlock_cpus:
+       cpus_read_unlock();
        return ERR_PTR(-ENOMEM);
 }
 
index f58c92f28701ff2a445f781800fb0b75685cd67d..1effac6a01520de46093dd4a0483eb8f0a97bc44 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef _ASM_S390_CIO_H_
 #define _ASM_S390_CIO_H_
 
-#include <linux/spinlock.h>
 #include <linux/bitops.h>
 #include <linux/genalloc.h>
 #include <asm/types.h>
index c0f3bfeddcbeb5762134063b910d66b115be7b49..646b12981f2080ce410a65f510a368e657990105 100644 (file)
@@ -173,17 +173,16 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
  */
 static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
-       register unsigned long r0 asm("0") = 0; /* query function */
-       register unsigned long r1 asm("1") = (unsigned long) mask;
-
        asm volatile(
-               "       spm 0\n" /* pckmo doesn't change the cc */
+               "       lghi    0,0\n" /* query function */
+               "       lgr     1,%[mask]\n"
+               "       spm     0\n" /* pckmo doesn't change the cc */
                /* Parameter regs are ignored, but must be nonzero and unique */
                "0:     .insn   rrf,%[opc] << 16,2,4,6,0\n"
                "       brc     1,0b\n" /* handle partial completion */
                : "=m" (*mask)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
-               : "cc");
+               : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
+               : "cc", "0", "1");
 }
 
 static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -249,20 +248,22 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
 static inline int cpacf_km(unsigned long func, void *param,
                           u8 *dest, const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KM)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KM)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -279,20 +280,22 @@ static inline int cpacf_km(unsigned long func, void *param,
 static inline int cpacf_kmc(unsigned long func, void *param,
                            u8 *dest, const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMC)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMC)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -306,17 +309,19 @@ static inline int cpacf_kmc(unsigned long func, void *param,
 static inline void cpacf_kimd(unsigned long func, void *param,
                              const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
+                 [opc] "i" (CPACF_KIMD)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -329,17 +334,19 @@ static inline void cpacf_kimd(unsigned long func, void *param,
 static inline void cpacf_klmd(unsigned long func, void *param,
                              const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KLMD)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -355,19 +362,21 @@ static inline void cpacf_klmd(unsigned long func, void *param,
 static inline int cpacf_kmac(unsigned long func, void *param,
                             const u8 *src, long src_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
+       union register_pair s;
 
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,%[src]\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMAC)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMAC)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -385,22 +394,24 @@ static inline int cpacf_kmac(unsigned long func, void *param,
 static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
                              const u8 *src, long src_len, u8 *counter)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) dest;
-       register unsigned long r6 asm("6") = (unsigned long) counter;
+       union register_pair d, s, c;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
+       c.even = (unsigned long)counter;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [src] "+a" (r2), [len] "+d" (r3),
-                 [dst] "+a" (r4), [ctr] "+a" (r6)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMCTR)
-               : "cc", "memory");
+               : [src] "+&d" (s.pair), [dst] "+&d" (d.pair),
+                 [ctr] "+&d" (c.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMCTR)
+               : "cc", "memory", "0", "1");
 
-       return src_len - r3;
+       return src_len - s.odd;
 }
 
 /**
@@ -417,20 +428,21 @@ static inline void cpacf_prno(unsigned long func, void *param,
                              u8 *dest, unsigned long dest_len,
                              const u8 *seed, unsigned long seed_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) dest;
-       register unsigned long r3 asm("3") = (unsigned long) dest_len;
-       register unsigned long r4 asm("4") = (unsigned long) seed;
-       register unsigned long r5 asm("5") = (unsigned long) seed_len;
+       union register_pair d, s;
 
+       d.even = (unsigned long)dest;
+       d.odd  = (unsigned long)dest_len;
+       s.even = (unsigned long)seed;
+       s.odd  = (unsigned long)seed_len;
        asm volatile (
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,%[dst],%[seed]\n"
                "       brc     1,0b\n"   /* handle partial completion */
-               : [dst] "+a" (r2), [dlen] "+d" (r3)
-               : [fc] "d" (r0), [pba] "a" (r1),
-                 [seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PRNO)
-               : "cc", "memory");
+               : [dst] "+&d" (d.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [seed] "d" (s.pair), [opc] "i" (CPACF_PRNO)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -443,19 +455,19 @@ static inline void cpacf_prno(unsigned long func, void *param,
 static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
                              u8 *cbuf, unsigned long cbuf_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) CPACF_PRNO_TRNG;
-       register unsigned long r2 asm("2") = (unsigned long) ucbuf;
-       register unsigned long r3 asm("3") = (unsigned long) ucbuf_len;
-       register unsigned long r4 asm("4") = (unsigned long) cbuf;
-       register unsigned long r5 asm("5") = (unsigned long) cbuf_len;
+       union register_pair u, c;
 
+       u.even = (unsigned long)ucbuf;
+       u.odd  = (unsigned long)ucbuf_len;
+       c.even = (unsigned long)cbuf;
+       c.odd  = (unsigned long)cbuf_len;
        asm volatile (
+               "       lghi    0,%[fc]\n"
                "0:     .insn   rre,%[opc] << 16,%[ucbuf],%[cbuf]\n"
                "       brc     1,0b\n"   /* handle partial completion */
-               : [ucbuf] "+a" (r2), [ucbuflen] "+d" (r3),
-                 [cbuf] "+a" (r4), [cbuflen] "+d" (r5)
-               : [fc] "d" (r0), [opc] "i" (CPACF_PRNO)
-               : "cc", "memory");
+               : [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair)
+               : [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO)
+               : "cc", "memory", "0");
 }
 
 /**
@@ -466,15 +478,15 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
  */
 static inline void cpacf_pcc(unsigned long func, void *param)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rre,%[opc] << 16,0,0\n" /* PCC opcode */
                "       brc     1,0b\n" /* handle partial completion */
                :
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
-               : "cc", "memory");
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_PCC)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -487,14 +499,14 @@ static inline void cpacf_pcc(unsigned long func, void *param)
  */
 static inline void cpacf_pckmo(long func, void *param)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "       .insn   rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
                :
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
-               : "cc", "memory");
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_PCKMO)
+               : "cc", "memory", "0", "1");
 }
 
 /**
@@ -512,21 +524,23 @@ static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
                             const u8 *src, unsigned long src_len,
                             const u8 *aad, unsigned long aad_len)
 {
-       register unsigned long r0 asm("0") = (unsigned long) func;
-       register unsigned long r1 asm("1") = (unsigned long) param;
-       register unsigned long r2 asm("2") = (unsigned long) src;
-       register unsigned long r3 asm("3") = (unsigned long) src_len;
-       register unsigned long r4 asm("4") = (unsigned long) aad;
-       register unsigned long r5 asm("5") = (unsigned long) aad_len;
-       register unsigned long r6 asm("6") = (unsigned long) dest;
+       union register_pair d, s, a;
 
+       d.even = (unsigned long)dest;
+       s.even = (unsigned long)src;
+       s.odd  = (unsigned long)src_len;
+       a.even = (unsigned long)aad;
+       a.odd  = (unsigned long)aad_len;
        asm volatile(
+               "       lgr     0,%[fc]\n"
+               "       lgr     1,%[pba]\n"
                "0:     .insn   rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
-                 [aad] "+a" (r4), [alen] "+d" (r5)
-               : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
-               : "cc", "memory");
+               : [dst] "+&d" (d.pair), [src] "+&d" (s.pair),
+                 [aad] "+&d" (a.pair)
+               : [fc] "d" (func), [pba] "d" ((unsigned long)param),
+                 [opc] "i" (CPACF_KMA)
+               : "cc", "memory", "0", "1");
 }
 
 #endif /* _ASM_S390_CPACF_H */
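The register_pair conversions above (together with the explicit lghi/lgr loads and the "0"/"1" clobbers) replace the deprecated register-asm local variables. For reference, a sketch of the union as defined in the s390 asm/types.h:

    /* A 128-bit value that the compiler allocates to an even/odd GPR pair
     * when the variable is used with a "d" constraint in inline asm. */
    union register_pair {
            unsigned __int128 pair;
            struct {
                    unsigned long even;
                    unsigned long odd;
            };
    };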
index 1d007c6ede954b609a350a28db5a52e11099aca5..14cfd48d598e1ed73f68b2a8fbf1409f20f6bad1 100644 (file)
@@ -23,7 +23,7 @@
 #define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap))
 #define MAX_CPU_FEATURES       MAX_ELF_HWCAP_FEATURES
 
-#define cpu_feature(feat)      ilog2(HWCAP_S390_ ## feat)
+#define cpu_feature(feat)      ilog2(HWCAP_ ## feat)
 
 int cpu_have_feature(unsigned int nr);
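With the HWCAP_S390_ prefix shortened to HWCAP_ (see the elf.h rework below), cpu_feature() still resolves to the bit number of the capability. A hypothetical probe, assuming the HWCAP_VXRS/HWCAP_NR_VXRS definitions introduced in this series:

    /* cpu_feature(VXRS) expands to ilog2(HWCAP_VXRS), i.e. HWCAP_NR_VXRS == 11 */
    if (cpu_have_feature(cpu_feature(VXRS)))
            pr_info("vector facility available\n");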
 
index adc0179fa34e98e91ae3216737949c7a63e4ad0b..04dc65f8901dcc24c9b5a23b26885215cb0b9d29 100644 (file)
@@ -111,6 +111,23 @@ union ctlreg2 {
        };
 };
 
+union ctlreg5 {
+       unsigned long val;
+       struct {
+               unsigned long       : 33;
+               unsigned long pasteo: 25;
+               unsigned long       : 6;
+       };
+};
+
+union ctlreg15 {
+       unsigned long val;
+       struct {
+               unsigned long lsea  : 61;
+               unsigned long       : 3;
+       };
+};
+
 #define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
 #define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
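The new unions follow the existing ctlreg0/ctlreg2 pattern: store a control register into .val, then read the named bit-field. A minimal sketch using the __ctl_store() helper from this header (helper per the existing s390 API; usage illustrative):

    static inline unsigned long get_lsea(void)
    {
            union ctlreg15 cr15;

            __ctl_store(cr15.val, 15, 15);  /* store control register 15 */
            return cr15.lsea;               /* extract the lsea bit-field */
    }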
 
index c1b82bcc017cf043989413c10025f5eb8e8e76ec..19a55e1e3a0c591994a7f8f7176e54704270daa8 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/time.h>
 #include <linux/refcount.h>
 #include <linux/fs.h>
+#include <linux/init.h>
 
 #define DEBUG_MAX_LEVEL                   6  /* debug levels range from 0 to 6 */
 #define DEBUG_OFF_LEVEL                   -1 /* level where debug is switched off */
@@ -391,38 +392,99 @@ int debug_register_view(debug_info_t *id, struct debug_view *view);
 
 int debug_unregister_view(debug_info_t *id, struct debug_view *view);
 
+#ifndef MODULE
+
+/*
+ * Note: Initial page and area numbers must be fixed to allow static
+ * initialization. This enables very early tracing. Changes to these values
+ * must be reflected in __DEFINE_STATIC_AREA.
+ */
+#define EARLY_PAGES            8
+#define EARLY_AREAS            1
+
+#define VNAME(var, suffix)     __##var##_##suffix
+
 /*
-   define the debug levels:
-   - 0 No debugging output to console or syslog
-   - 1 Log internal errors to syslog, ignore check conditions
-   - 2 Log internal errors and check conditions to syslog
-   - 3 Log internal errors to console, log check conditions to syslog
-   - 4 Log internal errors and check conditions to console
-   - 5 panic on internal errors, log check conditions to console
-   - 6 panic on both, internal errors and check conditions
+ * Define static areas for early trace data. During boot, debug_register_static()
+ * will replace these with dynamically allocated areas to allow custom page and
+ * area sizes, and dynamic resizing.
  */
+#define __DEFINE_STATIC_AREA(var)                                      \
+static char VNAME(var, data)[EARLY_PAGES][PAGE_SIZE] __initdata;       \
+static debug_entry_t *VNAME(var, pages)[EARLY_PAGES] __initdata = {    \
+       (debug_entry_t *)VNAME(var, data)[0],                           \
+       (debug_entry_t *)VNAME(var, data)[1],                           \
+       (debug_entry_t *)VNAME(var, data)[2],                           \
+       (debug_entry_t *)VNAME(var, data)[3],                           \
+       (debug_entry_t *)VNAME(var, data)[4],                           \
+       (debug_entry_t *)VNAME(var, data)[5],                           \
+       (debug_entry_t *)VNAME(var, data)[6],                           \
+       (debug_entry_t *)VNAME(var, data)[7],                           \
+};                                                                     \
+static debug_entry_t **VNAME(var, areas)[EARLY_AREAS] __initdata = {   \
+       (debug_entry_t **)VNAME(var, pages),                            \
+};                                                                     \
+static int VNAME(var, active_pages)[EARLY_AREAS] __initdata;           \
+static int VNAME(var, active_entries)[EARLY_AREAS] __initdata
+
+#define __DEBUG_INFO_INIT(var, _name, _buf_size) {                     \
+       .next = NULL,                                                   \
+       .prev = NULL,                                                   \
+       .ref_count = REFCOUNT_INIT(1),                                  \
+       .lock = __SPIN_LOCK_UNLOCKED(var.lock),                         \
+       .level = DEBUG_DEFAULT_LEVEL,                                   \
+       .nr_areas = EARLY_AREAS,                                        \
+       .pages_per_area = EARLY_PAGES,                                  \
+       .buf_size = (_buf_size),                                        \
+       .entry_size = sizeof(debug_entry_t) + (_buf_size),              \
+       .areas = VNAME(var, areas),                                     \
+       .active_area = 0,                                               \
+       .active_pages = VNAME(var, active_pages),                       \
+       .active_entries = VNAME(var, active_entries),                   \
+       .debugfs_root_entry = NULL,                                     \
+       .debugfs_entries = { NULL },                                    \
+       .views = { NULL },                                              \
+       .name = (_name),                                                \
+       .mode = 0600,                                                   \
+}
+
+#define __REGISTER_STATIC_DEBUG_INFO(var, name, pages, areas, view)    \
+static int __init VNAME(var, reg)(void)                                        \
+{                                                                      \
+       debug_register_static(&var, (pages), (areas));                  \
+       debug_register_view(&var, (view));                              \
+       return 0;                                                       \
+}                                                                      \
+arch_initcall(VNAME(var, reg))
+
+/**
+ * DEFINE_STATIC_DEBUG_INFO - Define static debug_info_t
+ *
+ * @var: Name of debug_info_t variable
+ * @name: Name of debug log (e.g. used for debugfs entry)
+ * @pages: Number of pages per area
+ * @nr_areas: Number of debug areas
+ * @buf_size: Size of data area in each debug entry
+ * @view: Pointer to debug view struct
+ *
+ * Define a static debug_info_t for early tracing. The associated debugfs log
+ * is automatically registered with the specified debug view.
+ *
+ * Important: Users of this macro must not call any of the
+ * debug_register/_unregister() functions for this debug_info_t!
+ *
+ * Note: Tracing will start with a fixed number of initial pages and areas.
+ * The debug area will be changed to use the specified numbers during
+ * arch_initcall.
+ */
+#define DEFINE_STATIC_DEBUG_INFO(var, name, pages, nr_areas, buf_size, view) \
+__DEFINE_STATIC_AREA(var);                                             \
+static debug_info_t __refdata var =                                    \
+       __DEBUG_INFO_INIT(var, (name), (buf_size));                     \
+__REGISTER_STATIC_DEBUG_INFO(var, name, pages, nr_areas, view)
+
+void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
 
-#ifndef DEBUG_LEVEL
-#define DEBUG_LEVEL 4
-#endif
-
-#define INTERNAL_ERRMSG(x,y...) "E" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_WRNMSG(x,y...) "W" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_INFMSG(x,y...) "I" __FILE__ "%d: " x, __LINE__, y
-#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
-
-#if DEBUG_LEVEL > 0
-#define PRINT_DEBUG(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_INFO(x...)       printk(KERN_INFO PRINTK_HEADER x)
-#define PRINT_WARN(x...)       printk(KERN_WARNING PRINTK_HEADER x)
-#define PRINT_ERR(x...)                printk(KERN_ERR PRINTK_HEADER x)
-#define PRINT_FATAL(x...)      panic(PRINTK_HEADER x)
-#else
-#define PRINT_DEBUG(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_INFO(x...)       printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_WARN(x...)       printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_ERR(x...)                printk(KERN_DEBUG PRINTK_HEADER x)
-#define PRINT_FATAL(x...)      printk(KERN_DEBUG PRINTK_HEADER x)
-#endif /* DASD_DEBUG */
+#endif /* MODULE */
 
 #endif /* DEBUG_H */
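With the machinery above, defining an early-boot log is a one-liner plus ordinary debug_event() calls. A hypothetical example (name, sizes and view chosen for illustration; debug_hex_ascii_view is the exported hex/ascii view):

    /* 16-byte payload per entry; resized to 4 pages x 1 area at arch_initcall */
    DEFINE_STATIC_DEBUG_INFO(foo_dbf, "foo_early", 4, 1, 16, &debug_hex_ascii_view);

    static void foo_trace(unsigned long val)
    {
            debug_event(&foo_dbf, 3, &val, sizeof(val));
    }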
index ca8f85b53a902f2368129d0008828b4ec1b7da8a..b3a8cb4daed6142fcbec753efb3b39b5528722cc 100644 (file)
@@ -309,6 +309,10 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode);
 
 struct hypfs_diag0c_entry;
 
+/*
+ * This structure must contain only pointers/references into
+ * the AMODE31 text section.
+ */
 struct diag_ops {
        int (*diag210)(struct diag210 *addr);
        int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
@@ -317,6 +321,13 @@ struct diag_ops {
        void (*diag308_reset)(void);
 };
 
-extern struct diag_ops diag_dma_ops;
-extern struct diag210 *__diag210_tmp_dma;
+extern struct diag_ops diag_amode31_ops;
+extern struct diag210 *__diag210_tmp_amode31;
+
+int _diag210_amode31(struct diag210 *addr);
+int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_amode31(void);
+
 #endif /* _ASM_S390_DIAG_H */
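Given the comment above, the diag.c definition is expected to wire every member to its matching _*_amode31 entry point declared here; roughly (any section annotation on the variable elided):

    struct diag_ops diag_amode31_ops = {
            .diag210        = _diag210_amode31,
            .diag26c        = _diag26c_amode31,
            .diag14         = _diag14_amode31,
            .diag0c         = _diag0c_amode31,
            .diag308_reset  = _diag308_reset_amode31,
    };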
index bd00c94620d3cd4b5fbead036fbbe4fb6982d221..70a30ae258b7e6f9ead9ebc06c8e4b7efae06f21 100644 (file)
 /* Keep this the last entry.  */
 #define R_390_NUM      61
 
-/* Bits present in AT_HWCAP. */
-#define HWCAP_S390_ESAN3       1
-#define HWCAP_S390_ZARCH       2
-#define HWCAP_S390_STFLE       4
-#define HWCAP_S390_MSA         8
-#define HWCAP_S390_LDISP       16
-#define HWCAP_S390_EIMM                32
-#define HWCAP_S390_DFP         64
-#define HWCAP_S390_HPAGE       128
-#define HWCAP_S390_ETF3EH      256
-#define HWCAP_S390_HIGH_GPRS   512
-#define HWCAP_S390_TE          1024
-#define HWCAP_S390_VXRS                2048
-#define HWCAP_S390_VXRS_BCD    4096
-#define HWCAP_S390_VXRS_EXT    8192
-#define HWCAP_S390_GS          16384
-#define HWCAP_S390_VXRS_EXT2   32768
-#define HWCAP_S390_VXRS_PDE    65536
-#define HWCAP_S390_SORT                131072
-#define HWCAP_S390_DFLT                262144
+enum {
+       HWCAP_NR_ESAN3          = 0,
+       HWCAP_NR_ZARCH          = 1,
+       HWCAP_NR_STFLE          = 2,
+       HWCAP_NR_MSA            = 3,
+       HWCAP_NR_LDISP          = 4,
+       HWCAP_NR_EIMM           = 5,
+       HWCAP_NR_DFP            = 6,
+       HWCAP_NR_HPAGE          = 7,
+       HWCAP_NR_ETF3EH         = 8,
+       HWCAP_NR_HIGH_GPRS      = 9,
+       HWCAP_NR_TE             = 10,
+       HWCAP_NR_VXRS           = 11,
+       HWCAP_NR_VXRS_BCD       = 12,
+       HWCAP_NR_VXRS_EXT       = 13,
+       HWCAP_NR_GS             = 14,
+       HWCAP_NR_VXRS_EXT2      = 15,
+       HWCAP_NR_VXRS_PDE       = 16,
+       HWCAP_NR_SORT           = 17,
+       HWCAP_NR_DFLT           = 18,
+       HWCAP_NR_VXRS_PDE2      = 19,
+       HWCAP_NR_NNPA           = 20,
+       HWCAP_NR_PCI_MIO        = 21,
+       HWCAP_NR_SIE            = 22,
+       HWCAP_NR_MAX
+};
 
-/* Internal bits, not exposed via elf */
-#define HWCAP_INT_SIE          1UL
+/* Bits present in AT_HWCAP. */
+#define HWCAP_ESAN3            BIT(HWCAP_NR_ESAN3)
+#define HWCAP_ZARCH            BIT(HWCAP_NR_ZARCH)
+#define HWCAP_STFLE            BIT(HWCAP_NR_STFLE)
+#define HWCAP_MSA              BIT(HWCAP_NR_MSA)
+#define HWCAP_LDISP            BIT(HWCAP_NR_LDISP)
+#define HWCAP_EIMM             BIT(HWCAP_NR_EIMM)
+#define HWCAP_DFP              BIT(HWCAP_NR_DFP)
+#define HWCAP_HPAGE            BIT(HWCAP_NR_HPAGE)
+#define HWCAP_ETF3EH           BIT(HWCAP_NR_ETF3EH)
+#define HWCAP_HIGH_GPRS                BIT(HWCAP_NR_HIGH_GPRS)
+#define HWCAP_TE               BIT(HWCAP_NR_TE)
+#define HWCAP_VXRS             BIT(HWCAP_NR_VXRS)
+#define HWCAP_VXRS_BCD         BIT(HWCAP_NR_VXRS_BCD)
+#define HWCAP_VXRS_EXT         BIT(HWCAP_NR_VXRS_EXT)
+#define HWCAP_GS               BIT(HWCAP_NR_GS)
+#define HWCAP_VXRS_EXT2                BIT(HWCAP_NR_VXRS_EXT2)
+#define HWCAP_VXRS_PDE         BIT(HWCAP_NR_VXRS_PDE)
+#define HWCAP_SORT             BIT(HWCAP_NR_SORT)
+#define HWCAP_DFLT             BIT(HWCAP_NR_DFLT)
+#define HWCAP_VXRS_PDE2                BIT(HWCAP_NR_VXRS_PDE2)
+#define HWCAP_NNPA             BIT(HWCAP_NR_NNPA)
+#define HWCAP_PCI_MIO          BIT(HWCAP_NR_PCI_MIO)
+#define HWCAP_SIE              BIT(HWCAP_NR_SIE)
 
 /*
  * These are used to set parameters in the core dumps.
@@ -209,10 +237,6 @@ struct arch_elf_state {
 extern unsigned long elf_hwcap;
 #define ELF_HWCAP (elf_hwcap)
 
-/* Internal hardware capabilities, not exposed via elf */
-
-extern unsigned long int_hwcap;
-
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
    intent than poking at uname or /proc/cpuinfo.
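Setting and testing capabilities is now plain bit arithmetic on elf_hwcap, with the former int_hwcap-only SIE bit folded in as a regular HWCAP_SIE. A hypothetical detection snippet:

    static void __init setup_hwcaps_example(void)   /* hypothetical helper */
    {
            elf_hwcap |= HWCAP_VXRS;        /* advertise the vector facility */
            if (elf_hwcap & HWCAP_SIE)      /* formerly HWCAP_INT_SIE in int_hwcap */
                    pr_info("sie capability detected\n");
    }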
index 3beb294fd553148486014d1a63057807066a20ac..16dc57dd90b303bdc6a89809915cdbbc807c3905 100644 (file)
@@ -28,8 +28,8 @@ struct exception_table_entry
        long handler;
 };
 
-extern struct exception_table_entry *__start_dma_ex_table;
-extern struct exception_table_entry *__stop_dma_ex_table;
+extern struct exception_table_entry *__start_amode31_ex_table;
+extern struct exception_table_entry *__stop_amode31_ex_table;
 
 const struct exception_table_entry *s390_search_extables(unsigned long addr);
 
index 345cbe982a8bfa6c995185fa082647ab95fecc55..e8b460f39c588414673b6456b6c356a95c7c593c 100644 (file)
@@ -18,7 +18,6 @@
 void ftrace_caller(void);
 
 extern char ftrace_graph_caller_end;
-extern unsigned long ftrace_plt;
 extern void *ftrace_func;
 
 struct dyn_arch_ftrace { };
@@ -31,10 +30,11 @@ struct dyn_arch_ftrace { };
 
 struct module;
 struct dyn_ftrace;
-/*
- * Either -mhotpatch or -mnop-mcount is used - no explicit init is required
- */
-static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { return 0; }
+
+bool ftrace_need_init_nop(void);
+#define ftrace_need_init_nop ftrace_need_init_nop
+
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 #define ftrace_init_nop ftrace_init_nop
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
@@ -42,42 +42,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
        return addr;
 }
 
-struct ftrace_insn {
-       u16 opc;
-       s32 disp;
-} __packed;
-
-static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       /* brcl 0,0 */
-       insn->opc = 0xc004;
-       insn->disp = 0;
-#endif
-}
-
-static inline int is_ftrace_nop(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       if (insn->disp == 0)
-               return 1;
-#endif
-       return 0;
-}
-
-static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
-                                            unsigned long ip)
-{
-#ifdef CONFIG_FUNCTION_TRACER
-       unsigned long target;
-
-       /* brasl r0,ftrace_caller */
-       target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
-       insn->opc = 0xc005;
-       insn->disp = (target - ip) / 2;
-#endif
-}
-
 /*
  * Even though the system call numbers are identical for s390/s390x a
  * different system call table is used for compat tasks. This may lead
diff --git a/arch/s390/include/asm/ftrace.lds.h b/arch/s390/include/asm/ftrace.lds.h
new file mode 100644 (file)
index 0000000..968adfd
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+
+#define SIZEOF_MCOUNT_LOC_ENTRY 8
+#define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE 24
+#define FTRACE_HOTPATCH_TRAMPOLINES_SIZE(n)                                   \
+       DIV_ROUND_UP(SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE * (n),                  \
+                    SIZEOF_MCOUNT_LOC_ENTRY)
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT                                      \
+       . = ALIGN(8);                                                          \
+       __ftrace_hotpatch_trampolines_start = .;                               \
+       . = . + FTRACE_HOTPATCH_TRAMPOLINES_SIZE(__stop_mcount_loc -           \
+                                                __start_mcount_loc);          \
+       __ftrace_hotpatch_trampolines_end = .;
+#else
+#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT
+#endif
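The size helper takes the byte size of the __mcount_loc table (8 bytes per traced function) and reserves one 24-byte trampoline per entry. Checking the arithmetic for an assumed 1000 traced functions:

    /* __stop_mcount_loc - __start_mcount_loc = 1000 * 8 = 8000 bytes
     * FTRACE_HOTPATCH_TRAMPOLINES_SIZE(8000) = DIV_ROUND_UP(24 * 8000, 8)
     *                                        = 24000 bytes = 1000 trampolines
     */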
index a9e2c7295b3518e91db012b6d0a06077c0b6fed5..3f8ee257f9aa3629e33efa472722ee895c9e6daa 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/types.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
+#include <asm/page.h>
 #include <uapi/asm/ipl.h>
 
 struct ipl_parameter_block {
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
new file mode 100644 (file)
index 0000000..d55ba87
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_KFENCE_H
+#define _ASM_S390_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/kfence.h>
+#include <asm/set_memory.h>
+#include <asm/page.h>
+
+void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static __always_inline bool arch_kfence_init_pool(void)
+{
+       return true;
+}
+
+#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
+
+/*
+ * Do not split the kfence pool into a 4k mapping via arch_kfence_init_pool(),
+ * but earlier, while page table allocations still happen with memblock.
+ * The reason is that arch_kfence_init_pool() gets called when the system
+ * is still in a limbo state - disabling and enabling bottom halves is
+ * not yet allowed, but that is what our page_table_alloc() would do.
+ */
+static __always_inline void kfence_split_mapping(void)
+{
+#ifdef CONFIG_KFENCE
+       unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
+
+       set_memory_4k((unsigned long)__kfence_pool, pool_pages);
+#endif
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+       __kernel_map_pages(virt_to_page(addr), 1, !protect);
+       return true;
+}
+
+#endif /* _ASM_S390_KFENCE_H */
index cbc7c3a68e4dfdec0bdd76a767f0843afa1cd6f6..df73a052760cfc28ba24407c9d4a5d65448bc3ff 100644 (file)
 #include <uapi/asm/kvm_para.h>
 #include <asm/diag.h>
 
-static inline long __kvm_hypercall0(unsigned long nr)
-{
-       register unsigned long __nr asm("1") = nr;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr): "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall0(unsigned long nr)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall0(nr);
-}
-
-static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall1(nr, p1);
-}
-
-static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1,
-                              unsigned long p2)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
-                     : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
-                              unsigned long p2)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall2(nr, p1, p2);
-}
-
-static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall3(nr, p1, p2, p3);
-}
-
-static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4) : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall4(nr, p1, p2, p3, p4);
-}
-
-static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register unsigned long __p5 asm("6") = p5;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4), "d" (__p5)  : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall5(nr, p1, p2, p3, p4, p5);
-}
-
-static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5,
-                              unsigned long p6)
-{
-       register unsigned long __nr asm("1") = nr;
-       register unsigned long __p1 asm("2") = p1;
-       register unsigned long __p2 asm("3") = p2;
-       register unsigned long __p3 asm("4") = p3;
-       register unsigned long __p4 asm("5") = p4;
-       register unsigned long __p5 asm("6") = p5;
-       register unsigned long __p6 asm("7") = p6;
-       register long __rc asm("2");
-
-       asm volatile ("diag 2,4,0x500\n"
-                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
-                       "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
-                     : "memory", "cc");
-       return __rc;
-}
-
-static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
-                              unsigned long p2, unsigned long p3,
-                              unsigned long p4, unsigned long p5,
-                              unsigned long p6)
-{
-       diag_stat_inc(DIAG_STAT_X500);
-       return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6);
-}
+#define HYPERCALL_FMT_0
+#define HYPERCALL_FMT_1 , "0" (r2)
+#define HYPERCALL_FMT_2 , "d" (r3) HYPERCALL_FMT_1
+#define HYPERCALL_FMT_3 , "d" (r4) HYPERCALL_FMT_2
+#define HYPERCALL_FMT_4 , "d" (r5) HYPERCALL_FMT_3
+#define HYPERCALL_FMT_5 , "d" (r6) HYPERCALL_FMT_4
+#define HYPERCALL_FMT_6 , "d" (r7) HYPERCALL_FMT_5
+
+#define HYPERCALL_PARM_0
+#define HYPERCALL_PARM_1 , unsigned long arg1
+#define HYPERCALL_PARM_2 HYPERCALL_PARM_1, unsigned long arg2
+#define HYPERCALL_PARM_3 HYPERCALL_PARM_2, unsigned long arg3
+#define HYPERCALL_PARM_4 HYPERCALL_PARM_3, unsigned long arg4
+#define HYPERCALL_PARM_5 HYPERCALL_PARM_4, unsigned long arg5
+#define HYPERCALL_PARM_6 HYPERCALL_PARM_5, unsigned long arg6
+
+#define HYPERCALL_REGS_0
+#define HYPERCALL_REGS_1                                               \
+       register unsigned long r2 asm("2") = arg1
+#define HYPERCALL_REGS_2                                               \
+       HYPERCALL_REGS_1;                                               \
+       register unsigned long r3 asm("3") = arg2
+#define HYPERCALL_REGS_3                                               \
+       HYPERCALL_REGS_2;                                               \
+       register unsigned long r4 asm("4") = arg3
+#define HYPERCALL_REGS_4                                               \
+       HYPERCALL_REGS_3;                                               \
+       register unsigned long r5 asm("5") = arg4
+#define HYPERCALL_REGS_5                                               \
+       HYPERCALL_REGS_4;                                               \
+       register unsigned long r6 asm("6") = arg5
+#define HYPERCALL_REGS_6                                               \
+       HYPERCALL_REGS_5;                                               \
+       register unsigned long r7 asm("7") = arg6
+
+#define HYPERCALL_ARGS_0
+#define HYPERCALL_ARGS_1 , arg1
+#define HYPERCALL_ARGS_2 HYPERCALL_ARGS_1, arg2
+#define HYPERCALL_ARGS_3 HYPERCALL_ARGS_2, arg3
+#define HYPERCALL_ARGS_4 HYPERCALL_ARGS_3, arg4
+#define HYPERCALL_ARGS_5 HYPERCALL_ARGS_4, arg5
+#define HYPERCALL_ARGS_6 HYPERCALL_ARGS_5, arg6
+
+#define GENERATE_KVM_HYPERCALL_FUNC(args)                              \
+static inline                                                          \
+long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args)     \
+{                                                                      \
+       register unsigned long __nr asm("1") = nr;                      \
+       register long __rc asm("2");                                    \
+       HYPERCALL_REGS_##args;                                          \
+                                                                       \
+       asm volatile (                                                  \
+               "       diag    2,4,0x500\n"                            \
+               : "=d" (__rc)                                           \
+               : "d" (__nr) HYPERCALL_FMT_##args                       \
+               : "memory", "cc");                                      \
+       return __rc;                                                    \
+}                                                                      \
+                                                                       \
+static inline                                                          \
+long kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args)       \
+{                                                                      \
+       diag_stat_inc(DIAG_STAT_X500);                                  \
+       return __kvm_hypercall##args(nr HYPERCALL_ARGS_##args);         \
+}
+
+GENERATE_KVM_HYPERCALL_FUNC(0)
+GENERATE_KVM_HYPERCALL_FUNC(1)
+GENERATE_KVM_HYPERCALL_FUNC(2)
+GENERATE_KVM_HYPERCALL_FUNC(3)
+GENERATE_KVM_HYPERCALL_FUNC(4)
+GENERATE_KVM_HYPERCALL_FUNC(5)
+GENERATE_KVM_HYPERCALL_FUNC(6)
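For illustration, expanding GENERATE_KVM_HYPERCALL_FUNC(2) reproduces the shape of the old open-coded helper:

    static inline long __kvm_hypercall2(unsigned long nr, unsigned long arg1,
                                        unsigned long arg2)
    {
            register unsigned long __nr asm("1") = nr;
            register long __rc asm("2");
            register unsigned long r2 asm("2") = arg1;
            register unsigned long r3 asm("3") = arg2;

            asm volatile (
                    "       diag    2,4,0x500\n"
                    : "=d" (__rc)
                    : "d" (__nr), "d" (r3), "0" (r2)
                    : "memory", "cc");
            return __rc;
    }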
 
 /* kvm on s390 is always paravirtualization enabled */
 static inline int kvm_para_available(void)
index 24e8fed150cf8b2d73cb068cbf11a69a63844756..1ffea75b8ebca57d92c6a034e0eb7da2691aec6f 100644 (file)
@@ -22,7 +22,7 @@
 
 #define EX_TABLE(_fault, _target)                                      \
        __EX_TABLE(__ex_table, _fault, _target)
-#define EX_TABLE_DMA(_fault, _target)                                  \
-       __EX_TABLE(.dma.ex_table, _fault, _target)
+#define EX_TABLE_AMODE31(_fault, _target)                              \
+       __EX_TABLE(.amode31.ex_table, _fault, _target)
 
 #endif
index 47bde5a20a41cf92bacaa23adca7e2d6051628f4..11213c8bfca56e4640aa7dba443dd1f4c62a64e8 100644 (file)
@@ -124,7 +124,8 @@ struct lowcore {
        /* Restart function and parameter. */
        __u64   restart_fn;                     /* 0x0370 */
        __u64   restart_data;                   /* 0x0378 */
-       __u64   restart_source;                 /* 0x0380 */
+       __u32   restart_source;                 /* 0x0380 */
+       __u32   restart_flags;                  /* 0x0384 */
 
        /* Address space pointer. */
        __u64   kernel_asce;                    /* 0x0388 */
index e0a6d29846e260edd7842cbc59cb3f3dba2b7536..9f1eea15872ca36a720a0c5963f21ebb26b64627 100644 (file)
@@ -8,16 +8,14 @@
  * This file contains the s390 architecture specific module code.
  */
 
-struct mod_arch_syminfo
-{
+struct mod_arch_syminfo {
        unsigned long got_offset;
        unsigned long plt_offset;
        int got_initialized;
        int plt_initialized;
 };
 
-struct mod_arch_specific
-{
+struct mod_arch_specific {
        /* Starting offset of got in the module core memory. */
        unsigned long got_offset;
        /* Starting offset of plt in the module core memory. */
@@ -30,6 +28,14 @@ struct mod_arch_specific
        int nsyms;
        /* Additional symbol information (got and plt offsets). */
        struct mod_arch_syminfo *syminfo;
+#ifdef CONFIG_FUNCTION_TRACER
+       /* Start of memory reserved for ftrace hotpatch trampolines. */
+       struct ftrace_hotpatch_trampoline *trampolines_start;
+       /* End of memory reserved for ftrace hotpatch trampolines. */
+       struct ftrace_hotpatch_trampoline *trampolines_end;
+       /* Next unused ftrace hotpatch trampoline slot. */
+       struct ftrace_hotpatch_trampoline *next_trampoline;
+#endif /* CONFIG_FUNCTION_TRACER */
 };
 
 #endif /* _ASM_S390_MODULE_H */
index 3ba945c6b9dc54fef82526b650b986c40f39e2d3..d98d17a36c7b83d2398bb33b2100e0c73968c144 100644 (file)
@@ -144,9 +144,6 @@ struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
 void arch_set_page_dat(struct page *page, int order);
-void arch_set_page_nodat(struct page *page, int order);
-int arch_test_page_nodat(struct page *page);
-void arch_set_page_states(int make_stable);
 
 static inline int devmem_is_allowed(unsigned long pfn)
 {
index 5509b224c2ecaac9757876d4c3bb95b6d41a7a7f..e4803ec51110c65252f14e82bcb6e089661eb5cc 100644 (file)
@@ -216,9 +216,10 @@ void zpci_remove_reserved_devices(void);
 int clp_setup_writeback_mio(void);
 int clp_scan_pci_devices(void);
 int clp_query_pci_fn(struct zpci_dev *zdev);
-int clp_enable_fh(struct zpci_dev *, u8);
-int clp_disable_fh(struct zpci_dev *);
+int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
+int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
 int clp_get_state(u32 fid, enum zpci_state *state);
+int clp_refresh_fh(u32 fid, u32 *fh);
 
 /* UID */
 void update_uid_checking(bool new);
@@ -271,6 +272,8 @@ struct zpci_dev *get_zdev_by_fid(u32);
 /* DMA */
 int zpci_dma_init(void);
 void zpci_dma_exit(void);
+int zpci_dma_init_device(struct zpci_dev *zdev);
+int zpci_dma_exit_device(struct zpci_dev *zdev);
 
 /* IRQ */
 int __init zpci_irq_init(void);
index f62cd3ed2d443589f00620cecd27a0dd1c622be6..3b8e89d4578a8857f8ff66641ce9306c30235219 100644 (file)
@@ -182,8 +182,6 @@ static inline unsigned long *get_st_pto(unsigned long entry)
 }
 
 /* Prototypes */
-int zpci_dma_init_device(struct zpci_dev *);
-void zpci_dma_exit_device(struct zpci_dev *);
 void dma_free_seg_table(unsigned long);
 unsigned long *dma_alloc_cpu_table(void);
 void dma_cleanup_tables(unsigned long *);
index dcac7b2df72ce798594cccf4243b154f18dc25d3..b61426c9ef1780b7d5a489fffe0c2c78700092d0 100644 (file)
@@ -67,15 +67,15 @@ extern unsigned long zero_page_mask;
 /* TODO: s390 cannot support io_remap_pfn_range... */
 
 #define pte_ERROR(e) \
-       printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
+       pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
-       printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
+       pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pud_ERROR(e) \
-       printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
+       pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
 #define p4d_ERROR(e) \
-       printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
+       pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
 #define pgd_ERROR(e) \
-       printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
+       pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 /*
  * The vmalloc and module area will always be on the topmost area of the
index ddc7858bbce40eaad2b64c63a0e2383bd0d7a208..879b8e3f609cd53abea5f44e211ed29e1d5b235c 100644 (file)
@@ -26,6 +26,8 @@
 #define _CIF_MCCK_GUEST                BIT(CIF_MCCK_GUEST)
 #define _CIF_DEDICATED_CPU     BIT(CIF_DEDICATED_CPU)
 
+#define RESTART_FLAG_CTLREGS   _AC(1 << 0, U)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/cpumask.h>
index cb4f73c7228db6ca03ea4ad80b7b238f0b3df1bb..25b5dc34db75f5780a5d1a0ff9e197823b799aa7 100644 (file)
@@ -291,16 +291,15 @@ struct qdio_ssqd_desc {
 typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
                            int, int, unsigned long);
 
-/* qdio errors reported to the upper-layer program */
+/* qdio errors reported through the queue handlers: */
 #define QDIO_ERROR_ACTIVATE                    0x0001
 #define QDIO_ERROR_GET_BUF_STATE               0x0002
 #define QDIO_ERROR_SET_BUF_STATE               0x0004
+
+/* extra info for completed SBALs: */
 #define QDIO_ERROR_SLSB_STATE                  0x0100
 #define QDIO_ERROR_SLSB_PENDING                        0x0200
 
-#define QDIO_ERROR_FATAL                       0x00ff
-#define QDIO_ERROR_TEMPORARY                   0xff00
-
 /* for qdio_cleanup */
 #define QDIO_FLAG_CLEANUP_USING_CLEAR          0x01
 #define QDIO_FLAG_CLEANUP_USING_HALT           0x02
@@ -312,8 +311,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @qib_param_field_format: format for qib_parm_field
  * @qib_param_field: pointer to 128 bytes or NULL, if no param field
  * @qib_rflags: rflags to set
- * @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
- * @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
  * @no_input_qs: number of input queues
  * @no_output_qs: number of output queues
  * @input_handler: handler to be called for input queues
@@ -330,27 +327,18 @@ struct qdio_initialize {
        unsigned int qib_param_field_format;
        unsigned char *qib_param_field;
        unsigned char qib_rflags;
-       unsigned long *input_slib_elements;
-       unsigned long *output_slib_elements;
        unsigned int no_input_qs;
        unsigned int no_output_qs;
        qdio_handler_t *input_handler;
        qdio_handler_t *output_handler;
        void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
-       unsigned int scan_threshold;
        unsigned long int_parm;
        struct qdio_buffer ***input_sbal_addr_array;
        struct qdio_buffer ***output_sbal_addr_array;
 };
 
-#define QDIO_STATE_INACTIVE            0x00000002 /* after qdio_cleanup */
-#define QDIO_STATE_ESTABLISHED         0x00000004 /* after qdio_establish */
-#define QDIO_STATE_ACTIVE              0x00000008 /* after qdio_activate */
-#define QDIO_STATE_STOPPED             0x00000010 /* after queues went down */
-
 #define QDIO_FLAG_SYNC_INPUT           0x01
 #define QDIO_FLAG_SYNC_OUTPUT          0x02
-#define QDIO_FLAG_PCI_OUT              0x10
 
 int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
 void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
@@ -367,7 +355,6 @@ extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
                   unsigned int bufnr, unsigned int count, struct qaob *aob);
 extern int qdio_start_irq(struct ccw_device *cdev);
 extern int qdio_stop_irq(struct ccw_device *cdev);
-extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
                              bool is_input, unsigned int *bufnr,
                              unsigned int *error);
index 5763769a39b65fddbd25af5d8ad5a9af00f2f2f7..e3ae937bef1c6e6bd6fcaeada43e652bdf774a5f 100644 (file)
@@ -8,8 +8,6 @@
 #define _ASM_S390_SCLP_H
 
 #include <linux/types.h>
-#include <asm/chpid.h>
-#include <asm/cpu.h>
 
 #define SCLP_CHP_INFO_MASK_SIZE                32
 #define EARLY_SCCB_SIZE                PAGE_SIZE
 /* 24 + 16 * SCLP_MAX_CORES */
 #define EXT_SCCB_READ_CPU      (3 * PAGE_SIZE)
 
+#ifndef __ASSEMBLY__
+#include <asm/chpid.h>
+#include <asm/cpu.h>
+
 struct sclp_chp_info {
        u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
        u8 standby[SCLP_CHP_INFO_MASK_SIZE];
@@ -113,6 +115,9 @@ struct zpci_report_error_header {
        u8 data[0];     /* Subsequent Data passed verbatim to SCLP ET 24 */
 } __packed;
 
+extern char *sclp_early_sccb;
+
+void sclp_early_set_buffer(void *sccb);
 int sclp_early_read_info(void);
 int sclp_early_read_storage_info(void);
 int sclp_early_get_core_info(struct sclp_core_info *info);
@@ -147,4 +152,5 @@ static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
        return _sclp_get_core_info(info);
 }
 
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_SCLP_H */
index 0c2151451ba593be330d1252f8609d9c53cb762b..85881dd48022a03df5706a6f47b72a331b7e397d 100644 (file)
@@ -35,7 +35,7 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
  */
 #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
 
-extern unsigned long __sdma, __edma;
-extern unsigned long __stext_dma, __etext_dma;
+extern unsigned long __samode31, __eamode31;
+extern unsigned long __stext_amode31, __etext_amode31;
 
 #endif
index a22a5a81811cf289449512f250dc446787831fcf..950d87bd997a7b32a0d055038ad567b5a0418be5 100644 (file)
@@ -10,6 +10,7 @@ extern struct mutex cpa_mutex;
 #define SET_MEMORY_RW  2UL
 #define SET_MEMORY_NX  4UL
 #define SET_MEMORY_X   8UL
+#define SET_MEMORY_4K  16UL
 
 int __set_memory(unsigned long addr, int numpages, unsigned long flags);
 
@@ -33,4 +34,9 @@ static inline int set_memory_x(unsigned long addr, int numpages)
        return __set_memory(addr, numpages, SET_MEMORY_X);
 }
 
+static inline int set_memory_4k(unsigned long addr, int numpages)
+{
+       return __set_memory(addr, numpages, SET_MEMORY_4K);
+}
+
 #endif
index 3a77aa96d09251bc90619d29ba05b759e02baef5..b6606ffd85d898ba7ae9593b27e0d5cc9aac4df0 100644 (file)
 #include <uapi/asm/setup.h>
 #include <linux/build_bug.h>
 
-#define EP_OFFSET              0x10008
-#define EP_STRING              "S390EP"
 #define PARMAREA               0x10400
-#define EARLY_SCCB_OFFSET      0x11000
-#define HEAD_END               0x12000
+#define HEAD_END               0x11000
 
 /*
  * Machine features detected in early.c
@@ -36,6 +33,7 @@
 #define MACHINE_FLAG_NX                BIT(15)
 #define MACHINE_FLAG_GS                BIT(16)
 #define MACHINE_FLAG_SCC       BIT(17)
+#define MACHINE_FLAG_PCI_MIO   BIT(18)
 
 #define LPP_MAGIC              BIT(31)
 #define LPP_PID_MASK           _AC(0xffffffff, UL)
 #define STARTUP_NORMAL_OFFSET  0x10000
 #define STARTUP_KDUMP_OFFSET   0x10010
 
-/* Offsets to parameters in kernel/head.S  */
-
-#define IPL_DEVICE_OFFSET      0x10400
-#define INITRD_START_OFFSET    0x10408
-#define INITRD_SIZE_OFFSET     0x10410
-#define OLDMEM_BASE_OFFSET     0x10418
-#define OLDMEM_SIZE_OFFSET     0x10420
-#define KERNEL_VERSION_OFFSET  0x10428
-#define COMMAND_LINE_OFFSET    0x10480
-
 #ifndef __ASSEMBLY__
 
 #include <asm/lowcore.h>
 #include <asm/types.h>
 
-#define IPL_DEVICE     (*(unsigned long *)  (IPL_DEVICE_OFFSET))
-#define INITRD_START   (*(unsigned long *)  (INITRD_START_OFFSET))
-#define INITRD_SIZE    (*(unsigned long *)  (INITRD_SIZE_OFFSET))
-#define OLDMEM_BASE    (*(unsigned long *)  (OLDMEM_BASE_OFFSET))
-#define OLDMEM_SIZE    (*(unsigned long *)  (OLDMEM_SIZE_OFFSET))
-#define COMMAND_LINE   ((char *)            (COMMAND_LINE_OFFSET))
-
 struct parmarea {
        unsigned long ipl_device;                       /* 0x10400 */
        unsigned long initrd_start;                     /* 0x10408 */
@@ -110,6 +91,7 @@ extern unsigned long mio_wb_bit_mask;
 #define MACHINE_HAS_NX         (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
 #define MACHINE_HAS_GS         (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
 #define MACHINE_HAS_SCC                (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
+#define MACHINE_HAS_PCI_MIO    (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
 
 /*
  * Console mode. Override with conmode=
@@ -161,20 +143,22 @@ static inline unsigned long kaslr_offset(void)
 
 extern int is_full_image;
 
+struct initrd_data {
+       unsigned long start;
+       unsigned long size;
+};
+extern struct initrd_data initrd_data;
+
+struct oldmem_data {
+       unsigned long start;
+       unsigned long size;
+};
+extern struct oldmem_data oldmem_data;
+
 static inline u32 gen_lpswe(unsigned long addr)
 {
        BUILD_BUG_ON(addr > 0xfff);
        return 0xb2b20000 | addr;
 }
-
-#else /* __ASSEMBLY__ */
-
-#define IPL_DEVICE     (IPL_DEVICE_OFFSET)
-#define INITRD_START   (INITRD_START_OFFSET)
-#define INITRD_SIZE    (INITRD_SIZE_OFFSET)
-#define OLDMEM_BASE    (OLDMEM_BASE_OFFSET)
-#define OLDMEM_SIZE    (OLDMEM_SIZE_OFFSET)
-#define COMMAND_LINE   (COMMAND_LINE_OFFSET)
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_SETUP_H */
index 9107e3dab68c43adac90879a2bf9337f63d8a036..b3dd883699e71f1ee221a56852f2dceb02af2560 100644 (file)
@@ -104,4 +104,63 @@ static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
        return false;
 }
 
+#define SYSCALL_FMT_0
+#define SYSCALL_FMT_1 , "0" (r2)
+#define SYSCALL_FMT_2 , "d" (r3) SYSCALL_FMT_1
+#define SYSCALL_FMT_3 , "d" (r4) SYSCALL_FMT_2
+#define SYSCALL_FMT_4 , "d" (r5) SYSCALL_FMT_3
+#define SYSCALL_FMT_5 , "d" (r6) SYSCALL_FMT_4
+#define SYSCALL_FMT_6 , "d" (r7) SYSCALL_FMT_5
+
+#define SYSCALL_PARM_0
+#define SYSCALL_PARM_1 , long arg1
+#define SYSCALL_PARM_2 SYSCALL_PARM_1, long arg2
+#define SYSCALL_PARM_3 SYSCALL_PARM_2, long arg3
+#define SYSCALL_PARM_4 SYSCALL_PARM_3, long arg4
+#define SYSCALL_PARM_5 SYSCALL_PARM_4, long arg5
+#define SYSCALL_PARM_6 SYSCALL_PARM_5, long arg6
+
+#define SYSCALL_REGS_0
+#define SYSCALL_REGS_1                                                 \
+       register long r2 asm("2") = arg1
+#define SYSCALL_REGS_2                                                 \
+       SYSCALL_REGS_1;                                                 \
+       register long r3 asm("3") = arg2
+#define SYSCALL_REGS_3                                                 \
+       SYSCALL_REGS_2;                                                 \
+       register long r4 asm("4") = arg3
+#define SYSCALL_REGS_4                                                 \
+       SYSCALL_REGS_3;                                                 \
+       register long r5 asm("5") = arg4
+#define SYSCALL_REGS_5                                                 \
+       SYSCALL_REGS_4;                                                 \
+       register long r6 asm("6") = arg5
+#define SYSCALL_REGS_6                                                 \
+       SYSCALL_REGS_5;                                                 \
+       register long r7 asm("7") = arg6
+
+#define GENERATE_SYSCALL_FUNC(nr)                                      \
+static __always_inline                                                 \
+long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr)              \
+{                                                                      \
+       register unsigned long r1 asm ("1") = syscall;                  \
+       register long rc asm ("2");                                     \
+       SYSCALL_REGS_##nr;                                              \
+                                                                       \
+       asm volatile (                                                  \
+               "       svc     0\n"                                    \
+               : "=d" (rc)                                             \
+               : "d" (r1) SYSCALL_FMT_##nr                             \
+               : "memory");                                            \
+       return rc;                                                      \
+}
+
+GENERATE_SYSCALL_FUNC(0)
+GENERATE_SYSCALL_FUNC(1)
+GENERATE_SYSCALL_FUNC(2)
+GENERATE_SYSCALL_FUNC(3)
+GENERATE_SYSCALL_FUNC(4)
+GENERATE_SYSCALL_FUNC(5)
+GENERATE_SYSCALL_FUNC(6)
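As with the KVM hypercall macros, each instantiation expands to a thin svc wrapper; syscall1(), for example, comes out as:

    static __always_inline long syscall1(unsigned long syscall, long arg1)
    {
            register unsigned long r1 asm("1") = syscall;
            register long rc asm("2");
            register long r2 asm("2") = arg1;

            asm volatile (
                    "       svc     0\n"
                    : "=d" (rc)
                    : "d" (r1), "0" (r2)
                    : "memory");
            return rc;
    }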
+
 #endif /* _ASM_SYSCALL_H */
index 12c5f006c1364b714c4a7187b659c3c2fb71d0d2..fe92a4caf5ec8b38316282350988f2a89643e382 100644 (file)
@@ -356,11 +356,9 @@ int uv_convert_from_secure(unsigned long paddr);
 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
 
 void setup_uv(void);
-void adjust_to_uv_max(unsigned long *vmax);
 #else
 #define is_prot_virt_host() 0
 static inline void setup_uv(void) {}
-static inline void adjust_to_uv_max(unsigned long *vmax) {}
 
 static inline int uv_destroy_page(unsigned long paddr)
 {
@@ -373,10 +371,4 @@ static inline int uv_convert_from_secure(unsigned long paddr)
 }
 #endif
 
-#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
-void uv_query_info(void);
-#else
-static inline void uv_query_info(void) {}
-#endif
-
 #endif /* _ASM_S390_UV_H */
index d6465b22ffe30deb6ce6a5c1ae78fa7541819709..db84942eb78ffbd9312c60b31991f364b789de8f 100644 (file)
@@ -6,6 +6,7 @@
 
 #define VDSO_HAS_CLOCK_GETRES 1
 
+#include <asm/syscall.h>
 #include <asm/timex.h>
 #include <asm/unistd.h>
 #include <linux/compiler.h>
@@ -35,35 +36,20 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *
 static __always_inline
 long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
 {
-       register unsigned long r1 __asm__("r1") = __NR_clock_gettime;
-       register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
-       register void *r3 __asm__("r3") = ts;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_clock_gettime, (long)clkid, (long)ts);
 }
 
 static __always_inline
 long gettimeofday_fallback(register struct __kernel_old_timeval *tv,
                           register struct timezone *tz)
 {
-       register unsigned long r1 __asm__("r1") = __NR_gettimeofday;
-       register unsigned long r2 __asm__("r2") = (unsigned long)tv;
-       register void *r3 __asm__("r3") = tz;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
 }
 
 static __always_inline
 long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
 {
-       register unsigned long r1 __asm__("r1") = __NR_clock_getres;
-       register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
-       register void *r3 __asm__("r3") = ts;
-
-       asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
-       return r2;
+       return syscall2(__NR_clock_getres, (long)clkid, (long)ts);
 }
 
 #ifdef CONFIG_TIME_NS
index 4a44ba5a2d73427eeccc388b64377a2dd1f20ffa..80f500ffb55c3b0df903a960d35c20071e9ac34b 100644 (file)
@@ -40,7 +40,7 @@ obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
 obj-y  += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y  += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y  += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y  += smp.o
+obj-y  += smp.o text_amode31.o
 
 extra-y                                += head64.o vmlinux.lds
 
index 77ff2130cb04529f40683e2d7ebe3584574e14fd..b57da933858888cb5f77bb31d68de9ab11d86beb 100644 (file)
@@ -116,6 +116,7 @@ int main(void)
        OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
        OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
        OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
+       OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
        OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
        OFFSET(__LC_USER_ASCE, lowcore, user_asce);
        OFFSET(__LC_LPP, lowcore, lpp);
@@ -152,5 +153,12 @@ int main(void)
        DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
        /* sizeof kernel parameter area */
        DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
+       /* kernel parameter area offsets */
+       DEFINE(IPL_DEVICE, PARMAREA + offsetof(struct parmarea, ipl_device));
+       DEFINE(INITRD_START, PARMAREA + offsetof(struct parmarea, initrd_start));
+       DEFINE(INITRD_SIZE, PARMAREA + offsetof(struct parmarea, initrd_size));
+       DEFINE(OLDMEM_BASE, PARMAREA + offsetof(struct parmarea, oldmem_base));
+       DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
+       DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
        return 0;
 }
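These DEFINE() entries end up in the generated include/generated/asm-offsets.h, so assembly keeps the old symbolic names while the values are now derived from struct parmarea. The generated header should contain lines along these (assumed) lines:

    #define IPL_DEVICE 66560 /* PARMAREA + offsetof(struct parmarea, ipl_device) */
    #define COMMAND_LINE 66688 /* PARMAREA + offsetof(struct parmarea, command_line) */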
index 0e36dfc9ccd6e3435c68f8e8a5fb2a6f0b9c6689..d72a6df058d79f99f9c1702b28682a1b868f84c2 100644 (file)
@@ -140,7 +140,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
 
        while (count) {
                from = __pa(src);
-               if (!OLDMEM_BASE && from < sclp.hsa_size) {
+               if (!oldmem_data.start && from < sclp.hsa_size) {
                        /* Copy from zfcp/nvme dump HSA area */
                        len = min(count, sclp.hsa_size - from);
                        rc = memcpy_hsa_kernel(dst, from, len);
@@ -148,12 +148,12 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
                                return rc;
                } else {
                        /* Check for swapped kdump oldmem areas */
-                       if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
-                               from -= OLDMEM_BASE;
-                               len = min(count, OLDMEM_SIZE - from);
-                       } else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
-                               len = min(count, OLDMEM_SIZE - from);
-                               from += OLDMEM_BASE;
+                       if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+                               from -= oldmem_data.start;
+                               len = min(count, oldmem_data.size - from);
+                       } else if (oldmem_data.start && from < oldmem_data.size) {
+                               len = min(count, oldmem_data.size - from);
+                               from += oldmem_data.start;
                        } else {
                                len = count;
                        }
@@ -183,7 +183,7 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
 
        while (count) {
                from = __pa(src);
-               if (!OLDMEM_BASE && from < sclp.hsa_size) {
+               if (!oldmem_data.start && from < sclp.hsa_size) {
                        /* Copy from zfcp/nvme dump HSA area */
                        len = min(count, sclp.hsa_size - from);
                        rc = memcpy_hsa_user(dst, from, len);
@@ -191,12 +191,12 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
                                return rc;
                } else {
                        /* Check for swapped kdump oldmem areas */
-                       if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
-                               from -= OLDMEM_BASE;
-                               len = min(count, OLDMEM_SIZE - from);
-                       } else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
-                               len = min(count, OLDMEM_SIZE - from);
-                               from += OLDMEM_BASE;
+                       if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+                               from -= oldmem_data.start;
+                               len = min(count, oldmem_data.size - from);
+                       } else if (oldmem_data.start && from < oldmem_data.size) {
+                               len = min(count, oldmem_data.size - from);
+                               from += oldmem_data.start;
                        } else {
                                len = count;
                        }
@@ -243,10 +243,10 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
        unsigned long size_old;
        int rc;
 
-       if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
-               size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
+       if (pfn < oldmem_data.size >> PAGE_SHIFT) {
+               size_old = min(size, oldmem_data.size - (pfn << PAGE_SHIFT));
                rc = remap_pfn_range(vma, from,
-                                    pfn + (OLDMEM_BASE >> PAGE_SHIFT),
+                                    pfn + (oldmem_data.start >> PAGE_SHIFT),
                                     size_old, prot);
                if (rc || size == size_old)
                        return rc;
@@ -288,7 +288,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
 int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
                           unsigned long pfn, unsigned long size, pgprot_t prot)
 {
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
@@ -633,17 +633,17 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
        u64 hdr_off;
 
        /* If we are not in kdump or zfcp/nvme dump mode return */
-       if (!OLDMEM_BASE && !is_ipl_type_dump())
+       if (!oldmem_data.start && !is_ipl_type_dump())
                return 0;
        /* If we cannot get HSA size for zfcp/nvme dump return error */
        if (is_ipl_type_dump() && !sclp.hsa_size)
                return -ENODEV;
 
        /* For kdump, exclude previous crashkernel memory */
-       if (OLDMEM_BASE) {
-               oldmem_region.base = OLDMEM_BASE;
-               oldmem_region.size = OLDMEM_SIZE;
-               oldmem_type.total_size = OLDMEM_SIZE;
+       if (oldmem_data.start) {
+               oldmem_region.base = oldmem_data.start;
+               oldmem_region.size = oldmem_data.size;
+               oldmem_type.total_size = oldmem_data.size;
        }
 
        mem_chunk_cnt = get_mem_chunk_cnt();
index 09b6c6402f9b7918192dd99dc81d4de68b174c12..4331c7e6e1c0320083735780b2b2b1b5a2075a37 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/minmax.h>
 #include <linux/debugfs.h>
 
 #include <asm/debug.h>
@@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
                                     char *out_buf, const char *in_buf);
 static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
                                   char *out_buf, debug_sprintf_entry_t *curr_event);
+static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
+static void debug_events_append(debug_info_t *dest, debug_info_t *src);
 
 /* globals */
 
@@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
                goto out;
 
        rc->mode = mode & ~S_IFMT;
-
-       /* create root directory */
-       rc->debugfs_root_entry = debugfs_create_dir(rc->name,
-                                                   debug_debugfs_root_entry);
-
-       /* append new element to linked list */
-       if (!debug_area_first) {
-               /* first element in list */
-               debug_area_first = rc;
-               rc->prev = NULL;
-       } else {
-               /* append element to end of list */
-               debug_area_last->next = rc;
-               rc->prev = debug_area_last;
-       }
-       debug_area_last = rc;
-       rc->next = NULL;
-
        refcount_set(&rc->ref_count, 1);
 out:
        return rc;
@@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
  */
 static void debug_info_put(debug_info_t *db_info)
 {
-       int i;
-
        if (!db_info)
                return;
-       if (refcount_dec_and_test(&db_info->ref_count)) {
-               for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
-                       if (!db_info->views[i])
-                               continue;
-                       debugfs_remove(db_info->debugfs_entries[i]);
-               }
-               debugfs_remove(db_info->debugfs_root_entry);
-               if (db_info == debug_area_first)
-                       debug_area_first = db_info->next;
-               if (db_info == debug_area_last)
-                       debug_area_last = db_info->prev;
-               if (db_info->prev)
-                       db_info->prev->next = db_info->next;
-               if (db_info->next)
-                       db_info->next->prev = db_info->prev;
+       if (refcount_dec_and_test(&db_info->ref_count))
                debug_info_free(db_info);
-       }
 }
 
 /*
@@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
        return 0; /* success */
 }
 
+/* Create debugfs entries and add to internal list. */
+static void _debug_register(debug_info_t *id)
+{
+       /* create root directory */
+       id->debugfs_root_entry = debugfs_create_dir(id->name,
+                                                   debug_debugfs_root_entry);
+
+       /* append new element to linked list */
+       if (!debug_area_first) {
+               /* first element in list */
+               debug_area_first = id;
+               id->prev = NULL;
+       } else {
+               /* append element to end of list */
+               debug_area_last->next = id;
+               id->prev = debug_area_last;
+       }
+       debug_area_last = id;
+       id->next = NULL;
+
+       debug_register_view(id, &debug_level_view);
+       debug_register_view(id, &debug_flush_view);
+       debug_register_view(id, &debug_pages_view);
+}
+
 /**
  * debug_register_mode() - creates and initializes debug area.
  *
@@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
        if ((uid != 0) || (gid != 0))
                pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
        BUG_ON(!initialized);
-       mutex_lock(&debug_mutex);
 
        /* create new debug_info */
        rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
-       if (!rc)
-               goto out;
-       debug_register_view(rc, &debug_level_view);
-       debug_register_view(rc, &debug_flush_view);
-       debug_register_view(rc, &debug_pages_view);
-out:
-       if (!rc)
+       if (rc) {
+               mutex_lock(&debug_mutex);
+               _debug_register(rc);
+               mutex_unlock(&debug_mutex);
+       } else {
                pr_err("Registering debug feature %s failed\n", name);
-       mutex_unlock(&debug_mutex);
+       }
        return rc;
 }
 EXPORT_SYMBOL(debug_register_mode);
@@ -702,6 +692,82 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
 }
 EXPORT_SYMBOL(debug_register);
 
+/**
+ * debug_register_static() - registers a static debug area
+ *
+ * @id: Handle for static debug area
+ * @pages_per_area: Number of pages per area
+ * @nr_areas: Number of debug areas
+ *
+ * Register debug_info_t defined using DEFINE_STATIC_DEBUG_INFO.
+ *
+ * Note: This function is called automatically via an initcall generated by
+ *      DEFINE_STATIC_DEBUG_INFO.
+ */
+void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas)
+{
+       unsigned long flags;
+       debug_info_t *copy;
+
+       if (!initialized) {
+               pr_err("Tried to register debug feature %s too early\n",
+                      id->name);
+               return;
+       }
+
+       copy = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
+                               id->level, ALL_AREAS);
+       if (!copy) {
+               pr_err("Registering debug feature %s failed\n", id->name);
+
+               /* Clear pointers to prevent tracing into released initdata. */
+               spin_lock_irqsave(&id->lock, flags);
+               id->areas = NULL;
+               id->active_pages = NULL;
+               id->active_entries = NULL;
+               spin_unlock_irqrestore(&id->lock, flags);
+
+               return;
+       }
+
+       /* Replace static trace area with dynamic copy. */
+       spin_lock_irqsave(&id->lock, flags);
+       debug_events_append(copy, id);
+       debug_areas_swap(id, copy);
+       spin_unlock_irqrestore(&id->lock, flags);
+
+       /* Clear pointers to initdata and discard copy. */
+       copy->areas = NULL;
+       copy->active_pages = NULL;
+       copy->active_entries = NULL;
+       debug_info_free(copy);
+
+       mutex_lock(&debug_mutex);
+       _debug_register(id);
+       mutex_unlock(&debug_mutex);
+}
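A hypothetical user of the static variant; the exact DEFINE_STATIC_DEBUG_INFO signature lives in arch/s390/include/asm/debug.h, so the argument order below (name, pages per area, number of areas, buffer size, default view) is an assumption:

    /* Hypothetical sketch, not taken from the patch: the static area is
     * usable from very early boot, before debug_register_static() has
     * replaced it with a dynamic copy via the generated initcall. */
    DEFINE_STATIC_DEBUG_INFO(boot_dbf, "boot", 4, 1, 16, &debug_hex_ascii_view);

    static void trace_early(unsigned long val)
    {
            debug_event(&boot_dbf, 1, &val, sizeof(val));
    }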
+
+/* Remove debugfs entries and remove from internal list. */
+static void _debug_unregister(debug_info_t *id)
+{
+       int i;
+
+       for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+               if (!id->views[i])
+                       continue;
+               debugfs_remove(id->debugfs_entries[i]);
+       }
+       debugfs_remove(id->debugfs_root_entry);
+       if (id == debug_area_first)
+               debug_area_first = id->next;
+       if (id == debug_area_last)
+               debug_area_last = id->prev;
+       if (id->prev)
+               id->prev->next = id->next;
+       if (id->next)
+               id->next->prev = id->prev;
+}
+
 /**
  * debug_unregister() - give back debug area.
  *
@@ -715,8 +781,10 @@ void debug_unregister(debug_info_t *id)
        if (!id)
                return;
        mutex_lock(&debug_mutex);
-       debug_info_put(id);
+       _debug_unregister(id);
        mutex_unlock(&debug_mutex);
+
+       debug_info_put(id);
 }
 EXPORT_SYMBOL(debug_unregister);
 
@@ -726,35 +794,28 @@ EXPORT_SYMBOL(debug_unregister);
  */
 static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
 {
-       debug_entry_t ***new_areas;
+       debug_info_t *new_id;
        unsigned long flags;
-       int rc = 0;
 
        if (!id || (nr_areas <= 0) || (pages_per_area < 0))
                return -EINVAL;
-       if (pages_per_area > 0) {
-               new_areas = debug_areas_alloc(pages_per_area, nr_areas);
-               if (!new_areas) {
-                       pr_info("Allocating memory for %i pages failed\n",
-                               pages_per_area);
-                       rc = -ENOMEM;
-                       goto out;
-               }
-       } else {
-               new_areas = NULL;
+
+       new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
+                                 id->level, ALL_AREAS);
+       if (!new_id) {
+               pr_info("Allocating memory for %i pages failed\n",
+                       pages_per_area);
+               return -ENOMEM;
        }
+
        spin_lock_irqsave(&id->lock, flags);
-       debug_areas_free(id);
-       id->areas = new_areas;
-       id->nr_areas = nr_areas;
-       id->pages_per_area = pages_per_area;
-       id->active_area = 0;
-       memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
-       memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
+       debug_events_append(new_id, id);
+       debug_areas_swap(new_id, id);
+       debug_info_free(new_id);
        spin_unlock_irqrestore(&id->lock, flags);
        pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
-out:
-       return rc;
+
+       return 0;
 }
 
 /**
@@ -772,16 +833,17 @@ void debug_set_level(debug_info_t *id, int new_level)
 
        if (!id)
                return;
-       spin_lock_irqsave(&id->lock, flags);
+
        if (new_level == DEBUG_OFF_LEVEL) {
-               id->level = DEBUG_OFF_LEVEL;
                pr_info("%s: switched off\n", id->name);
        } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
                pr_info("%s: level %i is out of range (%i - %i)\n",
                        id->name, new_level, 0, DEBUG_MAX_LEVEL);
-       } else {
-               id->level = new_level;
+               return;
        }
+
+       spin_lock_irqsave(&id->lock, flags);
+       id->level = new_level;
        spin_unlock_irqrestore(&id->lock, flags);
 }
 EXPORT_SYMBOL(debug_set_level);
@@ -821,6 +883,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
                                  id->active_entries[id->active_area]);
 }
 
+/* Swap debug areas of a and b. */
+static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
+{
+       swap(a->nr_areas, b->nr_areas);
+       swap(a->pages_per_area, b->pages_per_area);
+       swap(a->areas, b->areas);
+       swap(a->active_area, b->active_area);
+       swap(a->active_pages, b->active_pages);
+       swap(a->active_entries, b->active_entries);
+}
+
+/* Append all debug events in active area from source to destination log. */
+static void debug_events_append(debug_info_t *dest, debug_info_t *src)
+{
+       debug_entry_t *from, *to, *last;
+
+       if (!src->areas || !dest->areas)
+               return;
+
+       /* Loop over all entries in src, starting with oldest. */
+       from = get_active_entry(src);
+       last = from;
+       do {
+               if (from->clock != 0LL) {
+                       to = get_active_entry(dest);
+                       memset(to, 0, dest->entry_size);
+                       memcpy(to, from, min(src->entry_size,
+                                            dest->entry_size));
+                       proceed_active_entry(dest);
+               }
+
+               proceed_active_entry(src);
+               from = get_active_entry(src);
+       } while (from != last);
+}
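debug_events_append() walks the source ring exactly once, starting at the active (oldest) slot, and skips unused entries, which are marked by a zero clock value. The same traversal over a plain array, as a stripped-down sketch with hypothetical types:

    #include <string.h>

    struct entry { long long clock; char data[24]; };

    /* Copy used slots of a ring of n entries, oldest first. */
    static int ring_copy(struct entry *dst, const struct entry *ring,
                         int n, int head)
    {
            int i, out = 0;

            for (i = 0; i < n; i++) {
                    const struct entry *e = &ring[(head + i) % n];

                    if (e->clock != 0)      /* zero clock: unused slot */
                            memcpy(&dst[out++], e, sizeof(*e));
            }
            return out;
    }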
+
 /*
  * debug_finish_entry:
  * - set timestamp, caller address, cpu number etc.
@@ -1111,16 +1209,17 @@ int debug_register_view(debug_info_t *id, struct debug_view *view)
                        break;
        }
        if (i == DEBUG_MAX_VIEWS) {
-               pr_err("Registering view %s/%s would exceed the maximum "
-                      "number of views %i\n", id->name, view->name, i);
                rc = -1;
        } else {
                id->views[i] = view;
                id->debugfs_entries[i] = pde;
        }
        spin_unlock_irqrestore(&id->lock, flags);
-       if (rc)
+       if (rc) {
+               pr_err("Registering view %s/%s would exceed the maximum "
+                      "number of views %i\n", id->name, view->name, i);
                debugfs_remove(pde);
+       }
 out:
        return rc;
 }
index a3f47464c3f1748d6b08b3d634e4062174445180..76a656b2146fc47c7c5e7101ff5c109e96d236f0 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/diag.h>
 #include <asm/trace/diag.h>
 #include <asm/sections.h>
+#include "entry.h"
 
 struct diag_stat {
        unsigned int counter[NR_DIAG_STAT];
@@ -50,8 +51,16 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
        [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
 };
 
-struct diag_ops __bootdata_preserved(diag_dma_ops);
-struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
+struct diag_ops __amode31_ref diag_amode31_ops = {
+       .diag210 = _diag210_amode31,
+       .diag26c = _diag26c_amode31,
+       .diag14 = _diag14_amode31,
+       .diag0c = _diag0c_amode31,
+       .diag308_reset = _diag308_reset_amode31
+};
+
+static struct diag210 _diag210_tmp_amode31 __section(".amode31.data");
+struct diag210 __amode31_ref *__diag210_tmp_amode31 = &_diag210_tmp_amode31;
 
 static int show_diag_stat(struct seq_file *m, void *v)
 {
@@ -59,7 +68,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
        unsigned long n = (unsigned long) v - 1;
        int cpu, prec, tmp;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (n == 0) {
                seq_puts(m, "         ");
 
@@ -78,7 +87,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
                }
                seq_printf(m, "    %s\n", diag_map[n-1].name);
        }
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 }
 
@@ -135,7 +144,7 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
 int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
 {
        diag_stat_inc(DIAG_STAT_X014);
-       return diag_dma_ops.diag14(rx, ry1, subcode);
+       return diag_amode31_ops.diag14(rx, ry1, subcode);
 }
 EXPORT_SYMBOL(diag14);
 
@@ -172,12 +181,12 @@ int diag210(struct diag210 *addr)
        int ccode;
 
        spin_lock_irqsave(&diag210_lock, flags);
-       *__diag210_tmp_dma = *addr;
+       *__diag210_tmp_amode31 = *addr;
 
        diag_stat_inc(DIAG_STAT_X210);
-       ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
+       ccode = diag_amode31_ops.diag210(__diag210_tmp_amode31);
 
-       *addr = *__diag210_tmp_dma;
+       *addr = *__diag210_tmp_amode31;
        spin_unlock_irqrestore(&diag210_lock, flags);
 
        return ccode;
@@ -205,6 +214,6 @@ EXPORT_SYMBOL(diag224);
 int diag26c(void *req, void *resp, enum diag26c_sc subcode)
 {
        diag_stat_inc(DIAG_STAT_X26C);
-       return diag_dma_ops.diag26c(req, resp, subcode);
+       return diag_amode31_ops.diag26c(req, resp, subcode);
 }
 EXPORT_SYMBOL(diag26c);
index 5412efe328f80d2db7e4751a1409e5e17458d1d7..ec5515423f17c495804b4b9af219eea1ed4f27a4 100644 (file)
@@ -312,10 +312,12 @@ static const unsigned char formats[][6] = {
        [INSTR_VRR_VV]       = { V_8, V_12, 0, 0, 0, 0 },
        [INSTR_VRR_VV0U]     = { V_8, V_12, U4_32, 0, 0, 0 },
        [INSTR_VRR_VV0U0U]   = { V_8, V_12, U4_32, U4_24, 0, 0 },
+       [INSTR_VRR_VV0U2]    = { V_8, V_12, U4_24, 0, 0, 0 },
        [INSTR_VRR_VV0UU2]   = { V_8, V_12, U4_32, U4_28, 0, 0 },
        [INSTR_VRR_VV0UUU]   = { V_8, V_12, U4_32, U4_28, U4_24, 0 },
        [INSTR_VRR_VVV]      = { V_8, V_12, V_16, 0, 0, 0 },
        [INSTR_VRR_VVV0U]    = { V_8, V_12, V_16, U4_32, 0, 0 },
+       [INSTR_VRR_VVV0U0]   = { V_8, V_12, V_16, U4_24, 0, 0 },
        [INSTR_VRR_VVV0U0U]  = { V_8, V_12, V_16, U4_32, U4_24, 0 },
        [INSTR_VRR_VVV0UU]   = { V_8, V_12, V_16, U4_32, U4_28, 0 },
        [INSTR_VRR_VVV0UUU]  = { V_8, V_12, V_16, U4_32, U4_28, U4_24 },
index fb84e3fc1686d49618706373f4490234220a544e..9857cb04672680c81c4ab81b82a380b8f1e2ba58 100644 (file)
@@ -236,6 +236,10 @@ static __init void detect_machine_facilities(void)
                clock_comparator_max = -1ULL >> 1;
                __ctl_set_bit(0, 53);
        }
+       if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
+               S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
+               /* the control bit is set during PCI initialization */
+       }
 }
 
 static inline void save_vector_registers(void)
index 5a2f70cbd3a9da8e34f458212e1198ba62d3cb50..b9716a7e326d02d7d50290f2666c9d2938562996 100644 (file)
@@ -624,12 +624,15 @@ ENTRY(mcck_int_handler)
 4:     j       4b
 ENDPROC(mcck_int_handler)
 
-#
-# PSW restart interrupt handler
-#
 ENTRY(restart_int_handler)
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        stg     %r15,__LC_SAVE_AREA_RESTART
+       TSTMSK  __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
+       jz      0f
+       la      %r15,4095
+       lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
+0:     larl    %r15,.Lstosm_tmp
+       stosm   0(%r15),0x04                    # turn dat on, keep irqs off
        lg      %r15,__LC_RESTART_STACK
        xc      STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
@@ -638,7 +641,7 @@ ENTRY(restart_int_handler)
        xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
        lg      %r1,__LC_RESTART_FN             # load fn, parm & source cpu
        lg      %r2,__LC_RESTART_DATA
-       lg      %r3,__LC_RESTART_SOURCE
+       lgf     %r3,__LC_RESTART_SOURCE
        ltgr    %r3,%r3                         # test source cpu address
        jm      1f                              # negative -> skip source stop
 0:     sigp    %r4,%r3,SIGP_SENSE              # sigp sense to source cpu
index 1ab33465382ff7ecae40b42adeb7a17d4a57a402..7f2696e8d511ed631b1b6dd0efbbfc0176dbf5fd 100644 (file)
@@ -28,10 +28,8 @@ void do_non_secure_storage_access(struct pt_regs *regs);
 void do_secure_storage_violation(struct pt_regs *regs);
 void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
 void kernel_stack_overflow(struct pt_regs * regs);
-void do_signal(struct pt_regs *regs);
 void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                     struct pt_regs *regs);
-void do_notify_resume(struct pt_regs *regs);
 
 void __init init_IRQ(void);
 void do_io_irq(struct pt_regs *regs);
@@ -64,4 +62,13 @@ void stack_free(unsigned long stack);
 
 extern char kprobes_insn_page[];
 
+extern char _samode31[], _eamode31[];
+extern char _stext_amode31[], _etext_amode31[];
+extern struct exception_table_entry _start_amode31_ex_table[];
+extern struct exception_table_entry _stop_amode31_ex_table[];
+
+#define __amode31_data __section(".amode31.data")
+#define __amode31_ref __section(".amode31.refs")
+extern long _start_amode31_refs[], _end_amode31_refs[];
+
 #endif /* _ENTRY_H */
index 2d8f595d9196163abc0276e14adadd3d741c19c3..0a464d32846792aac4e13a32aa9539c75a7e58a3 100644 (file)
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 #include <asm/cacheflush.h>
+#include <asm/ftrace.lds.h>
+#include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
 #include "entry.h"
+#include "ftrace.h"
 
 /*
  * To generate function prologue either gcc's hotpatch feature (since gcc 4.8)
  * or a combination of -pg -mfentry flags can be used. Both methods require
  * symbol __fentry__ to be defined.
  */
 
 void *ftrace_func __read_mostly = ftrace_stub;
-unsigned long ftrace_plt;
+struct ftrace_insn {
+       u16 opc;
+       s32 disp;
+} __packed;
+
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_br:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       br      %r1\n"
+       "ftrace_shared_hotpatch_trampoline_br_end:\n"
+);
+
+#ifdef CONFIG_EXPOLINE
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_ex:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       ex      %r0," __stringify(__LC_BR_R1) "(%r0)\n"
+       "       j       .\n"
+       "ftrace_shared_hotpatch_trampoline_ex_end:\n"
+);
+
+asm(
+       "       .align 16\n"
+       "ftrace_shared_hotpatch_trampoline_exrl:\n"
+       "       lmg     %r0,%r1,2(%r1)\n"
+       "       .insn   ril,0xc60000000000,%r0,0f\n" /* exrl */
+       "       j       .\n"
+       "0:     br      %r1\n"
+       "ftrace_shared_hotpatch_trampoline_exrl_end:\n"
+);
+#endif /* CONFIG_EXPOLINE */
+
+#ifdef CONFIG_MODULES
+static char *ftrace_plt;
+
+asm(
+       "       .data\n"
+       "ftrace_plt_template:\n"
+       "       basr    %r1,%r0\n"
+       "       lg      %r1,0f-.(%r1)\n"
+       "       br      %r1\n"
+       "0:     .quad   ftrace_caller\n"
+       "ftrace_plt_template_end:\n"
+       "       .previous\n"
+);
+#endif /* CONFIG_MODULES */
+
+static const char *ftrace_shared_hotpatch_trampoline(const char **end)
+{
+       const char *tstart, *tend;
+
+       tstart = ftrace_shared_hotpatch_trampoline_br;
+       tend = ftrace_shared_hotpatch_trampoline_br_end;
+#ifdef CONFIG_EXPOLINE
+       if (!nospec_disable) {
+               tstart = ftrace_shared_hotpatch_trampoline_ex;
+               tend = ftrace_shared_hotpatch_trampoline_ex_end;
+               if (test_facility(35)) { /* exrl */
+                       tstart = ftrace_shared_hotpatch_trampoline_exrl;
+                       tend = ftrace_shared_hotpatch_trampoline_exrl_end;
+               }
+       }
+#endif /* CONFIG_EXPOLINE */
+       if (end)
+               *end = tend;
+       return tstart;
+}
+
+bool ftrace_need_init_nop(void)
+{
+       return ftrace_shared_hotpatch_trampoline(NULL);
+}
+
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+       static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
+               __ftrace_hotpatch_trampolines_start;
+       static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
+       static struct ftrace_hotpatch_trampoline *trampoline;
+       struct ftrace_hotpatch_trampoline **next_trampoline;
+       struct ftrace_hotpatch_trampoline *trampolines_end;
+       struct ftrace_hotpatch_trampoline tmp;
+       struct ftrace_insn *insn;
+       const char *shared;
+       s32 disp;
+
+       BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
+                    SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);
+
+       next_trampoline = &next_vmlinux_trampoline;
+       trampolines_end = __ftrace_hotpatch_trampolines_end;
+       shared = ftrace_shared_hotpatch_trampoline(NULL);
+#ifdef CONFIG_MODULES
+       if (mod) {
+               next_trampoline = &mod->arch.next_trampoline;
+               trampolines_end = mod->arch.trampolines_end;
+               shared = ftrace_plt;
+       }
+#endif
+
+       if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
+               return -ENOMEM;
+       trampoline = (*next_trampoline)++;
+
+       /* Check for the compiler-generated fentry nop (brcl 0, .). */
+       if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
+               return -EINVAL;
+
+       /* Generate the trampoline. */
+       tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
+       tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
+       tmp.interceptor = FTRACE_ADDR;
+       tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
+       s390_kernel_write(trampoline, &tmp, sizeof(tmp));
+
+       /* Generate a jump to the trampoline. */
+       disp = ((char *)trampoline - (char *)rec->ip) / 2;
+       insn = (struct ftrace_insn *)rec->ip;
+       s390_kernel_write(&insn->disp, &disp, sizeof(disp));
+
+       return 0;
+}
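The divisions by 2 above reflect that s390 relative branch instructions (brasl, brcl) encode their target as a signed halfword offset from the instruction address. A small sketch, with hypothetical addresses:

    /* Halfword offset as stored in the brasl/brcl immediate field. */
    static int halfword_disp(unsigned long insn, unsigned long target)
    {
            return (int)((long)(target - insn) / 2);
    }

    /* e.g. a trampoline 0x1000 bytes behind the call site:
     * halfword_disp(0x1000, 0x2000) == 0x800 */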
 
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
@@ -49,11 +175,45 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
        return 0;
 }
 
+static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+       /* brcl 0,0 */
+       insn->opc = 0xc004;
+       insn->disp = 0;
+}
+
+static void ftrace_generate_call_insn(struct ftrace_insn *insn,
+                                     unsigned long ip)
+{
+       unsigned long target;
+
+       /* brasl r0,ftrace_caller */
+       target = FTRACE_ADDR;
+#ifdef CONFIG_MODULES
+       if (is_module_addr((void *)ip))
+               target = (unsigned long)ftrace_plt;
+#endif /* CONFIG_MODULES */
+       insn->opc = 0xc005;
+       insn->disp = (target - ip) / 2;
+}
+
+static void brcl_disable(void *brcl)
+{
+       u8 op = 0x04; /* set mask field to zero */
+
+       s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+}
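brcl occupies six bytes and byte 1 carries the four-bit condition mask in its high nibble, so a single byte store flips the instruction between branch-never (a nop) and branch-always without touching the displacement. A sketch over a plain buffer, not live kernel text:

    /* brcl: 0xc0, (mask << 4) | 0x4, then a 32-bit halfword offset */
    static void set_brcl_mask(unsigned char *brcl, int enable)
    {
            brcl[1] = enable ? 0xf4 : 0x04; /* mask 0xf: always, 0x0: never */
    }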
+
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
 {
        struct ftrace_insn orig, new, old;
 
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               brcl_disable((void *)rec->ip);
+               return 0;
+       }
+
        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace ftrace call with a nop. */
@@ -67,10 +227,22 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
        return 0;
 }
 
+static void brcl_enable(void *brcl)
+{
+       u8 op = 0xf4; /* set mask field to all ones */
+
+       s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
+}
+
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
        struct ftrace_insn orig, new, old;
 
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               brcl_enable((void *)rec->ip);
+               return 0;
+       }
+
        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace nop with an ftrace call. */
@@ -95,22 +267,44 @@ int __init ftrace_dyn_arch_init(void)
        return 0;
 }
 
+void arch_ftrace_update_code(int command)
+{
+       if (ftrace_shared_hotpatch_trampoline(NULL))
+               ftrace_modify_all_code(command);
+       else
+               ftrace_run_stop_machine(command);
+}
+
+static void __ftrace_sync(void *dummy)
+{
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       if (ftrace_shared_hotpatch_trampoline(NULL)) {
+               /* Send SIGP to the other CPUs, so they see the new code. */
+               smp_call_function(__ftrace_sync, NULL, 1);
+       }
+       return 0;
+}
+
 #ifdef CONFIG_MODULES
 
 static int __init ftrace_plt_init(void)
 {
-       unsigned int *ip;
+       const char *start, *end;
 
-       ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
+       ftrace_plt = module_alloc(PAGE_SIZE);
        if (!ftrace_plt)
                panic("cannot allocate ftrace plt\n");
-       ip = (unsigned int *) ftrace_plt;
-       ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
-       ip[1] = 0x100a0004;
-       ip[2] = 0x07f10000;
-       ip[3] = FTRACE_ADDR >> 32;
-       ip[4] = FTRACE_ADDR & 0xffffffff;
-       set_memory_ro(ftrace_plt, 1);
+
+       start = ftrace_shared_hotpatch_trampoline(&end);
+       if (!start) {
+               start = ftrace_plt_template;
+               end = ftrace_plt_template_end;
+       }
+       memcpy(ftrace_plt, start, end - start);
+       set_memory_ro((unsigned long)ftrace_plt, 1);
        return 0;
 }
 device_initcall(ftrace_plt_init);
@@ -147,17 +341,13 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
  */
 int ftrace_enable_ftrace_graph_caller(void)
 {
-       u8 op = 0x04; /* set mask field to zero */
-
-       s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+       brcl_disable(__va(ftrace_graph_caller));
        return 0;
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-       u8 op = 0xf4; /* set mask field to all ones */
-
-       s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+       brcl_enable(__va(ftrace_graph_caller));
        return 0;
 }
 
diff --git a/arch/s390/kernel/ftrace.h b/arch/s390/kernel/ftrace.h
new file mode 100644 (file)
index 0000000..69e416f
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FTRACE_H
+#define _FTRACE_H
+
+#include <asm/types.h>
+
+struct ftrace_hotpatch_trampoline {
+       u16 brasl_opc;
+       s32 brasl_disp;
+       s16: 16;
+       u64 rest_of_intercepted_function;
+       u64 interceptor;
+} __packed;
+
+extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_start[];
+extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[];
+extern const char ftrace_shared_hotpatch_trampoline_br[];
+extern const char ftrace_shared_hotpatch_trampoline_br_end[];
+extern const char ftrace_shared_hotpatch_trampoline_ex[];
+extern const char ftrace_shared_hotpatch_trampoline_ex_end[];
+extern const char ftrace_shared_hotpatch_trampoline_exrl[];
+extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
+extern const char ftrace_plt_template[];
+extern const char ftrace_plt_template_end[];
+
+#endif /* _FTRACE_H */
index 0c253886da7892ae32b6e642ed69741b94b73625..114b5490ad8ebace2e0d3d2fed4b447d746c2580 100644 (file)
@@ -21,6 +21,7 @@ ENTRY(startup_continue)
        larl    %r1,tod_clock_base
        mvc     0(16,%r1),__LC_BOOT_CLOCK
        larl    %r13,.LPG1              # get base
+       lctlg   %c0,%c15,.Lctl-.LPG1(%r13)      # load control registers
 #
 # Setup stack
 #
@@ -41,3 +42,19 @@ ENTRY(startup_continue)
        .align  16
 .LPG1:
 .Ldw:  .quad   0x0002000180000000,0x0000000000000000
+.Lctl: .quad   0x04040000              # cr0: AFP registers & secondary space
+       .quad   0                       # cr1: primary space segment table
+       .quad   0                       # cr2: dispatchable unit control table
+       .quad   0                       # cr3: instruction authorization
+       .quad   0xffff                  # cr4: instruction authorization
+       .quad   0                       # cr5: primary-aste origin
+       .quad   0                       # cr6: I/O interrupts
+       .quad   0                       # cr7: secondary space segment table
+       .quad   0x0000000000008000      # cr8: access registers translation
+       .quad   0                       # cr9: tracing off
+       .quad   0                       # cr10: tracing off
+       .quad   0                       # cr11: tracing off
+       .quad   0                       # cr12: tracing off
+       .quad   0                       # cr13: home space segment table
+       .quad   0xc0000000              # cr14: machine check handling off
+       .quad   0                       # cr15: linkage stack operations
index 50e2c21e0ec94eb95ebc3daad110372b040d7f42..e2cc35775b99670d02e30a99b6e8c020ccbecc84 100644 (file)
@@ -179,8 +179,6 @@ static inline int __diag308(unsigned long subcode, void *addr)
 
 int diag308(unsigned long subcode, void *addr)
 {
-       if (IS_ENABLED(CONFIG_KASAN))
-               __arch_local_irq_stosm(0x04); /* enable DAT */
        diag_stat_inc(DIAG_STAT_X308);
        return __diag308(subcode, addr);
 }
@@ -1843,7 +1841,6 @@ static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
 
 static void __do_restart(void *ignore)
 {
-       __arch_local_irq_stosm(0x04); /* enable DAT */
        smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
        crash_kexec(NULL);
@@ -2082,7 +2079,7 @@ void s390_reset_system(void)
 
        /* Disable lowcore protection */
        __ctl_clear_bit(0, 28);
-       diag_dma_ops.diag308_reset();
+       diag_amode31_ops.diag308_reset();
 }
 
 #ifdef CONFIG_KEXEC_FILE
index af43535a976df3647f487475415efbf1d6638c6d..b5245fadcfb03991ac832e8ef46f5104462f1416 100644 (file)
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/minmax.h>
+#include <linux/string.h>
 #include <asm/ebcdic.h>
 #include <asm/ipl.h>
 
index 234d085257eb1aa106159a53475f68b9efe22869..3a3145c4a3ba4dfd612d1f8ecb1adbd701a82a4b 100644 (file)
@@ -228,7 +228,7 @@ int show_interrupts(struct seq_file *p, void *v)
        int index = *(loff_t *) v;
        int cpu, irq;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (index == 0) {
                seq_puts(p, "           ");
                for_each_online_cpu(cpu)
@@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
        }
 out:
-       put_online_cpus();
+       cpus_read_unlock();
        return 0;
 }
 
index ab584e8e352754996078054fefef4883d07e5997..9156653b56f69b4482351fb9a020316b72556f6d 100644 (file)
@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
        unsigned char *ipe = (unsigned char *)expected;
        unsigned char *ipn = (unsigned char *)new;
 
-       pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+       pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
        pr_emerg("Found:    %6ph\n", ipc);
        pr_emerg("Expected: %6ph\n", ipe);
        pr_emerg("New:      %6ph\n", ipn);
index 1005a6935fbe34b123bd2b2e714ec9fb85559689..0505e55a62979f5974530cb4b220d1fc0256b633 100644 (file)
@@ -224,8 +224,8 @@ void arch_crash_save_vmcoreinfo(void)
        VMCOREINFO_SYMBOL(lowcore_ptr);
        VMCOREINFO_SYMBOL(high_memory);
        VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
-       vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
-       vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+       vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
+       vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
        mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
 }
@@ -263,7 +263,6 @@ static void __do_machine_kexec(void *data)
  */
 static void __machine_kexec(void *data)
 {
-       __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
index 4055f1c4981472b049f9029e49f53bbddc3cdb6e..b01ba460b7cad8e3e8164df891bb94ee241d72f0 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/elf.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <linux/ftrace.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/kasan.h>
@@ -23,6 +24,8 @@
 #include <asm/alternative.h>
 #include <asm/nospec-branch.h>
 #include <asm/facility.h>
+#include <asm/ftrace.lds.h>
+#include <asm/set_memory.h>
 
 #if 0
 #define DEBUGP printk
@@ -48,6 +51,13 @@ void *module_alloc(unsigned long size)
        return p;
 }
 
+#ifdef CONFIG_FUNCTION_TRACER
+void module_arch_cleanup(struct module *mod)
+{
+       module_memfree(mod->arch.trampolines_start);
+}
+#endif
+
 void module_arch_freeing_init(struct module *mod)
 {
        if (is_livepatch_module(mod) &&
@@ -466,6 +476,30 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                    write);
 }
 
+#ifdef CONFIG_FUNCTION_TRACER
+static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
+                                                   const Elf_Shdr *s)
+{
+       char *start, *end;
+       int numpages;
+       size_t size;
+
+       size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
+       numpages = DIV_ROUND_UP(size, PAGE_SIZE);
+       start = module_alloc(numpages * PAGE_SIZE);
+       if (!start)
+               return -ENOMEM;
+       set_memory_ro((unsigned long)start, numpages);
+       end = start + size;
+
+       me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
+       me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end;
+       me->arch.next_trampoline = me->arch.trampolines_start;
+
+       return 0;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
+
 int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
@@ -473,6 +507,9 @@ int module_finalize(const Elf_Ehdr *hdr,
        const Elf_Shdr *s;
        char *secstrings, *secname;
        void *aseg;
+#ifdef CONFIG_FUNCTION_TRACER
+       int ret;
+#endif
 
        if (IS_ENABLED(CONFIG_EXPOLINE) &&
            !nospec_disable && me->arch.plt_size) {
@@ -507,6 +544,14 @@ int module_finalize(const Elf_Ehdr *hdr,
                if (IS_ENABLED(CONFIG_EXPOLINE) &&
                    (str_has_prefix(secname, ".s390_return")))
                        nospec_revert(aseg, aseg + s->sh_size);
+
+#ifdef CONFIG_FUNCTION_TRACER
+               if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
+                       ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
+                       if (ret < 0)
+                               return ret;
+               }
+#endif /* CONFIG_FUNCTION_TRACER */
        }
 
        jump_label_apply_nops(me);
index 5a7420b23aa89bb4b40d469f75b1d81112db6e7c..4bef35b79b938bb4785505e0f55025b34c5b2497 100644 (file)
@@ -121,7 +121,7 @@ static void os_info_old_init(void)
 
        if (os_info_init)
                return;
-       if (!OLDMEM_BASE)
+       if (!oldmem_data.start)
                goto fail;
        if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
                goto fail;
index d7dc36ec0a60e987a84d17481ff923cc36fc5226..2e3bb633acf6ce442cd59652e116b39cd13ac696 100644 (file)
@@ -1138,7 +1138,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        int ret;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&cfset_ctrset_mutex);
        switch (cmd) {
        case S390_HWCTR_START:
@@ -1155,7 +1155,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        }
        mutex_unlock(&cfset_ctrset_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return ret;
 }
 
index 82df39b17bb559e42a0a7ac7271585e4b7c80ce5..d9d4a806979ed4555f76882d47808248bac3cf26 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/cpufeature.h>
 #include <linux/bitops.h>
 #include <linux/kernel.h>
+#include <linux/random.h>
 #include <linux/sched/mm.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <asm/elf.h>
 #include <asm/lowcore.h>
 #include <asm/param.h>
+#include <asm/sclp.h>
 #include <asm/smp.h>
 
+unsigned long __read_mostly elf_hwcap;
+char elf_platform[ELF_PLATFORM_SIZE];
+
 struct cpu_info {
        unsigned int cpu_mhz_dynamic;
        unsigned int cpu_mhz_static;
@@ -113,15 +118,33 @@ static void show_facilities(struct seq_file *m)
 static void show_cpu_summary(struct seq_file *m, void *v)
 {
        static const char *hwcap_str[] = {
-               "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-               "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
-               "vxe2", "vxp", "sort", "dflt"
-       };
-       static const char * const int_hwcap_str[] = {
-               "sie"
+               [HWCAP_NR_ESAN3]        = "esan3",
+               [HWCAP_NR_ZARCH]        = "zarch",
+               [HWCAP_NR_STFLE]        = "stfle",
+               [HWCAP_NR_MSA]          = "msa",
+               [HWCAP_NR_LDISP]        = "ldisp",
+               [HWCAP_NR_EIMM]         = "eimm",
+               [HWCAP_NR_DFP]          = "dfp",
+               [HWCAP_NR_HPAGE]        = "edat",
+               [HWCAP_NR_ETF3EH]       = "etf3eh",
+               [HWCAP_NR_HIGH_GPRS]    = "highgprs",
+               [HWCAP_NR_TE]           = "te",
+               [HWCAP_NR_VXRS]         = "vx",
+               [HWCAP_NR_VXRS_BCD]     = "vxd",
+               [HWCAP_NR_VXRS_EXT]     = "vxe",
+               [HWCAP_NR_GS]           = "gs",
+               [HWCAP_NR_VXRS_EXT2]    = "vxe2",
+               [HWCAP_NR_VXRS_PDE]     = "vxp",
+               [HWCAP_NR_SORT]         = "sort",
+               [HWCAP_NR_DFLT]         = "dflt",
+               [HWCAP_NR_VXRS_PDE2]    = "vxp2",
+               [HWCAP_NR_NNPA]         = "nnpa",
+               [HWCAP_NR_PCI_MIO]      = "pcimio",
+               [HWCAP_NR_SIE]          = "sie",
        };
        int i, cpu;
 
+       BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
        seq_printf(m, "vendor_id       : IBM/S390\n"
                   "# processors    : %i\n"
                   "bogomips per cpu: %lu.%02lu\n",
@@ -132,9 +155,6 @@ static void show_cpu_summary(struct seq_file *m, void *v)
        for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
                if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                        seq_printf(m, "%s ", hwcap_str[i]);
-       for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
-               if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
-                       seq_printf(m, "%s ", int_hwcap_str[i]);
        seq_puts(m, "\n");
        show_facilities(m);
        show_cacheinfo(m);
@@ -149,6 +169,141 @@ static void show_cpu_summary(struct seq_file *m, void *v)
        }
 }
 
+static int __init setup_hwcaps(void)
+{
+       /* instructions named N3, "backported" to esa-mode */
+       if (test_facility(0))
+               elf_hwcap |= HWCAP_ESAN3;
+
+       /* z/Architecture mode active */
+       elf_hwcap |= HWCAP_ZARCH;
+
+       /* store-facility-list-extended */
+       if (test_facility(7))
+               elf_hwcap |= HWCAP_STFLE;
+
+       /* message-security assist */
+       if (test_facility(17))
+               elf_hwcap |= HWCAP_MSA;
+
+       /* long-displacement */
+       if (test_facility(19))
+               elf_hwcap |= HWCAP_LDISP;
+
+       /* extended-immediate */
+       if (test_facility(21))
+               elf_hwcap |= HWCAP_EIMM;
+
+       /* extended-translation facility 3 enhancement */
+       if (test_facility(22) && test_facility(30))
+               elf_hwcap |= HWCAP_ETF3EH;
+
+       /* decimal floating point & perform floating point operation */
+       if (test_facility(42) && test_facility(44))
+               elf_hwcap |= HWCAP_DFP;
+
+       /* huge page support */
+       if (MACHINE_HAS_EDAT1)
+               elf_hwcap |= HWCAP_HPAGE;
+
+       /* 64-bit register support for 31-bit processes */
+       elf_hwcap |= HWCAP_HIGH_GPRS;
+
+       /* transactional execution */
+       if (MACHINE_HAS_TE)
+               elf_hwcap |= HWCAP_TE;
+
+       /*
+        * Vector extension can be disabled with the "novx" parameter.
+        * Use MACHINE_HAS_VX instead of facility bit 129.
+        */
+       if (MACHINE_HAS_VX) {
+               elf_hwcap |= HWCAP_VXRS;
+               if (test_facility(134))
+                       elf_hwcap |= HWCAP_VXRS_BCD;
+               if (test_facility(135))
+                       elf_hwcap |= HWCAP_VXRS_EXT;
+               if (test_facility(148))
+                       elf_hwcap |= HWCAP_VXRS_EXT2;
+               if (test_facility(152))
+                       elf_hwcap |= HWCAP_VXRS_PDE;
+               if (test_facility(192))
+                       elf_hwcap |= HWCAP_VXRS_PDE2;
+       }
+
+       if (test_facility(150))
+               elf_hwcap |= HWCAP_SORT;
+
+       if (test_facility(151))
+               elf_hwcap |= HWCAP_DFLT;
+
+       if (test_facility(165))
+               elf_hwcap |= HWCAP_NNPA;
+
+       /* guarded storage */
+       if (MACHINE_HAS_GS)
+               elf_hwcap |= HWCAP_GS;
+
+       if (MACHINE_HAS_PCI_MIO)
+               elf_hwcap |= HWCAP_PCI_MIO;
+
+       /* virtualization support */
+       if (sclp.has_sief2)
+               elf_hwcap |= HWCAP_SIE;
+
+       return 0;
+}
+arch_initcall(setup_hwcaps);
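User space sees the bits assembled above through the ELF auxiliary vector. A hypothetical consumer, assuming the vector facility sits at bit 11 as in the hwcap_str table further up:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            if (hwcap & (1UL << 11))        /* HWCAP_NR_VXRS, assumed */
                    puts("vector facility available");
            return 0;
    }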
+
+static int __init setup_elf_platform(void)
+{
+       struct cpuid cpu_id;
+
+       get_cpu_id(&cpu_id);
+       add_device_randomness(&cpu_id, sizeof(cpu_id));
+       switch (cpu_id.machine) {
+       case 0x2064:
+       case 0x2066:
+       default:        /* Use "z900" as default for 64 bit kernels. */
+               strcpy(elf_platform, "z900");
+               break;
+       case 0x2084:
+       case 0x2086:
+               strcpy(elf_platform, "z990");
+               break;
+       case 0x2094:
+       case 0x2096:
+               strcpy(elf_platform, "z9-109");
+               break;
+       case 0x2097:
+       case 0x2098:
+               strcpy(elf_platform, "z10");
+               break;
+       case 0x2817:
+       case 0x2818:
+               strcpy(elf_platform, "z196");
+               break;
+       case 0x2827:
+       case 0x2828:
+               strcpy(elf_platform, "zEC12");
+               break;
+       case 0x2964:
+       case 0x2965:
+               strcpy(elf_platform, "z13");
+               break;
+       case 0x3906:
+       case 0x3907:
+               strcpy(elf_platform, "z14");
+               break;
+       case 0x8561:
+       case 0x8562:
+               strcpy(elf_platform, "z15");
+               break;
+       }
+       return 0;
+}
+arch_initcall(setup_elf_platform);
+
 static void show_cpu_topology(struct seq_file *m, unsigned long n)
 {
 #ifdef CONFIG_SCHED_TOPOLOGY
@@ -210,7 +365,7 @@ static inline void *c_update(loff_t *pos)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-       get_online_cpus();
+       cpus_read_lock();
        return c_update(pos);
 }
 
@@ -222,7 +377,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void c_stop(struct seq_file *m, void *v)
 {
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 const struct seq_operations cpuinfo_op = {
index ff0f9e8389162799e3453e348dc95402c5e04973..fe14beb338e5cdef26299862d38b46a98c3ca8a8 100644 (file)
@@ -89,27 +89,71 @@ EXPORT_SYMBOL(console_devno);
 unsigned int console_irq = -1;
 EXPORT_SYMBOL(console_irq);
 
-unsigned long elf_hwcap __read_mostly = 0;
-char elf_platform[ELF_PLATFORM_SIZE];
+/*
+ * Some code and data needs to stay below 2 GB, even when the kernel would be
+ * relocated above 2 GB, because it has to use 31 bit addresses.
+ * Such code and data is part of the .amode31 section.
+ */
+unsigned long __amode31_ref __samode31 = __pa(&_samode31);
+unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
+unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
+unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
+struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
+struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
+
+/*
+ * Control registers CR2, CR5 and CR15 are initialized with addresses
+ * of tables that must be placed below 2G, which is handled by the AMODE31
+ * sections.
+ * Because the AMODE31 sections are relocated below 2G at startup, the
+ * content of control registers CR2, CR5 and CR15 must be updated with the
+ * new addresses after the relocation. The control registers are first set
+ * up in head64.S and then updated again after the AMODE31 relocation. The
+ * relevant AMODE31 tables are accessed indirectly via pointers placed in
+ * the .amode31.refs linker section. Those pointers get updated
+ * automatically during the AMODE31 relocation and always contain a valid
+ * address within the AMODE31 sections.
+ */
+
+static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
+
+static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
+       [1] = 0xffffffffffffffff
+};
+
+static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0,
+       0x80000000, 0, 0, 0
+};
+
+static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
+       0, 0, 0x89000000, 0,
+       0, 0, 0x8a000000, 0
+};
 
-unsigned long int_hwcap = 0;
+static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
+static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
+static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
+static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
 struct mem_detect_info __bootdata(mem_detect);
+struct initrd_data __bootdata(initrd_data);
 
-struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
-struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
-unsigned long __bootdata_preserved(__stext_dma);
-unsigned long __bootdata_preserved(__etext_dma);
-unsigned long __bootdata_preserved(__sdma);
-unsigned long __bootdata_preserved(__edma);
 unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned int __bootdata_preserved(zlib_dfltcc_support);
 EXPORT_SYMBOL(zlib_dfltcc_support);
 u64 __bootdata_preserved(stfle_fac_list[16]);
 EXPORT_SYMBOL(stfle_fac_list);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+struct oldmem_data __bootdata_preserved(oldmem_data);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -254,7 +298,7 @@ static void __init setup_zfcpdump(void)
 {
        if (!is_ipl_type_dump())
                return;
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return;
        strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
        console_loglevel = 2;
@@ -421,7 +465,7 @@ static void __init setup_lowcore_dat_off(void)
        lc->restart_stack = (unsigned long) restart_stack;
        lc->restart_fn = (unsigned long) do_restart;
        lc->restart_data = 0;
-       lc->restart_source = -1UL;
+       lc->restart_source = -1U;
 
        mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
        if (!mcck_stack)
@@ -450,12 +494,19 @@ static void __init setup_lowcore_dat_off(void)
 
 static void __init setup_lowcore_dat_on(void)
 {
+       struct lowcore *lc = lowcore_ptr[0];
+
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
+       __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
+       mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
+       mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
+       memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
+                       sizeof(S390_lowcore.cregs_save_area));
 }
 
 static struct resource code_resource = {
@@ -610,9 +661,9 @@ static void __init reserve_crashkernel(void)
                return;
        }
 
-       low = crash_base ?: OLDMEM_BASE;
+       low = crash_base ?: oldmem_data.start;
        high = low + crash_size;
-       if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
+       if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
                /* The crashkernel fits into OLDMEM, reuse OLDMEM */
                crash_base = low;
        } else {
@@ -639,7 +690,7 @@ static void __init reserve_crashkernel(void)
        if (register_memory_notifier(&kdump_mem_nb))
                return;
 
-       if (!OLDMEM_BASE && MACHINE_IS_VM)
+       if (!oldmem_data.start && MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
@@ -658,11 +709,11 @@ static void __init reserve_crashkernel(void)
 static void __init reserve_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (!INITRD_START || !INITRD_SIZE)
+       if (!initrd_data.start || !initrd_data.size)
                return;
-       initrd_start = INITRD_START;
-       initrd_end = initrd_start + INITRD_SIZE;
-       memblock_reserve(INITRD_START, INITRD_SIZE);
+       initrd_start = initrd_data.start;
+       initrd_end = initrd_start + initrd_data.size;
+       memblock_reserve(initrd_data.start, initrd_data.size);
 #endif
 }
 
@@ -732,10 +783,10 @@ static void __init memblock_add_mem_detect_info(void)
 static void __init check_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (INITRD_START && INITRD_SIZE &&
-           !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
+       if (initrd_data.start && initrd_data.size &&
+           !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
                pr_err("The initial RAM disk does not fit into the memory\n");
-               memblock_free(INITRD_START, INITRD_SIZE);
+               memblock_free(initrd_data.start, initrd_data.size);
                initrd_start = initrd_end = 0;
        }
 #endif
@@ -748,10 +799,10 @@ static void __init reserve_kernel(void)
 {
        unsigned long start_pfn = PFN_UP(__pa(_end));
 
-       memblock_reserve(0, HEAD_END);
+       memblock_reserve(0, STARTUP_NORMAL_OFFSET);
+       memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
        memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
                         - (unsigned long)_stext);
-       memblock_reserve(__sdma, __edma - __sdma);
 }
 
 static void __init setup_memory(void)
@@ -771,152 +822,52 @@ static void __init setup_memory(void)
        memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }
 
-/*
- * Setup hardware capabilities.
- */
-static int __init setup_hwcaps(void)
+static void __init relocate_amode31_section(void)
 {
-       static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
-       struct cpuid cpu_id;
-       int i;
-
-       /*
-        * The store facility list bits numbers as found in the principles
-        * of operation are numbered with bit 1UL<<31 as number 0 to
-        * bit 1UL<<0 as number 31.
-        *   Bit 0: instructions named N3, "backported" to esa-mode
-        *   Bit 2: z/Architecture mode is active
-        *   Bit 7: the store-facility-list-extended facility is installed
-        *   Bit 17: the message-security assist is installed
-        *   Bit 19: the long-displacement facility is installed
-        *   Bit 21: the extended-immediate facility is installed
-        *   Bit 22: extended-translation facility 3 is installed
-        *   Bit 30: extended-translation facility 3 enhancement facility
-        * These get translated to:
-        *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
-        *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
-        *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
-        *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
-        */
-       for (i = 0; i < 6; i++)
-               if (test_facility(stfl_bits[i]))
-                       elf_hwcap |= 1UL << i;
-
-       if (test_facility(22) && test_facility(30))
-               elf_hwcap |= HWCAP_S390_ETF3EH;
-
-       /*
-        * Check for additional facilities with store-facility-list-extended.
-        * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
-        * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
-        * as stored by stfl, bits 32-xxx contain additional facilities.
-        * How many facility words are stored depends on the number of
-        * doublewords passed to the instruction. The additional facilities
-        * are:
-        *   Bit 42: decimal floating point facility is installed
-        *   Bit 44: perform floating point operation facility is installed
-        * translated to:
-        *   HWCAP_S390_DFP bit 6 (42 && 44).
-        */
-       if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
-               elf_hwcap |= HWCAP_S390_DFP;
-
-       /*
-        * Huge page support HWCAP_S390_HPAGE is bit 7.
-        */
-       if (MACHINE_HAS_EDAT1)
-               elf_hwcap |= HWCAP_S390_HPAGE;
-
-       /*
-        * 64-bit register support for 31-bit processes
-        * HWCAP_S390_HIGH_GPRS is bit 9.
-        */
-       elf_hwcap |= HWCAP_S390_HIGH_GPRS;
-
-       /*
-        * Transactional execution support HWCAP_S390_TE is bit 10.
-        */
-       if (MACHINE_HAS_TE)
-               elf_hwcap |= HWCAP_S390_TE;
-
-       /*
-        * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
-        * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
-        * instead of facility bit 129.
-        */
-       if (MACHINE_HAS_VX) {
-               elf_hwcap |= HWCAP_S390_VXRS;
-               if (test_facility(134))
-                       elf_hwcap |= HWCAP_S390_VXRS_BCD;
-               if (test_facility(135))
-                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
-               if (test_facility(148))
-                       elf_hwcap |= HWCAP_S390_VXRS_EXT2;
-               if (test_facility(152))
-                       elf_hwcap |= HWCAP_S390_VXRS_PDE;
-       }
-       if (test_facility(150))
-               elf_hwcap |= HWCAP_S390_SORT;
-       if (test_facility(151))
-               elf_hwcap |= HWCAP_S390_DFLT;
-
-       /*
-        * Guarded storage support HWCAP_S390_GS is bit 12.
-        */
-       if (MACHINE_HAS_GS)
-               elf_hwcap |= HWCAP_S390_GS;
-
-       get_cpu_id(&cpu_id);
-       add_device_randomness(&cpu_id, sizeof(cpu_id));
-       switch (cpu_id.machine) {
-       case 0x2064:
-       case 0x2066:
-       default:        /* Use "z900" as default for 64 bit kernels. */
-               strcpy(elf_platform, "z900");
-               break;
-       case 0x2084:
-       case 0x2086:
-               strcpy(elf_platform, "z990");
-               break;
-       case 0x2094:
-       case 0x2096:
-               strcpy(elf_platform, "z9-109");
-               break;
-       case 0x2097:
-       case 0x2098:
-               strcpy(elf_platform, "z10");
-               break;
-       case 0x2817:
-       case 0x2818:
-               strcpy(elf_platform, "z196");
-               break;
-       case 0x2827:
-       case 0x2828:
-               strcpy(elf_platform, "zEC12");
-               break;
-       case 0x2964:
-       case 0x2965:
-               strcpy(elf_platform, "z13");
-               break;
-       case 0x3906:
-       case 0x3907:
-               strcpy(elf_platform, "z14");
-               break;
-       case 0x8561:
-       case 0x8562:
-               strcpy(elf_platform, "z15");
-               break;
-       }
-
-       /*
-        * Virtualization support HWCAP_INT_SIE is bit 0.
-        */
-       if (sclp.has_sief2)
-               int_hwcap |= HWCAP_INT_SIE;
+       unsigned long amode31_addr, amode31_size;
+       long amode31_offset;
+       long *ptr;
+
+       /* Allocate a new AMODE31 capable memory region */
+       amode31_size = __eamode31 - __samode31;
+       pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
+       amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
+       if (!amode31_addr)
+               panic("Failed to allocate memory for AMODE31 section\n");
+       amode31_offset = amode31_addr - __samode31;
+
+       /* Move original AMODE31 section to the new one */
+       memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
+       /* Zero out the old AMODE31 section to catch invalid accesses within it */
+       memset((void *)__samode31, 0, amode31_size);
+
+       /* Update all AMODE31 region references */
+       for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
+               *ptr += amode31_offset;
+}
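
The relocation above is a generic fix-up pattern: copy a position-dependent blob to a freshly allocated base, poison the old copy, then walk a table of recorded pointer slots and rebase each one by the move offset. A minimal user-space C sketch of that pattern (all names here are illustrative, none are kernel symbols):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char blob[64];      /* stand-in for the section being moved */
    static long ref0, ref1;    /* slots that hold pointers into blob   */
    static long *refs[] = { &ref0, &ref1 };

    int main(void)
    {
        long old_base = (long)blob;
        char *new_base = malloc(sizeof(blob));
        long offset;
        size_t i;

        if (!new_base)
            return 1;
        ref0 = old_base + 8;                   /* two recorded references */
        ref1 = old_base + 40;

        memmove(new_base, blob, sizeof(blob)); /* move the section        */
        memset(blob, 0, sizeof(blob));         /* catch stale accesses    */
        offset = (long)new_base - old_base;
        for (i = 0; i < 2; i++)                /* rebase every reference  */
            *refs[i] += offset;

        printf("moved by %+ld, ref0 now %#lx\n", offset, (unsigned long)ref0);
        free(new_base);
        return 0;
    }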
 
-       return 0;
+/* This must be called after AMODE31 relocation */
+static void __init setup_cr(void)
+{
+       union ctlreg2 cr2;
+       union ctlreg5 cr5;
+       union ctlreg15 cr15;
+
+       __ctl_duct[1] = (unsigned long)__ctl_aste;
+       __ctl_duct[2] = (unsigned long)__ctl_aste;
+       __ctl_duct[4] = (unsigned long)__ctl_duald;
+
+       /* Update control registers CR2, CR5 and CR15 */
+       __ctl_store(cr2.val, 2, 2);
+       __ctl_store(cr5.val, 5, 5);
+       __ctl_store(cr15.val, 15, 15);
+       cr2.ducto = (unsigned long)__ctl_duct >> 6;
+       cr5.pasteo = (unsigned long)__ctl_duct >> 6;
+       cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
+       __ctl_load(cr2.val, 2, 2);
+       __ctl_load(cr5.val, 5, 5);
+       __ctl_load(cr15.val, 15, 15);
 }
-arch_initcall(setup_hwcaps);
 
 /*
  * Add system information as device randomness
@@ -1059,6 +1010,9 @@ void __init setup_arch(char **cmdline_p)
 
        free_mem_detect_info();
 
+       relocate_amode31_section();
+       setup_cr();
+
        setup_uv();
        setup_memory_end();
        setup_memory();
index 78ef53b299588e74bc46905e5b4bdaf075c7cc5f..307f5d99514d70d811fbf800dec48164bdd2cb87 100644 (file)
@@ -533,9 +533,3 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
         */
        restore_saved_sigmask();
 }
-
-void do_notify_resume(struct pt_regs *regs)
-{
-       tracehook_notify_resume(regs);
-       rseq_handle_notify_resume(NULL, regs);
-}
index 8984711f72ede59f650bdb9c759249f9f5e67901..2a991e43ead3d23aafe27681e94b8d4c11c70eb5 100644 (file)
@@ -252,6 +252,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
+       lc->restart_flags = RESTART_FLAG_CTLREGS;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->percpu_offset = __per_cpu_offset[cpu];
@@ -294,10 +295,10 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 
        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
-       lc->restart_stack = lc->nodat_stack;
+       lc->restart_stack = lc->kernel_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
-       lc->restart_source = -1UL;
+       lc->restart_source = -1U;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
 }
 
@@ -311,12 +312,12 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
        func(data);     /* should not return */
 }
 
-static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
-                                               pcpu_delegate_fn *func,
-                                               void *data, unsigned long stack)
+static void pcpu_delegate(struct pcpu *pcpu,
+                         pcpu_delegate_fn *func,
+                         void *data, unsigned long stack)
 {
        struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
-       unsigned long source_cpu = stap();
+       unsigned int source_cpu = stap();
 
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        if (pcpu->address == source_cpu) {
@@ -569,6 +570,9 @@ static void smp_ctl_bit_callback(void *info)
        __ctl_load(cregs, 0, 15);
 }
 
+static DEFINE_SPINLOCK(ctl_lock);
+static unsigned long ctlreg;
+
 /*
  * Set a bit in a control register of all cpus
  */
@@ -576,6 +580,11 @@ void smp_ctl_set_bit(int cr, int bit)
 {
        struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
 
+       spin_lock(&ctl_lock);
+       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
+       __set_bit(bit, &ctlreg);
+       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -587,6 +596,11 @@ void smp_ctl_clear_bit(int cr, int bit)
 {
        struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
 
+       spin_lock(&ctl_lock);
+       memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
+       __clear_bit(bit, &ctlreg);
+       memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
+       spin_unlock(&ctl_lock);
        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 }
 EXPORT_SYMBOL(smp_ctl_clear_bit);
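
Both helpers now follow the same shape: serialize a read-modify-write of the saved control-register image under ctl_lock, then broadcast the change to every CPU. A hedged user-space sketch of that shape, with a pthread mutex standing in for the spinlock and a plain function for the broadcast (all names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ctl_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long saved_creg;   /* stand-in for cregs_save_area */

    static void broadcast(unsigned long val)
    {
        /* In the kernel this is on_each_cpu(); here we just report. */
        printf("publish %#lx to all CPUs\n", val);
    }

    static void ctl_set_bit(int bit)
    {
        unsigned long tmp;

        pthread_mutex_lock(&ctl_lock);     /* spin_lock(&ctl_lock)   */
        tmp = saved_creg;                  /* read the saved image   */
        tmp |= 1UL << bit;                 /* __set_bit()            */
        saved_creg = tmp;                  /* write it back          */
        pthread_mutex_unlock(&ctl_lock);
        broadcast(tmp);                    /* on_each_cpu() callback */
    }

    int main(void)
    {
        ctl_set_bit(2);
        ctl_set_bit(5);
        return 0;
    }
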
@@ -673,7 +687,7 @@ void __init smp_save_dump_cpus(void)
        unsigned long page;
        bool is_boot_cpu;
 
-       if (!(OLDMEM_BASE || is_ipl_type_dump()))
+       if (!(oldmem_data.start || is_ipl_type_dump()))
                /* No previous system present, normal boot. */
                return;
        /* Allocate a page as dumping area for the store status sigps */
@@ -704,12 +718,12 @@ void __init smp_save_dump_cpus(void)
                 * these registers an SCLP request is required which is
                 * done by drivers/s390/char/zcore.c:init_cpu_info()
                 */
-               if (!is_boot_cpu || OLDMEM_BASE)
+               if (!is_boot_cpu || oldmem_data.start)
                        /* Get the CPU registers */
                        smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
        }
        memblock_free(page, PAGE_SIZE);
-       diag_dma_ops.diag308_reset();
+       diag_amode31_ops.diag308_reset();
        pcpu_set_smt(0);
 }
 #endif /* CONFIG_CRASH_DUMP */
@@ -793,7 +807,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
        u16 core_id;
        int nr, i;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -816,7 +830,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
                nr += smp_add_core(&info->core[i], &avail, configured, early);
        }
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return nr;
 }
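
get_online_cpus()/put_online_cpus() are the deprecated names; the replacements make explicit that callers take the CPU-hotplug lock for reading. As an analogy only (not the kernel implementation), the relationship looks like a reader/writer lock:

    #include <pthread.h>

    static pthread_rwlock_t hotplug = PTHREAD_RWLOCK_INITIALIZER;

    static void rescan(void)
    {
        pthread_rwlock_rdlock(&hotplug);   /* cpus_read_lock()   */
        /* ... walk a now-stable set of CPUs ... */
        pthread_rwlock_unlock(&hotplug);   /* cpus_read_unlock() */
    }

    static void hotplug_cpu(void)
    {
        pthread_rwlock_wrlock(&hotplug);   /* hotplug runs exclusively */
        /* ... add or remove a CPU ... */
        pthread_rwlock_unlock(&hotplug);
    }

    int main(void)
    {
        rescan();
        hotplug_cpu();
        return 0;
    }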
 
@@ -868,11 +882,19 @@ void __init smp_detect_cpus(void)
        memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
-static void smp_init_secondary(void)
+/*
+ *     Activate a secondary processor.
+ */
+static void smp_start_secondary(void *cpuvoid)
 {
        int cpu = raw_smp_processor_id();
 
        S390_lowcore.last_update_clock = get_tod_clock();
+       S390_lowcore.restart_stack = (unsigned long)restart_stack;
+       S390_lowcore.restart_fn = (unsigned long)do_restart;
+       S390_lowcore.restart_data = 0;
+       S390_lowcore.restart_source = -1U;
+       S390_lowcore.restart_flags = 0;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        cpu_init();
        rcu_cpu_starting(cpu);
@@ -892,20 +914,6 @@ static void smp_init_secondary(void)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-/*
- *     Activate a secondary processor.
- */
-static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
-{
-       S390_lowcore.restart_stack = (unsigned long) restart_stack;
-       S390_lowcore.restart_fn = (unsigned long) do_restart;
-       S390_lowcore.restart_data = 0;
-       S390_lowcore.restart_source = -1UL;
-       __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
-       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
-       call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
-}
-
 /* Upping and downing of CPUs */
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
@@ -1055,7 +1063,7 @@ static ssize_t cpu_configure_store(struct device *dev,
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
@@ -1104,7 +1112,7 @@ static ssize_t cpu_configure_store(struct device *dev,
        }
 out:
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return rc ? rc : count;
 }
 static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
similarity index 69%
rename from arch/s390/boot/text_dma.S
rename to arch/s390/kernel/text_amode31.S
index 5ff5fee028016158500ebf2ef65d48d283746e32..868e4a604110b41a95d5f75ebafe9c709e7bb648 100644 (file)
@@ -9,14 +9,14 @@
 #include <asm/errno.h>
 #include <asm/sigp.h>
 
-       .section .dma.text,"ax"
+       .section .amode31.text,"ax"
 /*
 * Simplified version of expoline thunk. The normal thunks cannot be used here,
  * because they might be more than 2 GB away, and not reachable by the relative
  * branch. No comdat, exrl, etc. optimizations used here, because it only
  * affects a few functions that are not performance-relevant.
  */
-       .macro BR_EX_DMA_r14
+       .macro BR_EX_AMODE31_r14
        larl    %r1,0f
        ex      0,0(%r1)
        j       .
@@ -24,9 +24,9 @@
        .endm
 
 /*
- * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ * int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
  */
-ENTRY(_diag14_dma)
+ENTRY(_diag14_amode31)
        lgr     %r1,%r2
        lgr     %r2,%r3
        lgr     %r3,%r4
@@ -39,14 +39,14 @@ ENTRY(_diag14_dma)
 .Ldiag14_fault:
        sam64
        lgfr    %r2,%r5
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
-ENDPROC(_diag14_dma)
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_amode31)
 
 /*
- * int _diag210_dma(struct diag210 *addr)
+ * int _diag210_amode31(struct diag210 *addr)
  */
-ENTRY(_diag210_dma)
+ENTRY(_diag210_amode31)
        lgr     %r1,%r2
        lhi     %r2,-1
        sam31
@@ -57,40 +57,40 @@ ENTRY(_diag210_dma)
 .Ldiag210_fault:
        sam64
        lgfr    %r2,%r2
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
-ENDPROC(_diag210_dma)
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_amode31)
 
 /*
- * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
+ * int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
  */
-ENTRY(_diag26c_dma)
+ENTRY(_diag26c_amode31)
        lghi    %r5,-EOPNOTSUPP
        sam31
        diag    %r2,%r4,0x26c
 .Ldiag26c_ex:
        sam64
        lgfr    %r2,%r5
-       BR_EX_DMA_r14
-       EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
-ENDPROC(_diag26c_dma)
+       BR_EX_AMODE31_r14
+       EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_amode31)
 
 /*
- * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
+ * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
  */
-ENTRY(_diag0c_dma)
+ENTRY(_diag0c_amode31)
        sam31
        diag    %r2,%r2,0x0c
        sam64
-       BR_EX_DMA_r14
-ENDPROC(_diag0c_dma)
+       BR_EX_AMODE31_r14
+ENDPROC(_diag0c_amode31)
 
 /*
- * void _diag308_reset_dma(void)
+ * void _diag308_reset_amode31(void)
  *
  * Calls diag 308 subcode 1 and continues execution
  */
-ENTRY(_diag308_reset_dma)
+ENTRY(_diag308_reset_amode31)
        larl    %r4,.Lctlregs           # Save control registers
        stctg   %c0,%c15,0(%r4)
        lg      %r2,0(%r4)              # Disable lowcore protection
@@ -107,7 +107,7 @@ ENTRY(_diag308_reset_dma)
        larl    %r4,.Lcontinue_psw      # Save PSW flags
        epsw    %r2,%r3
        stm     %r2,%r3,0(%r4)
-       larl    %r4,restart_part2       # Setup restart PSW at absolute 0
+       larl    %r4,.Lrestart_part2     # Setup restart PSW at absolute 0
        larl    %r3,.Lrestart_diag308_psw
        og      %r4,0(%r3)              # Save PSW
        lghi    %r3,0
@@ -115,7 +115,7 @@ ENTRY(_diag308_reset_dma)
        lghi    %r1,1
        lghi    %r0,0
        diag    %r0,%r1,0x308
-restart_part2:
+.Lrestart_part2:
        lhi     %r0,0                   # Load r0 with zero
        lhi     %r1,2                   # Use mode 2 = ESAME (dump)
        sigp    %r1,%r0,SIGP_SET_ARCHITECTURE   # Switch to ESAME mode
@@ -127,19 +127,21 @@ restart_part2:
        larl    %r4,.Lprefix            # Restore prefix register
        spx     0(%r4)
        larl    %r4,.Lcontinue_psw      # Restore PSW flags
+       larl    %r2,.Lcontinue
+       stg     %r2,8(%r4)
        lpswe   0(%r4)
 .Lcontinue:
-       BR_EX_DMA_r14
-ENDPROC(_diag308_reset_dma)
+       BR_EX_AMODE31_r14
+ENDPROC(_diag308_reset_amode31)
 
-       .section .dma.data,"aw",@progbits
+       .section .amode31.data,"aw",@progbits
 .align 8
 .Lrestart_diag308_psw:
        .long   0x00080000,0x80000000
 
 .align 8
 .Lcontinue_psw:
-       .quad   0,.Lcontinue
+       .quad   0,0
 
 .align 8
 .Lctlreg0:
index 26aa2614ee3526638895f2affea1218caef9102c..d2458a29618fa3eb5f4ad244615dee4d6deea723 100644 (file)
@@ -406,7 +406,7 @@ static ssize_t dispatching_store(struct device *dev,
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
@@ -417,7 +417,7 @@ static ssize_t dispatching_store(struct device *dev,
        topology_expect_change();
 out:
        mutex_unlock(&smp_cpu_state_mutex);
-       put_online_cpus();
+       cpus_read_unlock();
        return rc ? rc : count;
 }
 static DEVICE_ATTR_RW(dispatching);
index 76947275fe8bd1c21b58e1f353d27b9adde1c72f..bcefc2173de45b081c5287251f390ed746ae49ef 100644 (file)
@@ -291,7 +291,7 @@ static void __init test_monitor_call(void)
 
 void __init trap_init(void)
 {
-       sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
+       sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
        local_mcck_enable();
        test_monitor_call();
 }
index aeb0a15bcbb7167bdf67c303534b3b5d1d752ee8..5a656c7b7a67aee4750be534dfd941cb49983ed0 100644 (file)
@@ -51,24 +51,9 @@ void __init setup_uv(void)
 {
        unsigned long uv_stor_base;
 
-       /*
-        * keep these conditions in line with has_uv_sec_stor_limit()
-        */
        if (!is_prot_virt_host())
                return;
 
-       if (is_prot_virt_guest()) {
-               prot_virt_host = 0;
-               pr_warn("Protected virtualization not available in protected guests.");
-               return;
-       }
-
-       if (!test_facility(158)) {
-               prot_virt_host = 0;
-               pr_warn("Protected virtualization not supported by the hardware.");
-               return;
-       }
-
        uv_stor_base = (unsigned long)memblock_alloc_try_nid(
                uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
                MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
index 3457dcf103965339a5e41662b2a20c35746b05ff..e3e6ac5686df54dc09e2f77b38061dc0a93d42ad 100644 (file)
@@ -36,6 +36,7 @@ CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
index 2a2092ce19f1b2560ac987d36ae1921c63c57093..6568de2367010af85f0700fb3bf45226ebbc2e02 100644 (file)
@@ -39,6 +39,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 # Force dependency (incbin is bad)
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
index 4c0e19145cc66dd6191e5490ca8bdc2e14ec30c9..63bdb9e1bfc1343234bf71398ddc27e5f0de4e8b 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ftrace.lds.h>
 
 /*
  * Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -46,6 +47,7 @@ SECTIONS
                KPROBES_TEXT
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
+               FTRACE_HOTPATCH_TRAMPOLINES_TEXT
                *(.text.*_indirect_*)
                *(.fixup)
                *(.gnu.warning)
@@ -71,6 +73,13 @@ SECTIONS
        RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
        BOOT_DATA_PRESERVED
 
+       . = ALIGN(8);
+       .amode31.refs : {
+               _start_amode31_refs = .;
+               *(.amode31.refs)
+               _end_amode31_refs = .;
+       }
+
        _edata = .;             /* End of data section */
 
        /* will be freed after init */
@@ -136,6 +145,32 @@ SECTIONS
 
        BOOT_DATA
 
+       /*
+        * .amode31 section for code, data, ex_table that need to stay
+        * below 2 GB, even when the kernel is relocated above 2 GB.
+        */
+       . = ALIGN(PAGE_SIZE);
+       _samode31 = .;
+       .amode31.text : {
+               _stext_amode31 = .;
+               *(.amode31.text)
+               *(.amode31.text.*_indirect_*)
+               . = ALIGN(PAGE_SIZE);
+               _etext_amode31 = .;
+       }
+       . = ALIGN(16);
+       .amode31.ex_table : {
+               _start_amode31_ex_table = .;
+               KEEP(*(.amode31.ex_table))
+               _stop_amode31_ex_table = .;
+       }
+       . = ALIGN(PAGE_SIZE);
+       .amode31.data : {
+               *(.amode31.data)
+       }
+       . = ALIGN(PAGE_SIZE);
+       _eamode31 = .;
+
        /* early.c uses stsi, which requires page aligned data. */
        . = ALIGN(PAGE_SIZE);
        INIT_DATA_SECTION(0x100)
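
The begin/end symbols the script defines (_samode31, _eamode31, _start_amode31_refs, ...) are consumed from C as address markers, as relocate_amode31_section() does earlier in this diff. A self-contained sketch of the consumption pattern (the array below is a stand-in so the example links; in the kernel these are extern symbols placed by the linker):

    #include <stdio.h>

    /* Stand-ins for linker-provided markers; only their addresses
     * are meaningful, never their contents. */
    static char __samode31[4096];
    #define __eamode31 (__samode31 + sizeof(__samode31))

    int main(void)
    {
        unsigned long size = (unsigned long)(__eamode31 - __samode31);

        printf("section at %p, size %#lx\n", (void *)__samode31, size);
        return 0;
    }
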
index f289afeb3f3169c26723f6de6f9f42eba438c2b1..bccbf394ae7ed543762f08cc5783c658cf14a053 100644 (file)
@@ -7,17 +7,10 @@
  *              Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 
-#include <linux/sched.h>
+#include <linux/processor.h>
 #include <linux/delay.h>
-#include <linux/timex.h>
-#include <linux/export.h>
-#include <linux/irqflags.h>
-#include <linux/interrupt.h>
-#include <linux/jump_label.h>
-#include <linux/irq.h>
-#include <asm/vtimer.h>
 #include <asm/div64.h>
-#include <asm/idle.h>
+#include <asm/timex.h>
 
 void __delay(unsigned long loops)
 {
index e40a30647d99d5c68304f09c72488c4e53f785e2..0b0c8c2849530c4f0294068c8325c57db77f9dba 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/mm.h>
+#include <linux/kfence.h>
 #include <linux/kasan.h>
 #include <asm/ptdump.h>
 #include <asm/kasan.h>
@@ -21,6 +22,10 @@ enum address_markers_idx {
        IDENTITY_BEFORE_END_NR,
        KERNEL_START_NR,
        KERNEL_END_NR,
+#ifdef CONFIG_KFENCE
+       KFENCE_START_NR,
+       KFENCE_END_NR,
+#endif
        IDENTITY_AFTER_NR,
        IDENTITY_AFTER_END_NR,
 #ifdef CONFIG_KASAN
@@ -40,6 +45,10 @@ static struct addr_marker address_markers[] = {
        [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
        [KERNEL_START_NR]       = {(unsigned long)_stext, "Kernel Image Start"},
        [KERNEL_END_NR]         = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KFENCE
+       [KFENCE_START_NR]       = {0, "KFence Pool Start"},
+       [KFENCE_END_NR]         = {0, "KFence Pool End"},
+#endif
        [IDENTITY_AFTER_NR]     = {(unsigned long)_end, "Identity Mapping Start"},
        [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
 #ifdef CONFIG_KASAN
@@ -248,6 +257,9 @@ static void sort_address_markers(void)
 
 static int pt_dump_init(void)
 {
+#ifdef CONFIG_KFENCE
+       unsigned long kfence_start = (unsigned long)__kfence_pool;
+#endif
        /*
         * Figure out the maximum virtual address being accessible with the
         * kernel ASCE. We need this to keep the page table walker functions
@@ -262,6 +274,10 @@ static int pt_dump_init(void)
        address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
        address_markers[VMALLOC_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+#ifdef CONFIG_KFENCE
+       address_markers[KFENCE_START_NR].start_address = kfence_start;
+       address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
+#endif
        sort_address_markers();
 #ifdef CONFIG_PTDUMP_DEBUGFS
        debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
index e33c43b38afe0e5b6566b1696cfb85dd3c4d1124..212632d57db9c9cfd91be9acd55a5e281ece479c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
+#include <linux/kfence.h>
 #include <asm/asm-offsets.h>
 #include <asm/diag.h>
 #include <asm/gmap.h>
@@ -230,8 +231,8 @@ const struct exception_table_entry *s390_search_extables(unsigned long addr)
 {
        const struct exception_table_entry *fixup;
 
-       fixup = search_extable(__start_dma_ex_table,
-                              __stop_dma_ex_table - __start_dma_ex_table,
+       fixup = search_extable(__start_amode31_ex_table,
+                              __stop_amode31_ex_table - __start_amode31_ex_table,
                               addr);
        if (!fixup)
                fixup = search_exception_tables(addr);
@@ -356,6 +357,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        unsigned long address;
        unsigned int flags;
        vm_fault_t fault;
+       bool is_write;
 
        tsk = current;
        /*
@@ -369,6 +371,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 
        mm = tsk->mm;
        trans_exc_code = regs->int_parm_long;
+       address = trans_exc_code & __FAIL_ADDR_MASK;
+       is_write = (trans_exc_code & store_indication) == 0x400;
 
        /*
         * Verify that the fault happened in user space, that
@@ -379,6 +383,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
        type = get_fault_type(regs);
        switch (type) {
        case KERNEL_FAULT:
+               if (kfence_handle_page_fault(address, is_write, regs))
+                       return 0;
                goto out;
        case USER_FAULT:
        case GMAP_FAULT:
@@ -387,12 +393,11 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
                break;
        }
 
-       address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
-       if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+       if (access == VM_WRITE || is_write)
                flags |= FAULT_FLAG_WRITE;
        mmap_read_lock(mm);
 
index 8ac710de1ab1b81ee884f58458e792a00d9742b3..f3db3caa84476b2c85dd7342264a2e66507b0e71 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/ptdump.h>
 #include <asm/dma.h>
 #include <asm/lowcore.h>
@@ -200,7 +201,7 @@ void __init mem_init(void)
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
        pv_init();
-
+       kfence_split_mapping();
        /* Setup guest page hinting */
        cmma_init();
 
index a0fdc6dc5f9d0904a1a55f025396cd6a801156c2..3e473516801939b5e8759f65a1c97128949cc5e7 100644 (file)
@@ -107,6 +107,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }
 
+       /*
+        * The first 1MB of 1:1 mapping is mapped with 4KB pages
+        */
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
@@ -157,30 +160,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
 
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
-                       if (mode == POPULATE_ZERO_SHADOW &&
-                           IS_ALIGNED(address, PMD_SIZE) &&
+                       if (IS_ALIGNED(address, PMD_SIZE) &&
                            end - address >= PMD_SIZE) {
-                               pmd_populate(&init_mm, pm_dir,
-                                               kasan_early_shadow_pte);
-                               address = (address + PMD_SIZE) & PMD_MASK;
-                               continue;
-                       }
-                       /* the first megabyte of 1:1 is mapped with 4k pages */
-                       if (has_edat && address && end - address >= PMD_SIZE &&
-                           mode != POPULATE_ZERO_SHADOW) {
-                               void *page;
-
-                               if (mode == POPULATE_ONE2ONE) {
-                                       page = (void *)address;
-                               } else {
-                                       page = kasan_early_alloc_segment();
-                                       memset(page, 0, _SEGMENT_SIZE);
+                               if (mode == POPULATE_ZERO_SHADOW) {
+                                       pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
+                                       address = (address + PMD_SIZE) & PMD_MASK;
+                                       continue;
+                               } else if (has_edat && address) {
+                                       void *page;
+
+                                       if (mode == POPULATE_ONE2ONE) {
+                                               page = (void *)address;
+                                       } else {
+                                               page = kasan_early_alloc_segment();
+                                               memset(page, 0, _SEGMENT_SIZE);
+                                       }
+                                       pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+                                       address = (address + PMD_SIZE) & PMD_MASK;
+                                       continue;
                                }
-                               pmd_val(*pm_dir) = __pa(page) | sgt_prot;
-                               address = (address + PMD_SIZE) & PMD_MASK;
-                               continue;
                        }
-
                        pt_dir = kasan_early_pte_alloc();
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
@@ -300,7 +299,7 @@ void __init kasan_early_init(void)
        pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
                initrd_end =
-                   round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
+                   round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
                pgalloc_low = max(pgalloc_low, initrd_end);
        }
 
index a0f54bd5e98abf0e75092afda9d48d48cb8017f3..9663ce3625bcdcd05c694b515369d7b45bf27929 100644 (file)
@@ -228,7 +228,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
        void *bounce = (void *) addr;
        unsigned long size;
 
-       get_online_cpus();
+       cpus_read_lock();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
@@ -237,7 +237,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
-       put_online_cpus();
+       cpus_read_unlock();
        return bounce;
 }
 
index 68b153083a9210ef0eddaa6efbe39ee6f5e372b8..18a6381097a90fcc912796d19696c7f1a04cbbf4 100644 (file)
@@ -228,46 +228,3 @@ void arch_set_page_dat(struct page *page, int order)
                return;
        set_page_stable_dat(page, order);
 }
-
-void arch_set_page_nodat(struct page *page, int order)
-{
-       if (cmma_flag < 2)
-               return;
-       set_page_stable_nodat(page, order);
-}
-
-int arch_test_page_nodat(struct page *page)
-{
-       unsigned char state;
-
-       if (cmma_flag < 2)
-               return 0;
-       state = get_page_state(page);
-       return !!(state & 0x20);
-}
-
-void arch_set_page_states(int make_stable)
-{
-       unsigned long flags, order, t;
-       struct list_head *l;
-       struct page *page;
-       struct zone *zone;
-
-       if (!cmma_flag)
-               return;
-       if (make_stable)
-               drain_local_pages(NULL);
-       for_each_populated_zone(zone) {
-               spin_lock_irqsave(&zone->lock, flags);
-               for_each_migratetype_order(order, t) {
-                       list_for_each(l, &zone->free_area[order].free_list[t]) {
-                               page = list_entry(l, struct page, lru);
-                               if (make_stable)
-                                       set_page_stable_dat(page, order);
-                               else
-                                       set_page_unused(page, order);
-                       }
-               }
-               spin_unlock_irqrestore(&zone->lock, flags);
-       }
-}
index ed8e5b3575d59dc709cfa091d0713ff5519d491f..fdc86c0e4e6ca3d259f9bbd982e47261a06122b6 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 #include <asm/page.h>
 #include <asm/set_memory.h>
 
@@ -85,6 +86,8 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
 {
        pte_t *ptep, new;
 
+       if (flags == SET_MEMORY_4K)
+               return 0;
        ptep = pte_offset_kernel(pmdp, addr);
        do {
                new = *ptep;
@@ -155,6 +158,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                          unsigned long flags)
 {
        unsigned long next;
+       int need_split;
        pmd_t *pmdp;
        int rc = 0;
 
@@ -164,7 +168,10 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                        return -EINVAL;
                next = pmd_addr_end(addr, end);
                if (pmd_large(*pmdp)) {
-                       if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
+                       need_split  = !!(flags & SET_MEMORY_4K);
+                       need_split |= !!(addr & ~PMD_MASK);
+                       need_split |= !!(addr + PMD_SIZE > next);
+                       if (need_split) {
                                rc = split_pmd_page(pmdp, addr);
                                if (rc)
                                        return rc;
@@ -232,6 +239,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                          unsigned long flags)
 {
        unsigned long next;
+       int need_split;
        pud_t *pudp;
        int rc = 0;
 
@@ -241,7 +249,10 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                        return -EINVAL;
                next = pud_addr_end(addr, end);
                if (pud_large(*pudp)) {
-                       if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
+                       need_split  = !!(flags & SET_MEMORY_4K);
+                       need_split |= !!(addr & ~PUD_MASK);
+                       need_split |= !!(addr + PUD_SIZE > next);
+                       if (need_split) {
                                rc = split_pud_page(pudp, addr);
                                if (rc)
                                        break;
@@ -316,7 +327,7 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
        return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)
 {
@@ -340,7 +351,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
        pte_t *pte;
 
        for (i = 0; i < numpages;) {
-               address = page_to_phys(page + i);
+               address = (unsigned long)page_to_virt(page + i);
                pte = virt_to_kpte(address);
                nr = (unsigned long)pte >> ilog2(sizeof(long));
                nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
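
Accumulating the split conditions into need_split, one reason per line, is what makes the new SET_MEMORY_4K trigger a one-line addition. The same shape in isolation (the constants are illustrative values, not the real s390 ones):

    #include <stdbool.h>
    #include <stdio.h>

    #define SET_MEMORY_4K   0x8UL
    #define PMD_SIZE        0x200000UL
    #define PMD_MASK        (~(PMD_SIZE - 1))

    static bool must_split(unsigned long addr, unsigned long next,
                           unsigned long flags)
    {
        int need_split;

        /* Each reason to split the large mapping gets one line. */
        need_split  = !!(flags & SET_MEMORY_4K);  /* forced 4K mapping */
        need_split |= !!(addr & ~PMD_MASK);       /* unaligned start   */
        need_split |= !!(addr + PMD_SIZE > next); /* range too short   */
        return need_split;
    }

    int main(void)
    {
        printf("%d\n", must_split(0x200000, 0x400000, 0));             /* 0 */
        printf("%d\n", must_split(0x200000, 0x400000, SET_MEMORY_4K)); /* 1 */
        return 0;
    }
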
index 96897fab89dc899baddb63e7953354823726d2c1..2b1c6d916cf9c651325da83569df9830f5aed0db 100644 (file)
@@ -581,7 +581,7 @@ void __init vmem_map_init(void)
        __set_memory((unsigned long)_sinittext,
                     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
-       __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+       __set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
 
        /* we need lowcore executable for our LPSWE instructions */
index b0993e05affe65b0a54595f5adc87f49a211cd91..e7e6788d75a864605a99ff34d56393d1a9b35a5b 100644 (file)
@@ -113,13 +113,16 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
        struct zpci_fib fib = {0};
-       u8 status;
+       u8 cc, status;
 
        WARN_ON_ONCE(iota & 0x3fff);
        fib.pba = base;
        fib.pal = limit;
        fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
-       return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+       cc = zpci_mod_fc(req, &fib, &status);
+       if (cc)
+               zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
+       return cc;
 }
 
 /* Modify PCI: Unregister I/O address translation parameters */
@@ -130,9 +133,9 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
        u8 cc, status;
 
        cc = zpci_mod_fc(req, &fib, &status);
-       if (cc == 3) /* Function already gone. */
-               cc = 0;
-       return cc ? -EIO : 0;
+       if (cc)
+               zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
+       return cc;
 }
 
 /* Modify PCI: Set PCI function measurement parameters */
@@ -560,9 +563,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 
 int pcibios_add_device(struct pci_dev *pdev)
 {
+       struct zpci_dev *zdev = to_zpci(pdev);
        struct resource *res;
        int i;
 
+       /* The pdev has a reference to the zdev via its bus */
+       zpci_zdev_get(zdev);
        if (pdev->is_physfn)
                pdev->no_vf_scan = 1;
 
@@ -582,7 +588,10 @@ int pcibios_add_device(struct pci_dev *pdev)
 
 void pcibios_release_device(struct pci_dev *pdev)
 {
+       struct zpci_dev *zdev = to_zpci(pdev);
+
        zpci_unmap_resources(pdev);
+       zpci_zdev_put(zdev);
 }
 
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
@@ -653,32 +662,37 @@ void zpci_free_domain(int domain)
 
 int zpci_enable_device(struct zpci_dev *zdev)
 {
-       int rc;
-
-       rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
-       if (rc)
-               goto out;
-
-       rc = zpci_dma_init_device(zdev);
-       if (rc)
-               goto out_dma;
+       u32 fh = zdev->fh;
+       int rc = 0;
 
-       return 0;
-
-out_dma:
-       clp_disable_fh(zdev);
-out:
+       if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
+               rc = -EIO;
+       else
+               zdev->fh = fh;
        return rc;
 }
 
 int zpci_disable_device(struct zpci_dev *zdev)
 {
-       zpci_dma_exit_device(zdev);
-       /*
-        * The zPCI function may already be disabled by the platform, this is
-        * detected in clp_disable_fh() which becomes a no-op.
-        */
-       return clp_disable_fh(zdev);
+       u32 fh = zdev->fh;
+       int cc, rc = 0;
+
+       cc = clp_disable_fh(zdev, &fh);
+       if (!cc) {
+               zdev->fh = fh;
+       } else if (cc == CLP_RC_SETPCIFN_ALRDY) {
+               pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
+                       zdev->fid);
+               /* Function is already disabled - update handle */
+               rc = clp_refresh_fh(zdev->fid, &fh);
+               if (!rc) {
+                       zdev->fh = fh;
+                       rc = -EINVAL;
+               }
+       } else {
+               rc = -EIO;
+       }
+       return rc;
 }
 
 /**
@@ -788,6 +802,11 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
+       if (zdev->dma_table) {
+               rc = zpci_dma_exit_device(zdev);
+               if (rc)
+                       return rc;
+       }
        if (zdev_enabled(zdev)) {
                rc = zpci_disable_device(zdev);
                if (rc)
@@ -811,6 +830,8 @@ void zpci_release_device(struct kref *kref)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
+       if (zdev->dma_table)
+               zpci_dma_exit_device(zdev);
        if (zdev_enabled(zdev))
                zpci_disable_device(zdev);
 
@@ -822,7 +843,8 @@ void zpci_release_device(struct kref *kref)
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
-               zpci_cleanup_bus_resources(zdev);
+               if (zdev->has_resources)
+                       zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
                zpci_destroy_iommu(zdev);
                fallthrough;
@@ -886,7 +908,6 @@ static void zpci_mem_exit(void)
 }
 
 static unsigned int s390_pci_probe __initdata = 1;
-static unsigned int s390_pci_no_mio __initdata;
 unsigned int s390_pci_force_floating __initdata;
 static unsigned int s390_pci_initialized;
 
@@ -897,7 +918,7 @@ char * __init pcibios_setup(char *str)
                return NULL;
        }
        if (!strcmp(str, "nomio")) {
-               s390_pci_no_mio = 1;
+               S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
                return NULL;
        }
        if (!strcmp(str, "force_floating")) {
@@ -928,7 +949,7 @@ static int __init pci_base_init(void)
                return 0;
        }
 
-       if (test_facility(153) && !s390_pci_no_mio) {
+       if (MACHINE_HAS_PCI_MIO) {
                static_branch_enable(&have_mio);
                ctl_set_bit(2, 5);
        }
index 9629f9779c79b1c7d89207ad645a628b45ac1b18..5d77acbd1c872b370f32a0603b59fe16132142d8 100644 (file)
@@ -49,6 +49,11 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
                rc = zpci_enable_device(zdev);
                if (rc)
                        return rc;
+               rc = zpci_dma_init_device(zdev);
+               if (rc) {
+                       zpci_disable_device(zdev);
+                       return rc;
+               }
        }
 
        if (!zdev->has_resources) {
@@ -343,11 +348,11 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
 {
        int rc = -EINVAL;
 
-       zdev->zbus = zbus;
        if (zbus->function[zdev->devfn]) {
                pr_err("devfn %04x is already assigned\n", zdev->devfn);
                return rc;
        }
+       zdev->zbus = zbus;
        zbus->function[zdev->devfn] = zdev;
        zpci_nb_devices++;
 
@@ -367,6 +372,7 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
 
 error:
        zbus->function[zdev->devfn] = NULL;
+       zdev->zbus = NULL;
        zpci_nb_devices--;
        return rc;
 }
index b877a97e6745bc9121e46bde83d3b937099f67d9..e359d2686178b8575dedc4a07021a70cc078246d 100644 (file)
@@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev)
        kref_put(&zdev->kref, zpci_release_device);
 }
 
+static inline void zpci_zdev_get(struct zpci_dev *zdev)
+{
+       kref_get(&zdev->kref);
+}
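
zpci_zdev_get() completes the pairing used in this series: pcibios_add_device() takes a reference that pcibios_release_device() drops, so the zdev cannot be freed while a pdev still points at it. A sketch of that lifetime rule with a plain atomic counter (the kernel uses struct kref; the names here are invented):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct zdev {
        atomic_int refs;
    };

    static void zdev_get(struct zdev *z)
    {
        atomic_fetch_add(&z->refs, 1);
    }

    static void zdev_put(struct zdev *z)
    {
        if (atomic_fetch_sub(&z->refs, 1) == 1) {
            printf("last reference dropped, releasing zdev\n");
            free(z);
        }
    }

    int main(void)
    {
        struct zdev *z = calloc(1, sizeof(*z));

        if (!z)
            return 1;
        atomic_store(&z->refs, 1); /* creation reference            */
        zdev_get(z);               /* pdev added: pin the zdev      */
        zdev_put(z);               /* pdev released                 */
        zdev_put(z);               /* owner drops: object is freed  */
        return 0;
    }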
+
 int zpci_alloc_domain(int domain);
 void zpci_free_domain(int domain);
 int zpci_setup_bus_resources(struct zpci_dev *zdev,
index d3331596ddbe1d436c64be7f6af2f3f73bfc0fc3..51dc2215a2b74a73db8cc1afb13a31363c509780 100644 (file)
@@ -212,17 +212,22 @@ out:
        return rc;
 }
 
-static int clp_refresh_fh(u32 fid);
-/*
- * Enable/Disable a given PCI function and update its function handle if
- * necessary
+/**
+ * clp_set_pci_fn() - Execute a command on a PCI function
+ * @zdev: Function that will be affected
+ * @fh: Out parameter for updated function handle
+ * @nr_dma_as: DMA address space number
+ * @command: The command code to execute
+ *
+ * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
+ * > 0 for non-success platform responses
  */
-static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
+static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
 {
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;
-       u32 fid = zdev->fid;
 
+       *fh = 0;
        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;
@@ -245,17 +250,13 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
 
-       if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+       if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+               *fh = rrb->response.fh;
+       } else {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
-       }
-
-       if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
-               zdev->fh = rrb->response.fh;
-       } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
-                       rrb->response.fh == 0) {
-               /* Function is already in desired state - update handle */
-               rc = clp_refresh_fh(fid);
+               if (!rc)
+                       rc = rrb->response.hdr.rsp;
        }
        clp_free_block(rrb);
        return rc;
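
Under the new convention a caller has three outcome classes to distinguish: zero, a negative Linux errno, and a positive platform response code. A caller-side sketch (the stub and the response value are stand-ins, not the real constants):

    #include <stdio.h>

    #define CLP_RC_SETPCIFN_ALRDY 0x0102   /* illustrative value only */

    static int clp_set_pci_fn_stub(void)
    {
        return CLP_RC_SETPCIFN_ALRDY;      /* pretend platform answer */
    }

    int main(void)
    {
        int rc = clp_set_pci_fn_stub();

        if (rc == 0)
            printf("success\n");
        else if (rc < 0)
            printf("Linux error %d (e.g. -ENOMEM)\n", rc);
        else if (rc == CLP_RC_SETPCIFN_ALRDY)
            printf("platform: already in the desired state\n");
        else
            printf("other platform response %#x\n", rc);
        return 0;
    }
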
@@ -295,35 +296,62 @@ int clp_setup_writeback_mio(void)
        return rc;
 }
 
-int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
 {
        int rc;
 
-       rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
-       zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
-       if (rc)
-               goto out;
-
-       if (zpci_use_mio(zdev)) {
-               rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
+       rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+       zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
+       if (!rc && zpci_use_mio(zdev)) {
+               rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
                zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
-                               zdev->fid, zdev->fh, rc);
+                               zdev->fid, *fh, rc);
                if (rc)
-                       clp_disable_fh(zdev);
+                       clp_disable_fh(zdev, fh);
        }
-out:
        return rc;
 }
 
-int clp_disable_fh(struct zpci_dev *zdev)
+int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
 {
        int rc;
 
        if (!zdev_enabled(zdev))
                return 0;
 
-       rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
-       zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+       rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
+       zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
+       return rc;
+}
+
+static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
+                           u64 *resume_token, int *nentries)
+{
+       int rc;
+
+       memset(rrb, 0, sizeof(*rrb));
+       rrb->request.hdr.len = sizeof(rrb->request);
+       rrb->request.hdr.cmd = CLP_LIST_PCI;
+       /* store as many entries as possible */
+       rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
+       rrb->request.resume_token = *resume_token;
+
+       /* Get PCI function handle list */
+       rc = clp_req(rrb, CLP_LPS_PCI);
+       if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+               zpci_err("List PCI FN:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
+               return -EIO;
+       }
+
+       update_uid_checking(rrb->response.uid_checking);
+       WARN_ON_ONCE(rrb->response.entry_size !=
+               sizeof(struct clp_fh_list_entry));
+
+       *nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
+               rrb->response.entry_size;
+       *resume_token = rrb->response.resume_token;
+
        return rc;
 }
 
@@ -331,38 +359,40 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
                        void (*cb)(struct clp_fh_list_entry *, void *))
 {
        u64 resume_token = 0;
-       int entries, i, rc;
+       int nentries, i, rc;
 
        do {
-               memset(rrb, 0, sizeof(*rrb));
-               rrb->request.hdr.len = sizeof(rrb->request);
-               rrb->request.hdr.cmd = CLP_LIST_PCI;
-               /* store as many entries as possible */
-               rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
-               rrb->request.resume_token = resume_token;
-
-               /* Get PCI function handle list */
-               rc = clp_req(rrb, CLP_LPS_PCI);
-               if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
-                       zpci_err("List PCI FN:\n");
-                       zpci_err_clp(rrb->response.hdr.rsp, rc);
-                       rc = -EIO;
-                       goto out;
-               }
+               rc = clp_list_pci_req(rrb, &resume_token, &nentries);
+               if (rc)
+                       return rc;
+               for (i = 0; i < nentries; i++)
+                       cb(&rrb->response.fh_list[i], data);
+       } while (resume_token);
 
-               update_uid_checking(rrb->response.uid_checking);
-               WARN_ON_ONCE(rrb->response.entry_size !=
-                       sizeof(struct clp_fh_list_entry));
+       return rc;
+}
 
-               entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
-                       rrb->response.entry_size;
+static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
+                       struct clp_fh_list_entry *entry)
+{
+       struct clp_fh_list_entry *fh_list;
+       u64 resume_token = 0;
+       int nentries, i, rc;
 
-               resume_token = rrb->response.resume_token;
-               for (i = 0; i < entries; i++)
-                       cb(&rrb->response.fh_list[i], data);
+       do {
+               rc = clp_list_pci_req(rrb, &resume_token, &nentries);
+               if (rc)
+                       return rc;
+               for (i = 0; i < nentries; i++) {
+                       fh_list = rrb->response.fh_list;
+                       if (fh_list[i].fid == fid) {
+                               *entry = fh_list[i];
+                               return 0;
+                       }
+               }
        } while (resume_token);
-out:
-       return rc;
+
+       return -ENODEV;
 }
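
Factoring the request into clp_list_pci_req() leaves clp_list_pci() and clp_find_pci() sharing one resume-token loop: fetch a page of entries, process it, repeat until the token comes back zero. The loop shape as a runnable sketch (fetch_page() is an invented stand-in for the CLP request):

    #include <stdio.h>

    /* Pretend each call returns up to 2 entries plus a resume token;
     * a zero token means the listing is complete. */
    static int fetch_page(unsigned long *token, int *entries, int *n)
    {
        static int remaining = 5;
        int i;

        *n = remaining > 2 ? 2 : remaining;
        for (i = 0; i < *n; i++)
            entries[i] = 5 - remaining + i;
        remaining -= *n;
        *token = remaining ? 1 : 0;
        return 0;
    }

    int main(void)
    {
        unsigned long token = 0;
        int entries[2], n, i, rc;

        do {
            rc = fetch_page(&token, entries, &n);
            if (rc)
                return rc;
            for (i = 0; i < n; i++)
                printf("entry %d\n", entries[i]);
        } while (token);
        return 0;
    }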
 
 static void __clp_add(struct clp_fh_list_entry *entry, void *data)
@@ -392,67 +422,41 @@ int clp_scan_pci_devices(void)
        return rc;
 }
 
-static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
-{
-       struct zpci_dev *zdev;
-       u32 fid = *((u32 *)data);
-
-       if (!entry->vendor_id || fid != entry->fid)
-               return;
-
-       zdev = get_zdev_by_fid(fid);
-       if (!zdev)
-               return;
-
-       zdev->fh = entry->fh;
-}
-
 /*
- * Refresh the function handle of the function matching @fid
+ * Get the current function handle of the function matching @fid
  */
-static int clp_refresh_fh(u32 fid)
+int clp_refresh_fh(u32 fid, u32 *fh)
 {
        struct clp_req_rsp_list_pci *rrb;
+       struct clp_fh_list_entry entry;
        int rc;
 
        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;
 
-       rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);
+       rc = clp_find_pci(rrb, fid, &entry);
+       if (!rc)
+               *fh = entry.fh;
 
        clp_free_block(rrb);
        return rc;
 }
 
-struct clp_state_data {
-       u32 fid;
-       enum zpci_state state;
-};
-
-static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
-{
-       struct clp_state_data *sd = data;
-
-       if (entry->fid != sd->fid)
-               return;
-
-       sd->state = entry->config_state;
-}
-
 int clp_get_state(u32 fid, enum zpci_state *state)
 {
        struct clp_req_rsp_list_pci *rrb;
-       struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
+       struct clp_fh_list_entry entry;
        int rc;
 
+       *state = ZPCI_FN_STATE_RESERVED;
        rrb = clp_alloc_block(GFP_ATOMIC);
        if (!rrb)
                return -ENOMEM;
 
-       rc = clp_list_pci(rrb, &sd, __clp_get_state);
+       rc = clp_find_pci(rrb, fid, &entry);
        if (!rc)
-               *state = sd.state;
+               *state = entry.config_state;
 
        clp_free_block(rrb);
        return rc;
index ebc9a49523aa3182e0949c5e442ed24016427b6d..58f2f7abea96e10e45a29065f9f8e374361107f9 100644 (file)
@@ -590,10 +590,11 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
                }
 
        }
-       rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-                               (u64) zdev->dma_table);
-       if (rc)
+       if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+                              (u64)zdev->dma_table)) {
+               rc = -EIO;
                goto free_bitmap;
+       }
 
        return 0;
 free_bitmap:
@@ -608,17 +609,25 @@ out:
        return rc;
 }
 
-void zpci_dma_exit_device(struct zpci_dev *zdev)
+int zpci_dma_exit_device(struct zpci_dev *zdev)
 {
+       int cc = 0;
+
        /*
         * At this point, if the device is part of an IOMMU domain, this would
         * be a strong hint towards a bug in the IOMMU API (common) code and/or
         * simultaneous access via IOMMU and DMA API. So let's issue a warning.
         */
        WARN_ON(zdev->s390_domain);
-
-       if (zpci_unregister_ioat(zdev, 0))
-               return;
+       if (zdev_enabled(zdev))
+               cc = zpci_unregister_ioat(zdev, 0);
+       /*
+        * cc == 3 indicates the function is gone already. This can happen
+        * if the function was deconfigured/disabled suddenly and we have not
+        * received a new handle yet.
+        */
+       if (cc && cc != 3)
+               return -EIO;
 
        dma_cleanup_tables(zdev->dma_table);
        zdev->dma_table = NULL;
@@ -626,8 +635,8 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
        zdev->iommu_bitmap = NULL;
        vfree(zdev->lazy_bitmap);
        zdev->lazy_bitmap = NULL;
-
        zdev->next_bit = 0;
+       return 0;
 }
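
The teardown is now tolerant of a function that vanished underneath it: condition code 3 means already gone, so cleanup proceeds, while any other nonzero code aborts with -EIO and leaves the state intact. The decision in isolation (a hedged sketch; -5 is Linux's EIO value):

    #include <stdio.h>

    /* Map a platform condition code to a Linux-style result:
     * 0 and 3 ("function already gone") both let cleanup continue. */
    static int unregister_result(int cc)
    {
        if (cc && cc != 3)
            return -5;  /* -EIO: real failure, keep resources */
        return 0;       /* proceed with freeing resources     */
    }

    int main(void)
    {
        printf("cc=0 -> %d\n", unregister_result(0));
        printf("cc=3 -> %d\n", unregister_result(3));
        printf("cc=1 -> %d\n", unregister_result(1));
        return 0;
    }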
 
 static int __init dma_alloc_cpu_table_caches(void)
index cd447b96b4b1b024db6e2366a3eb6e7e0c96c139..c856f80cb21b880d233fe6c6ec3c93ab4fcb688c 100644 (file)
@@ -84,7 +84,10 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
        /* Even though the device is already gone we still
         * need to free zPCI resources as part of the disable.
         */
-       zpci_disable_device(zdev);
+       if (zdev->dma_table)
+               zpci_dma_exit_device(zdev);
+       if (zdev_enabled(zdev))
+               zpci_disable_device(zdev);
        zdev->state = ZPCI_FN_STATE_STANDBY;
 }
 
index 6e2450c2b9c11b48e602c24967008c9b424b8f8f..335c281811c753592c9bd3acdee1e8614e030594 100644 (file)
@@ -82,13 +82,26 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
        pci_lock_rescan_remove();
        if (pci_dev_is_added(pdev)) {
                pci_stop_and_remove_bus_device(pdev);
-               ret = zpci_disable_device(zdev);
-               if (ret)
-                       goto out;
+               if (zdev->dma_table) {
+                       ret = zpci_dma_exit_device(zdev);
+                       if (ret)
+                               goto out;
+               }
+
+               if (zdev_enabled(zdev)) {
+                       ret = zpci_disable_device(zdev);
+                       if (ret)
+                               goto out;
+               }
 
                ret = zpci_enable_device(zdev);
                if (ret)
                        goto out;
+               ret = zpci_dma_init_device(zdev);
+               if (ret) {
+                       zpci_disable_device(zdev);
+                       goto out;
+               }
                pci_rescan_bus(zdev->zbus->bus);
        }
 out:
index 21c4ebe29b9a2be839fd6399845f777e09ebe790..360ada80d20c3b72766f26fa1cbf138138d2dc2f 100644 (file)
@@ -19,6 +19,7 @@ KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
index 0e207c46e8da1021971937a5e21bc3824738058f..6db9820d104a1baeab60b24228cef655f8ca5473 100644 (file)
@@ -189,6 +189,8 @@ ad  stosm   SI_URD
 ae     sigp    RS_RRRD
 af     mc      SI_URD
 b1     lra     RX_RRRD
+b200   lbear   S_RD
+b201   stbear  S_RD
 b202   stidp   S_RD
 b204   sck     S_RD
 b205   stck    S_RD
@@ -523,6 +525,7 @@ b931        clgfr   RRE_RR
 b938   sortl   RRE_RR
 b939   dfltcc  RRF_R0RR2
 b93a   kdsa    RRE_RR
+b93b   nnpa    RRE_00
 b93c   ppno    RRE_RR
 b93e   kimd    RRE_RR
 b93f   klmd    RRE_RR
@@ -562,6 +565,7 @@ b987        dlgr    RRE_RR
 b988   alcgr   RRE_RR
 b989   slbgr   RRE_RR
 b98a   cspg    RRE_RR
+b98b   rdp     RRF_RURR2
 b98d   epsw    RRE_RR
 b98e   idte    RRF_RURR2
 b98f   crdte   RRF_RURR2
@@ -876,19 +880,32 @@ e63d      vstrl   VSI_URDV
 e63f   vstrlr  VRS_RRDV
 e649   vlip    VRI_V0UU2
 e650   vcvb    VRR_RV0UU
+e651   vclzdp  VRR_VV0U2
 e652   vcvbg   VRR_RV0UU
+e654   vupkzh  VRR_VV0U2
+e655   vcnf    VRR_VV0UU2
+e656   vclfnh  VRR_VV0UU2
 e658   vcvd    VRI_VR0UU
 e659   vsrp    VRI_VVUUU2
 e65a   vcvdg   VRI_VR0UU
 e65b   vpsop   VRI_VVUUU2
+e65c   vupkzl  VRR_VV0U2
+e65d   vcfn    VRR_VV0UU2
+e65e   vclfnl  VRR_VV0UU2
 e65f   vtp     VRR_0V
+e670   vpkzr   VRI_VVV0UU2
 e671   vap     VRI_VVV0UU2
+e672   vsrpr   VRI_VVV0UU2
 e673   vsp     VRI_VVV0UU2
+e674   vschp   VRR_VVV0U0U
+e675   vcrnf   VRR_VVV0UU
 e677   vcp     VRR_0VV0U
 e678   vmp     VRI_VVV0UU2
 e679   vmsp    VRI_VVV0UU2
 e67a   vdp     VRI_VVV0UU2
 e67b   vrp     VRI_VVV0UU2
+e67c   vscshp  VRR_VVV
+e67d   vcsph   VRR_VVV0U0
 e67e   vsdp    VRI_VVV0UU2
 e700   vleb    VRX_VRRDU
 e701   vleh    VRX_VRRDU
@@ -1081,6 +1098,7 @@ eb61      stric   RSY_RDRU
 eb62   mric    RSY_RDRU
 eb6a   asi     SIY_IRD
 eb6e   alsi    SIY_IRD
+eb71   lpswey  SIY_URD
 eb7a   agsi    SIY_IRD
 eb7e   algsi   SIY_IRD
 eb80   icmh    RSY_RURD
index 307fd0000a8398f9ba21549f583572a9d39aab8f..d82d01490dd3ba04bddb5fb6daad435d137626b1 100644 (file)
@@ -31,8 +31,8 @@ REALMODE_CFLAGS       := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
 
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+REALMODE_CFLAGS += -Wno-address-of-packed-member
+REALMODE_CFLAGS += $(cc_stack_align4)
 REALMODE_CFLAGS += $(CLANG_FLAGS)
 export REALMODE_CFLAGS
 
@@ -48,8 +48,7 @@ export BITS
 #
 #    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
 #
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
-KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
 
 # Intel CET isn't enabled in the kernel
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
@@ -59,9 +58,8 @@ ifeq ($(CONFIG_X86_32),y)
         UTS_MACHINE := i386
         CHECKFLAGS += -D__i386__
 
-        biarch := $(call cc-option,-m32)
-        KBUILD_AFLAGS += $(biarch)
-        KBUILD_CFLAGS += $(biarch)
+        KBUILD_AFLAGS += -m32
+        KBUILD_CFLAGS += -m32
 
         KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
 
@@ -72,7 +70,7 @@ ifeq ($(CONFIG_X86_32),y)
         # Align the stack to the register width instead of using the default
         # alignment of 16 bytes. This reduces stack usage and the number of
         # alignment instructions.
-        KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
+        KBUILD_CFLAGS += $(cc_stack_align4)
 
         # CPU-specific tuning. Anything which can be shared with UML should go here.
         include arch/x86/Makefile_32.cpu
@@ -93,7 +91,6 @@ else
         UTS_MACHINE := x86_64
         CHECKFLAGS += -D__x86_64__
 
-        biarch := -m64
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
@@ -104,7 +101,7 @@ else
         KBUILD_CFLAGS += $(call cc-option,-falign-loops=1)
 
         # Don't autogenerate traditional x87 instructions
-        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += -mno-80387
         KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
         # By default gcc and clang use a stack alignment of 16 bytes for x86.
@@ -114,20 +111,17 @@ else
         # default alignment which keep the stack *mis*aligned.
         # Furthermore an alignment to the register width reduces stack usage
         # and the number of alignment instructions.
-        KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
+        KBUILD_CFLAGS += $(cc_stack_align8)
 
        # Use -mskip-rax-setup if supported.
        KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
 
         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-        cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-        cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
-
-        cflags-$(CONFIG_MCORE2) += \
-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
-       cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
-               $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-        cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+        cflags-$(CONFIG_MK8)           += -march=k8
+        cflags-$(CONFIG_MPSC)          += -march=nocona
+        cflags-$(CONFIG_MCORE2)        += -march=core2
+        cflags-$(CONFIG_MATOM)         += -march=atom
+        cflags-$(CONFIG_GENERIC_CPU)   += -mtune=generic
         KBUILD_CFLAGS += $(cflags-y)
 
         KBUILD_CFLAGS += -mno-red-zone
@@ -158,18 +152,6 @@ export CONFIG_X86_X32_ABI
 ifdef CONFIG_FUNCTION_GRAPH_TRACER
   ifndef CONFIG_HAVE_FENTRY
        ACCUMULATE_OUTGOING_ARGS := 1
-  else
-    ifeq ($(call cc-option-yn, -mfentry), n)
-       ACCUMULATE_OUTGOING_ARGS := 1
-
-       # GCC ignores '-maccumulate-outgoing-args' when used with '-Os'.
-       # If '-Os' is enabled, disable it and print a warning.
-        ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-          undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
-          $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE.  Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
-        endif
-
-    endif
   endif
 endif
 
@@ -193,7 +175,7 @@ ifdef CONFIG_RETPOLINE
   # only been fixed starting from gcc stable version 8.4.0 and
   # onwards, but not for older ones. See gcc bug #86952.
   ifndef CONFIG_CC_IS_CLANG
-    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+    KBUILD_CFLAGS += -fno-jump-tables
   endif
 endif
 
@@ -275,9 +257,10 @@ endif
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $@
 
-PHONY += install bzlilo
-install bzlilo:
-       $(Q)$(MAKE) $(build)=$(boot) $@
+PHONY += install
+install:
+       $(CONFIG_SHELL) $(srctree)/$(boot)/install.sh $(KERNELRELEASE) \
+               $(KBUILD_IMAGE) System.map "$(INSTALL_PATH)"
 
 PHONY += vdso_install
 vdso_install:
index dfbc26a8e924129969383e9ffc8b498fbb0ac0d2..b5aecb524a8aa6eb94d5ead6f4ec0d9b90cff537 100644 (file)
@@ -133,7 +133,7 @@ quiet_cmd_genimage = GENIMAGE $3
 cmd_genimage = $(BASH) $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
                $(obj)/mtools.conf '$(FDARGS)' $(FDINITRD)
 
-PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage install
+PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage
 
 # This requires write access to /dev/fd0
 # All images require syslinux to be installed; hdimage also requires
@@ -156,8 +156,3 @@ hdimage: $(imgdeps)
 isoimage: $(imgdeps)
        $(call cmd,genimage,isoimage,$(obj)/image.iso)
        @$(kecho) 'Kernel: $(obj)/image.iso is ready'
-
-install:
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh \
-               $(KERNELRELEASE) $(obj)/bzImage \
-               System.map "$(INSTALL_PATH)"
index 95a223b3e56a2b6b6b113867e6d9bbe49c90e419..8bb92e9f4e9739f1d0bf2d77f9fa42b829ed9e7f 100644 (file)
@@ -5,9 +5,8 @@
  * Early support for invoking 32-bit EFI services from a 64-bit kernel.
  *
  * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI service calls,
- * since the firmware's 32-bit IDT is still currently installed and it
- * needs to be able to service interrupts.
+ * restore the firmware's 32-bit GDT and IDT before we make EFI service
+ * calls.
  *
  * On the plus side, we don't have to worry about mangling 64-bit
  * addresses into 32-bits because we're executing with an identity
@@ -39,7 +38,7 @@ SYM_FUNC_START(__efi64_thunk)
        /*
         * Convert x86-64 ABI params to i386 ABI
         */
-       subq    $32, %rsp
+       subq    $64, %rsp
        movl    %esi, 0x0(%rsp)
        movl    %edx, 0x4(%rsp)
        movl    %ecx, 0x8(%rsp)
@@ -49,14 +48,19 @@ SYM_FUNC_START(__efi64_thunk)
        leaq    0x14(%rsp), %rbx
        sgdt    (%rbx)
 
+       addq    $16, %rbx
+       sidt    (%rbx)
+
        /*
-        * Switch to gdt with 32-bit segments. This is the firmware GDT
-        * that was installed when the kernel started executing. This
-        * pointer was saved at the EFI stub entry point in head_64.S.
+        * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
+        * and IDT that was installed when the kernel started executing. The
+        * pointers were saved at the EFI stub entry point in head_64.S.
         *
         * Pass the saved DS selector to the 32-bit code, and use far return to
         * restore the saved CS selector.
         */
+       leaq    efi32_boot_idt(%rip), %rax
+       lidt    (%rax)
        leaq    efi32_boot_gdt(%rip), %rax
        lgdt    (%rax)
 
@@ -67,7 +71,7 @@ SYM_FUNC_START(__efi64_thunk)
        pushq   %rax
        lretq
 
-1:     addq    $32, %rsp
+1:     addq    $64, %rsp
        movq    %rdi, %rax
 
        pop     %rbx
@@ -128,10 +132,13 @@ SYM_FUNC_START_LOCAL(efi_enter32)
 
        /*
         * Some firmware will return with interrupts enabled. Be sure to
-        * disable them before we switch GDTs.
+        * disable them before we switch GDTs and IDTs.
         */
        cli
 
+       lidtl   (%ebx)
+       subl    $16, %ebx
+
        lgdtl   (%ebx)
 
        movl    %cr4, %eax
@@ -166,6 +173,11 @@ SYM_DATA_START(efi32_boot_gdt)
        .quad   0
 SYM_DATA_END(efi32_boot_gdt)
 
+SYM_DATA_START(efi32_boot_idt)
+       .word   0
+       .quad   0
+SYM_DATA_END(efi32_boot_idt)
+
 SYM_DATA_START(efi32_boot_cs)
        .word   0
 SYM_DATA_END(efi32_boot_cs)
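
For reference, sgdt and sidt store a 10-byte image of the descriptor-table register: a 16-bit limit followed by a 64-bit base, which is exactly the layout the new efi32_boot_idt slot above reserves. A minimal C sketch of that save step (the struct and helper names here are illustrative, not from this patch):

#include <stdint.h>

/* 10-byte descriptor-table register image: 16-bit limit, 64-bit base. */
struct desc_ptr {
        uint16_t limit;
        uint64_t base;
} __attribute__((packed));

static inline void save_desc_tables(struct desc_ptr *gdt, struct desc_ptr *idt)
{
        asm volatile("sgdt %0" : "=m" (*gdt));
        asm volatile("sidt %0" : "=m" (*idt));
}
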
index a2347ded77ea25fce37f80b32eb743254a7a480e..572c535cf45bcb168c07bb9324ef0c52d33c3e8b 100644 (file)
@@ -319,6 +319,9 @@ SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
        movw    %cs, rva(efi32_boot_cs)(%ebp)
        movw    %ds, rva(efi32_boot_ds)(%ebp)
 
+       /* Store firmware IDT descriptor */
+       sidtl   rva(efi32_boot_idt)(%ebp)
+
        /* Disable paging */
        movl    %cr0, %eax
        btrl    $X86_CR0_PG_BIT, %eax
index d0959e7b809f78fc35908028531e3fdac8640cb9..f307c93fc90a7ae337d2b4da934a1ce2af3a65e4 100644 (file)
@@ -88,6 +88,12 @@ nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
 
 obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
 
+obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64) += sm4-aesni-avx-x86_64.o
+sm4-aesni-avx-x86_64-y := sm4-aesni-avx-asm_64.o sm4_aesni_avx_glue.o
+
+obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64) += sm4-aesni-avx2-x86_64.o
+sm4-aesni-avx2-x86_64-y := sm4-aesni-avx2-asm_64.o sm4_aesni_avx2_glue.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 $(obj)/%.S: $(src)/%.pl FORCE
index 2144e54a6c89212ded6eddb65e2e3bb3027030ea..0fc961bef299c993458b889db717caf00aeaaa3f 100644 (file)
@@ -849,6 +849,8 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                return -EINVAL;
 
        err = skcipher_walk_virt(&walk, req, false);
+       if (!walk.nbytes)
+               return err;
 
        if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
                int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
@@ -862,7 +864,10 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           blocks * AES_BLOCK_SIZE, req->iv);
                req = &subreq;
+
                err = skcipher_walk_virt(&walk, req, false);
+               if (err)
+                       return err;
        } else {
                tail = 0;
        }
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
new file mode 100644 (file)
index 0000000..fa2c3f5
--- /dev/null
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
+ * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
+ *  https://github.com/mjosaarinen/sm4ni
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+#define rRIP         (%rip)
+
+#define RX0          %xmm0
+#define RX1          %xmm1
+#define MASK_4BIT    %xmm2
+#define RTMP0        %xmm3
+#define RTMP1        %xmm4
+#define RTMP2        %xmm5
+#define RTMP3        %xmm6
+#define RTMP4        %xmm7
+
+#define RA0          %xmm8
+#define RA1          %xmm9
+#define RA2          %xmm10
+#define RA3          %xmm11
+
+#define RB0          %xmm12
+#define RB1          %xmm13
+#define RB2          %xmm14
+#define RB3          %xmm15
+
+#define RNOT         %xmm0
+#define RBSWAP       %xmm1
+
+
+/* Transpose four 32-bit words between 128-bit vectors. */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+       vpunpckhdq x1, x0, t2;                \
+       vpunpckldq x1, x0, x0;                \
+                                             \
+       vpunpckldq x3, x2, t1;                \
+       vpunpckhdq x3, x2, x2;                \
+                                             \
+       vpunpckhqdq t1, x0, x1;               \
+       vpunpcklqdq t1, x0, x0;               \
+                                             \
+       vpunpckhqdq x2, t2, x3;               \
+       vpunpcklqdq x2, t2, x2;
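
The macro above transposes a 4x4 block of 32-bit words held across four vector registers. A scalar C reference of the same permutation (illustrative, not part of the patch):

#include <stdint.h>

/* out[r][c] = in[c][r]: the effect of the vpunpck{l,h}dq /
 * vpunpck{l,h}qdq sequence across four 128-bit registers. */
static void transpose_4x4_words(const uint32_t in[4][4], uint32_t out[4][4])
{
        for (int r = 0; r < 4; r++)
                for (int c = 0; c < 4; c++)
                        out[r][c] = in[c][r];
}
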
+
+/* pre-SubByte transform. */
+#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpand x, mask4bit, tmp0;                     \
+       vpandn x, mask4bit, x;                       \
+       vpsrld $4, x, x;                             \
+                                                    \
+       vpshufb tmp0, lo_t, tmp0;                    \
+       vpshufb x, hi_t, x;                          \
+       vpxor tmp0, x, x;
+
+/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
+ * 'vaesenclast' instruction.
+ */
+#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpandn mask4bit, x, tmp0;                     \
+       vpsrld $4, x, x;                              \
+       vpand x, mask4bit, x;                         \
+                                                     \
+       vpshufb tmp0, lo_t, tmp0;                     \
+       vpshufb x, hi_t, x;                           \
+       vpxor tmp0, x, x;
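
Both macros implement a nibble-split table lookup: the byte is split into its low and high 4-bit halves, each half indexes a 16-entry vpshufb table, and the two results are XORed. A scalar sketch of the pattern (table contents are placeholders):

#include <stdint.h>

/* An affine map over GF(2) is linear in each nibble (with the constant
 * folded into one table), so a 256-entry byte table can be replaced by
 * two 16-entry tables. */
static uint8_t nibble_lut(const uint8_t lo_t[16], const uint8_t hi_t[16],
                          uint8_t x)
{
        return lo_t[x & 0x0f] ^ hi_t[x >> 4];
}
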
+
+
+.section       .rodata.cst164, "aM", @progbits, 164
+.align 16
+
+/*
+ * The following four affine transform look-up tables are from work by
+ * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
+ *
+ * These allow the SM4 S-box to be computed via the AES SubBytes operation.
+ */
+
+/* pre-SubByte affine transform, from SM4 field to AES field. */
+.Lpre_tf_lo_s:
+       .quad 0x9197E2E474720701, 0xC7C1B4B222245157
+.Lpre_tf_hi_s:
+       .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
+
+/* post-SubByte affine transform, from AES field to SM4 field. */
+.Lpost_tf_lo_s:
+       .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
+.Lpost_tf_hi_s:
+       .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
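
The .quad constants above pack those 16-entry tables for vpshufb; each table evaluates an affine transform over GF(2) on one nibble. A generic scalar sketch of a byte-wide affine transform, with m and c as placeholder values rather than the SM4/AES constants encoded above:

#include <stdint.h>

/* y = M*x ^ c over GF(2); row i of the bit-matrix M is byte i of m. */
static uint8_t affine8(uint64_t m, uint8_t c, uint8_t x)
{
        uint8_t y = c;

        for (int i = 0; i < 8; i++)
                y ^= (uint8_t)(__builtin_parity((uint8_t)(m >> (8 * i)) & x) << i);
        return y;
}
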
+
+/* Inverse ShiftRows permutation, for isolating SubBytes from AESENCLAST */
+.Linv_shift_row:
+       .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+       .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+
+/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_8:
+       .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
+       .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
+
+/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_16:
+       .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
+       .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
+
+/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_24:
+       .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
+       .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
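
The three rotated masks can be derived mechanically from .Linv_shift_row: rotating each 32-bit word left by 8*k bits is a cyclic byte shift within each 4-byte group, so the combined shuffle samples the base mask at (j - k) mod 4 within each word. A sketch of that derivation (assuming the little-endian byte order vpshufb sees):

#include <stdint.h>

/* Build the vpshufb mask for "inverse ShiftRows + rol32 by 8*k bits"
 * from the plain inverse-ShiftRows mask. */
static void make_inv_shift_rol(const uint8_t base[16], int k, uint8_t out[16])
{
        for (int w = 0; w < 4; w++)             /* 32-bit word */
                for (int j = 0; j < 4; j++)     /* byte within word */
                        out[4 * w + j] = base[4 * w + ((j - k) & 3)];
}
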
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+       .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/* For input word byte-swap */
+.Lbswap32_mask:
+       .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+
+.align 4
+/* 4-bit mask */
+.L0f0f0f0f:
+       .long 0x0f0f0f0f
+
+
+.text
+.align 16
+
+/*
+ * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
+ *                           const u8 *src, int nblocks)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_crypt4)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (1..4 blocks)
+        *      %rdx: src (1..4 blocks)
+        *      %rcx: num blocks (1..4)
+        */
+       FRAME_BEGIN
+
+       vmovdqu 0*16(%rdx), RA0;
+       vmovdqa RA0, RA1;
+       vmovdqa RA0, RA2;
+       vmovdqa RA0, RA3;
+       cmpq $2, %rcx;
+       jb .Lblk4_load_input_done;
+       vmovdqu 1*16(%rdx), RA1;
+       je .Lblk4_load_input_done;
+       vmovdqu 2*16(%rdx), RA2;
+       cmpq $3, %rcx;
+       je .Lblk4_load_input_done;
+       vmovdqu 3*16(%rdx), RA3;
+
+.Lblk4_load_input_done:
+
+       vmovdqa .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+
+       vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
+       vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
+       vmovdqa .Lpre_tf_hi_s rRIP, RB0;
+       vmovdqa .Lpost_tf_lo_s rRIP, RB1;
+       vmovdqa .Lpost_tf_hi_s rRIP, RB2;
+       vmovdqa .Linv_shift_row rRIP, RB3;
+       vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2;
+       vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3)                                \
+       vbroadcastss (4*(round))(%rdi), RX0;                        \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+                                                                   \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0);           \
+       vaesenclast MASK_4BIT, RX0, RX0;                            \
+       transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0);            \
+                                                                   \
+       /* linear part */                                           \
+       vpshufb RB3, RX0, RTMP0;                                    \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP2, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP3, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1;            \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP1, s0, s0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk4:
+       ROUND(0, RA0, RA1, RA2, RA3);
+       ROUND(1, RA1, RA2, RA3, RA0);
+       ROUND(2, RA2, RA3, RA0, RA1);
+       ROUND(3, RA3, RA0, RA1, RA2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk4;
+
+#undef ROUND
+
+       vmovdqa .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+
+       vmovdqu RA0, 0*16(%rsi);
+       cmpq $2, %rcx;
+       jb .Lblk4_store_output_done;
+       vmovdqu RA1, 1*16(%rsi);
+       je .Lblk4_store_output_done;
+       vmovdqu RA2, 2*16(%rsi);
+       cmpq $3, %rcx;
+       je .Lblk4_store_output_done;
+       vmovdqu RA3, 3*16(%rsi);
+
+.Lblk4_store_output_done:
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_crypt4)
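
The ROUND macro is a four-way vectorization of the standard SM4 round: x = s1 ^ s2 ^ s3 ^ rk goes through the S-box per byte, then through the linear transform L(b) = b ^ rol(b,2) ^ rol(b,10) ^ rol(b,18) ^ rol(b,24), and the result is XORed into s0. A scalar reference of one round (the S-box table contents are elided here):

#include <stdint.h>

static const uint8_t sm4_sbox[256] = { /* standard SM4 S-box, elided */ };

static uint32_t rol32(uint32_t x, int n)
{
        return (x << n) | (x >> (32 - n));
}

static uint32_t sm4_round(uint32_t s0, uint32_t s1, uint32_t s2,
                          uint32_t s3, uint32_t rk)
{
        uint32_t x = s1 ^ s2 ^ s3 ^ rk;
        uint32_t t = 0;

        for (int i = 0; i < 4; i++)             /* tau: S-box per byte */
                t |= (uint32_t)sm4_sbox[(x >> (8 * i)) & 0xff] << (8 * i);

        /* L: the same XOR/rotate chain the macro builds with vpshufb */
        return s0 ^ t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^ rol32(t, 24);
}
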
+
+.align 8
+SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+        *                                              plaintext blocks
+        * output:
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
+        *                                              ciphertext blocks
+        */
+       FRAME_BEGIN
+
+       vmovdqa .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
+       vbroadcastss (4*(round))(%rdi), RX0;                        \
+       vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;                          \
+       vmovdqa .Lpre_tf_hi_s rRIP, RTMP1;                          \
+       vmovdqa RX0, RX1;                                           \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+       vmovdqa .Lpost_tf_lo_s rRIP, RTMP2;                         \
+       vmovdqa .Lpost_tf_hi_s rRIP, RTMP3;                         \
+       vpxor r1, RX1, RX1;                                         \
+       vpxor r2, RX1, RX1;                                         \
+       vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
+                                                                    \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       vmovdqa .Linv_shift_row rRIP, RTMP4;                        \
+       vaesenclast MASK_4BIT, RX0, RX0;                            \
+       vaesenclast MASK_4BIT, RX1, RX1;                            \
+       transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+       transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+                                                                    \
+       /* linear part */                                           \
+       vpshufb RTMP4, RX0, RTMP0;                                  \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP4, RX1, RTMP2;                                  \
+       vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4;                  \
+       vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4;                 \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4;                 \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       vpxor RTMP1, s0, s0;                                        \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
+       /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpslld $2, RTMP2, RTMP3;                                    \
+       vpsrld $30, RTMP2, RTMP2;                                   \
+       vpxor RTMP2, r0, r0;                                        \
+       vpxor RTMP3, r0, r0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk8:
+       ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
+       ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
+       ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
+       ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk8;
+
+#undef ROUND
+
+       vmovdqa .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       FRAME_END
+       ret;
+SYM_FUNC_END(__sm4_crypt_blk8)
+
+/*
+ * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
+ *                           const u8 *src, int nblocks)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_crypt8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (1..8 blocks)
+        *      %rdx: src (1..8 blocks)
+        *      %rcx: num blocks (1..8)
+        */
+       FRAME_BEGIN
+
+       cmpq $5, %rcx;
+       jb sm4_aesni_avx_crypt4;
+       vmovdqu (0 * 16)(%rdx), RA0;
+       vmovdqu (1 * 16)(%rdx), RA1;
+       vmovdqu (2 * 16)(%rdx), RA2;
+       vmovdqu (3 * 16)(%rdx), RA3;
+       vmovdqu (4 * 16)(%rdx), RB0;
+       vmovdqa RB0, RB1;
+       vmovdqa RB0, RB2;
+       vmovdqa RB0, RB3;
+       je .Lblk8_load_input_done;
+       vmovdqu (5 * 16)(%rdx), RB1;
+       cmpq $7, %rcx;
+       jb .Lblk8_load_input_done;
+       vmovdqu (6 * 16)(%rdx), RB2;
+       je .Lblk8_load_input_done;
+       vmovdqu (7 * 16)(%rdx), RB3;
+
+.Lblk8_load_input_done:
+       call __sm4_crypt_blk8;
+
+       cmpq $6, %rcx;
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       jb .Lblk8_store_output_done;
+       vmovdqu RB1, (5 * 16)(%rsi);
+       je .Lblk8_store_output_done;
+       vmovdqu RB2, (6 * 16)(%rsi);
+       cmpq $7, %rcx;
+       je .Lblk8_store_output_done;
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+.Lblk8_store_output_done:
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_crypt8)
+
+/*
+ * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv (big endian, 128bit)
+        */
+       FRAME_BEGIN
+
+       /* load IV and byteswap */
+       vmovdqu (%rcx), RA0;
+
+       vmovdqa .Lbswap128_mask rRIP, RBSWAP;
+       vpshufb RBSWAP, RA0, RTMP0; /* be => le */
+
+       vpcmpeqd RNOT, RNOT, RNOT;
+       vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */
+
+#define inc_le128(x, minus_one, tmp) \
+       vpcmpeqq minus_one, x, tmp;  \
+       vpsubq minus_one, x, x;      \
+       vpslldq $8, tmp, tmp;        \
+       vpsubq tmp, x, x;
+
+       /* construct IVs */
+       inc_le128(RTMP0, RNOT, RTMP2); /* +1 */
+       vpshufb RBSWAP, RTMP0, RA1;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +2 */
+       vpshufb RBSWAP, RTMP0, RA2;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +3 */
+       vpshufb RBSWAP, RTMP0, RA3;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +4 */
+       vpshufb RBSWAP, RTMP0, RB0;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +5 */
+       vpshufb RBSWAP, RTMP0, RB1;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +6 */
+       vpshufb RBSWAP, RTMP0, RB2;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +7 */
+       vpshufb RBSWAP, RTMP0, RB3;
+       inc_le128(RTMP0, RNOT, RTMP2); /* +8 */
+       vpshufb RBSWAP, RTMP0, RTMP1;
+
+       /* store new IV */
+       vmovdqu RTMP1, (%rcx);
+
+       call __sm4_crypt_blk8;
+
+       vpxor (0 * 16)(%rdx), RA0, RA0;
+       vpxor (1 * 16)(%rdx), RA1, RA1;
+       vpxor (2 * 16)(%rdx), RA2, RA2;
+       vpxor (3 * 16)(%rdx), RA3, RA3;
+       vpxor (4 * 16)(%rdx), RB0, RB0;
+       vpxor (5 * 16)(%rdx), RB1, RB1;
+       vpxor (6 * 16)(%rdx), RB2, RB2;
+       vpxor (7 * 16)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
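
inc_le128 increments a 128-bit little-endian counter held in a vector register: vpsubq adds 1 to the low 64-bit lane (x - (-1)), and the vpcmpeqq/vpslldq pair propagates a carry into the high lane exactly when the low lane was all-ones. The scalar equivalent:

#include <stdint.h>

struct u128le {
        uint64_t lo;
        uint64_t hi;
};

/* Carry into the high word iff the low word wrapped from all-ones. */
static void inc_le128_scalar(struct u128le *x)
{
        if (++x->lo == 0)
                x->hi++;
}
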
+
+/*
+ * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vmovdqu (0 * 16)(%rdx), RA0;
+       vmovdqu (1 * 16)(%rdx), RA1;
+       vmovdqu (2 * 16)(%rdx), RA2;
+       vmovdqu (3 * 16)(%rdx), RA3;
+       vmovdqu (4 * 16)(%rdx), RB0;
+       vmovdqu (5 * 16)(%rdx), RB1;
+       vmovdqu (6 * 16)(%rdx), RB2;
+       vmovdqu (7 * 16)(%rdx), RB3;
+
+       call __sm4_crypt_blk8;
+
+       vmovdqu (7 * 16)(%rdx), RNOT;
+       vpxor (%rcx), RA0, RA0;
+       vpxor (0 * 16)(%rdx), RA1, RA1;
+       vpxor (1 * 16)(%rdx), RA2, RA2;
+       vpxor (2 * 16)(%rdx), RA3, RA3;
+       vpxor (3 * 16)(%rdx), RB0, RB0;
+       vpxor (4 * 16)(%rdx), RB1, RB1;
+       vpxor (5 * 16)(%rdx), RB2, RB2;
+       vpxor (6 * 16)(%rdx), RB3, RB3;
+       vmovdqu RNOT, (%rcx); /* store new IV */
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
+
+/*
+ * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
+ *                                 const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (8 blocks)
+        *      %rdx: src (8 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       /* Load input */
+       vmovdqu (%rcx), RA0;
+       vmovdqu 0 * 16(%rdx), RA1;
+       vmovdqu 1 * 16(%rdx), RA2;
+       vmovdqu 2 * 16(%rdx), RA3;
+       vmovdqu 3 * 16(%rdx), RB0;
+       vmovdqu 4 * 16(%rdx), RB1;
+       vmovdqu 5 * 16(%rdx), RB2;
+       vmovdqu 6 * 16(%rdx), RB3;
+
+       /* Update IV */
+       vmovdqu 7 * 16(%rdx), RNOT;
+       vmovdqu RNOT, (%rcx);
+
+       call __sm4_crypt_blk8;
+
+       vpxor (0 * 16)(%rdx), RA0, RA0;
+       vpxor (1 * 16)(%rdx), RA1, RA1;
+       vpxor (2 * 16)(%rdx), RA2, RA2;
+       vpxor (3 * 16)(%rdx), RA3, RA3;
+       vpxor (4 * 16)(%rdx), RB0, RB0;
+       vpxor (5 * 16)(%rdx), RB1, RB1;
+       vpxor (6 * 16)(%rdx), RB2, RB2;
+       vpxor (7 * 16)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 16)(%rsi);
+       vmovdqu RA1, (1 * 16)(%rsi);
+       vmovdqu RA2, (2 * 16)(%rsi);
+       vmovdqu RA3, (3 * 16)(%rsi);
+       vmovdqu RB0, (4 * 16)(%rsi);
+       vmovdqu RB1, (5 * 16)(%rsi);
+       vmovdqu RB2, (6 * 16)(%rsi);
+       vmovdqu RB3, (7 * 16)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
new file mode 100644 (file)
index 0000000..d2ffd7f
--- /dev/null
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
+ * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
+ *  https://github.com/mjosaarinen/sm4ni
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+#define rRIP         (%rip)
+
+/* vector registers */
+#define RX0          %ymm0
+#define RX1          %ymm1
+#define MASK_4BIT    %ymm2
+#define RTMP0        %ymm3
+#define RTMP1        %ymm4
+#define RTMP2        %ymm5
+#define RTMP3        %ymm6
+#define RTMP4        %ymm7
+
+#define RA0          %ymm8
+#define RA1          %ymm9
+#define RA2          %ymm10
+#define RA3          %ymm11
+
+#define RB0          %ymm12
+#define RB1          %ymm13
+#define RB2          %ymm14
+#define RB3          %ymm15
+
+#define RNOT         %ymm0
+#define RBSWAP       %ymm1
+
+#define RX0x         %xmm0
+#define RX1x         %xmm1
+#define MASK_4BITx   %xmm2
+
+#define RNOTx        %xmm0
+#define RBSWAPx      %xmm1
+
+#define RTMP0x       %xmm3
+#define RTMP1x       %xmm4
+#define RTMP2x       %xmm5
+#define RTMP3x       %xmm6
+#define RTMP4x       %xmm7
+
+
+/* helper macros */
+
+/* Transpose four 32-bit words between 128-bit vector lanes. */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+       vpunpckhdq x1, x0, t2;                \
+       vpunpckldq x1, x0, x0;                \
+                                             \
+       vpunpckldq x3, x2, t1;                \
+       vpunpckhdq x3, x2, x2;                \
+                                             \
+       vpunpckhqdq t1, x0, x1;               \
+       vpunpcklqdq t1, x0, x0;               \
+                                             \
+       vpunpckhqdq x2, t2, x3;               \
+       vpunpcklqdq x2, t2, x2;
+
+/* pre-SubByte transform. */
+#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpand x, mask4bit, tmp0;                     \
+       vpandn x, mask4bit, x;                       \
+       vpsrld $4, x, x;                             \
+                                                    \
+       vpshufb tmp0, lo_t, tmp0;                    \
+       vpshufb x, hi_t, x;                          \
+       vpxor tmp0, x, x;
+
+/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
+ * 'vaesenclast' instruction. */
+#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
+       vpandn mask4bit, x, tmp0;                     \
+       vpsrld $4, x, x;                              \
+       vpand x, mask4bit, x;                         \
+                                                     \
+       vpshufb tmp0, lo_t, tmp0;                     \
+       vpshufb x, hi_t, x;                           \
+       vpxor tmp0, x, x;
+
+
+.section       .rodata.cst164, "aM", @progbits, 164
+.align 16
+
+/*
+ * The following four affine transform look-up tables are from work by
+ * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
+ *
+ * These allow the SM4 S-box to be computed via the AES SubBytes operation.
+ */
+
+/* pre-SubByte affine transform, from SM4 field to AES field. */
+.Lpre_tf_lo_s:
+       .quad 0x9197E2E474720701, 0xC7C1B4B222245157
+.Lpre_tf_hi_s:
+       .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
+
+/* post-SubByte affine transform, from AES field to SM4 field. */
+.Lpost_tf_lo_s:
+       .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
+.Lpost_tf_hi_s:
+       .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
+
+/* Inverse ShiftRows permutation, for isolating SubBytes from AESENCLAST */
+.Linv_shift_row:
+       .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+       .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+
+/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_8:
+       .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
+       .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
+
+/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_16:
+       .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
+       .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
+
+/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
+.Linv_shift_row_rol_24:
+       .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
+       .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
+
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+       .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/* For input word byte-swap */
+.Lbswap32_mask:
+       .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+
+.align 4
+/* 4-bit mask */
+.L0f0f0f0f:
+       .long 0x0f0f0f0f
+
+.text
+.align 16
+
+.align 8
+SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
+        *                                              plaintext blocks
+        * output:
+        *      RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
+        *                                              ciphertext blocks
+        */
+       FRAME_BEGIN
+
+       vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+
+#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
+       vpbroadcastd (4*(round))(%rdi), RX0;                        \
+       vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4;                   \
+       vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1;                   \
+       vmovdqa RX0, RX1;                                           \
+       vpxor s1, RX0, RX0;                                         \
+       vpxor s2, RX0, RX0;                                         \
+       vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
+       vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2;                  \
+       vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3;                  \
+       vpxor r1, RX1, RX1;                                         \
+       vpxor r2, RX1, RX1;                                         \
+       vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
+                                                                   \
+       /* sbox, non-linear part */                                 \
+       transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
+       vextracti128 $1, RX0, RTMP4x;                               \
+       vextracti128 $1, RX1, RTMP0x;                               \
+       vaesenclast MASK_4BITx, RX0x, RX0x;                         \
+       vaesenclast MASK_4BITx, RTMP4x, RTMP4x;                     \
+       vaesenclast MASK_4BITx, RX1x, RX1x;                         \
+       vaesenclast MASK_4BITx, RTMP0x, RTMP0x;                     \
+       vinserti128 $1, RTMP4x, RX0, RX0;                           \
+       vbroadcasti128 .Linv_shift_row rRIP, RTMP4;                 \
+       vinserti128 $1, RTMP0x, RX1, RX1;                           \
+       transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+       transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
+                                                                   \
+       /* linear part */                                           \
+       vpshufb RTMP4, RX0, RTMP0;                                  \
+       vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
+       vpshufb RTMP4, RX1, RTMP2;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4;           \
+       vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4;          \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4;          \
+       vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
+       vpshufb RTMP4, RX0, RTMP1;                                  \
+       vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP0, RTMP1;                                    \
+       vpsrld $30, RTMP0, RTMP0;                                   \
+       vpxor RTMP0, s0, s0;                                        \
+       /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP1, s0, s0;                                        \
+       vpshufb RTMP4, RX1, RTMP3;                                  \
+       vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
+       vpslld $2, RTMP2, RTMP3;                                    \
+       vpsrld $30, RTMP2, RTMP2;                                   \
+       vpxor RTMP2, r0, r0;                                        \
+       /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
+       vpxor RTMP3, r0, r0;
+
+       leaq (32*4)(%rdi), %rax;
+.align 16
+.Lroundloop_blk8:
+       ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
+       ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
+       ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
+       ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
+       leaq (4*4)(%rdi), %rdi;
+       cmpq %rax, %rdi;
+       jne .Lroundloop_blk8;
+
+#undef ROUND
+
+       vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;
+
+       transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
+       transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
+       vpshufb RTMP2, RA0, RA0;
+       vpshufb RTMP2, RA1, RA1;
+       vpshufb RTMP2, RA2, RA2;
+       vpshufb RTMP2, RA3, RA3;
+       vpshufb RTMP2, RB0, RB0;
+       vpshufb RTMP2, RB1, RB1;
+       vpshufb RTMP2, RB2, RB2;
+       vpshufb RTMP2, RB3, RB3;
+
+       FRAME_END
+       ret;
+SYM_FUNC_END(__sm4_crypt_blk16)
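
Note how the ROUND macro handles vaesenclast: AVX2 predates the VAES extension, so AES-NI instructions only operate on 128-bit registers. Each ymm value is therefore split with vextracti128, encrypted per half, and stitched back with vinserti128. The same split expressed with intrinsics (a sketch; compile with AVX2 and AES-NI enabled):

#include <immintrin.h>

/* Apply 128-bit AESENCLAST to both halves of a 256-bit vector. */
static __m256i aesenclast_256(__m256i x, __m128i rk)
{
        __m128i lo = _mm_aesenclast_si128(_mm256_castsi256_si128(x), rk);
        __m128i hi = _mm_aesenclast_si128(_mm256_extracti128_si256(x, 1), rk);

        return _mm256_inserti128_si256(_mm256_castsi128_si256(lo), hi, 1);
}
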
+
+#define inc_le128(x, minus_one, tmp) \
+       vpcmpeqq minus_one, x, tmp;  \
+       vpsubq minus_one, x, x;      \
+       vpslldq $8, tmp, tmp;        \
+       vpsubq tmp, x, x;
+
+/*
+ * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv (big endian, 128bit)
+        */
+       FRAME_BEGIN
+
+       movq 8(%rcx), %rax;
+       bswapq %rax;
+
+       vzeroupper;
+
+       vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
+       vpcmpeqd RNOT, RNOT, RNOT;
+       vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
+       vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */
+
+       /* load IV and byteswap */
+       vmovdqu (%rcx), RTMP4x;
+       vpshufb RTMP3x, RTMP4x, RTMP4x;
+       vmovdqa RTMP4x, RTMP0x;
+       inc_le128(RTMP4x, RNOTx, RTMP1x);
+       vinserti128 $1, RTMP4x, RTMP0, RTMP0;
+       vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */
+
+       /* check need for handling 64-bit overflow and carry */
+       cmpq $(0xffffffffffffffff - 16), %rax;
+       ja .Lhandle_ctr_carry;
+
+       /* construct IVs */
+       vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
+       vpshufb RTMP3, RTMP0, RA1;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
+       vpshufb RTMP3, RTMP0, RA2;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
+       vpshufb RTMP3, RTMP0, RA3;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
+       vpshufb RTMP3, RTMP0, RB0;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
+       vpshufb RTMP3, RTMP0, RB1;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
+       vpshufb RTMP3, RTMP0, RB2;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
+       vpshufb RTMP3, RTMP0, RB3;
+       vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
+       vpshufb RTMP3x, RTMP0x, RTMP0x;
+
+       jmp .Lctr_carry_done;
+
+.Lhandle_ctr_carry:
+       /* construct IVs */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
+       inc_le128(RTMP0, RNOT, RTMP1);
+       vextracti128 $1, RTMP0, RTMP0x;
+       vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
+
+.align 4
+.Lctr_carry_done:
+       /* store new IV */
+       vmovdqu RTMP0x, (%rcx);
+
+       call __sm4_crypt_blk16;
+
+       vpxor (0 * 32)(%rdx), RA0, RA0;
+       vpxor (1 * 32)(%rdx), RA1, RA1;
+       vpxor (2 * 32)(%rdx), RA2, RA2;
+       vpxor (3 * 32)(%rdx), RA3, RA3;
+       vpxor (4 * 32)(%rdx), RB0, RB0;
+       vpxor (5 * 32)(%rdx), RB1, RB1;
+       vpxor (6 * 32)(%rdx), RB2, RB2;
+       vpxor (7 * 32)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
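
The cmpq against 0xffffffffffffffff - 16 selects between the two IV-construction paths above: when the low 64 bits of the byteswapped counter cannot wrap within the next 16 increments, the high half stays constant and the IVs can be built with plain 64-bit lane subtractions of a -2 constant; otherwise the slow path does full 128-bit increments with carry. The decision in scalar form:

#include <stdbool.h>
#include <stdint.h>

/* Fast path is safe iff ctr_lo + nblocks does not overflow 64 bits;
 * the asm uses nblocks == 16. */
static bool ctr_fast_path_ok(uint64_t ctr_lo, unsigned int nblocks)
{
        return ctr_lo <= UINT64_MAX - nblocks;
}
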
+
+/*
+ * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vzeroupper;
+
+       vmovdqu (0 * 32)(%rdx), RA0;
+       vmovdqu (1 * 32)(%rdx), RA1;
+       vmovdqu (2 * 32)(%rdx), RA2;
+       vmovdqu (3 * 32)(%rdx), RA3;
+       vmovdqu (4 * 32)(%rdx), RB0;
+       vmovdqu (5 * 32)(%rdx), RB1;
+       vmovdqu (6 * 32)(%rdx), RB2;
+       vmovdqu (7 * 32)(%rdx), RB3;
+
+       call __sm4_crypt_blk16;
+
+       vmovdqu (%rcx), RNOTx;
+       vinserti128 $1, (%rdx), RNOT, RNOT;
+       vpxor RNOT, RA0, RA0;
+       vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
+       vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
+       vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
+       vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
+       vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
+       vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
+       vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
+       vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
+       vmovdqu RNOTx, (%rcx); /* store new IV */
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
+
+/*
+ * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
+ *                                   const u8 *src, u8 *iv)
+ */
+.align 8
+SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
+       /* input:
+        *      %rdi: round key array, CTX
+        *      %rsi: dst (16 blocks)
+        *      %rdx: src (16 blocks)
+        *      %rcx: iv
+        */
+       FRAME_BEGIN
+
+       vzeroupper;
+
+       /* Load input */
+       vmovdqu (%rcx), RNOTx;
+       vinserti128 $1, (%rdx), RNOT, RA0;
+       vmovdqu (0 * 32 + 16)(%rdx), RA1;
+       vmovdqu (1 * 32 + 16)(%rdx), RA2;
+       vmovdqu (2 * 32 + 16)(%rdx), RA3;
+       vmovdqu (3 * 32 + 16)(%rdx), RB0;
+       vmovdqu (4 * 32 + 16)(%rdx), RB1;
+       vmovdqu (5 * 32 + 16)(%rdx), RB2;
+       vmovdqu (6 * 32 + 16)(%rdx), RB3;
+
+       /* Update IV */
+       vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
+       vmovdqu RNOTx, (%rcx);
+
+       call __sm4_crypt_blk16;
+
+       vpxor (0 * 32)(%rdx), RA0, RA0;
+       vpxor (1 * 32)(%rdx), RA1, RA1;
+       vpxor (2 * 32)(%rdx), RA2, RA2;
+       vpxor (3 * 32)(%rdx), RA3, RA3;
+       vpxor (4 * 32)(%rdx), RB0, RB0;
+       vpxor (5 * 32)(%rdx), RB1, RB1;
+       vpxor (6 * 32)(%rdx), RB2, RB2;
+       vpxor (7 * 32)(%rdx), RB3, RB3;
+
+       vmovdqu RA0, (0 * 32)(%rsi);
+       vmovdqu RA1, (1 * 32)(%rsi);
+       vmovdqu RA2, (2 * 32)(%rsi);
+       vmovdqu RA3, (3 * 32)(%rsi);
+       vmovdqu RB0, (4 * 32)(%rsi);
+       vmovdqu RB1, (5 * 32)(%rsi);
+       vmovdqu RB2, (6 * 32)(%rsi);
+       vmovdqu RB3, (7 * 32)(%rsi);
+
+       vzeroall;
+       FRAME_END
+       ret;
+SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
diff --git a/arch/x86/crypto/sm4-avx.h b/arch/x86/crypto/sm4-avx.h
new file mode 100644 (file)
index 0000000..1bceab7
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef ASM_X86_SM4_AVX_H
+#define ASM_X86_SM4_AVX_H
+
+#include <linux/types.h>
+#include <crypto/sm4.h>
+
+typedef void (*sm4_crypt_func)(const u32 *rk, u8 *dst, const u8 *src, u8 *iv);
+
+int sm4_avx_ecb_encrypt(struct skcipher_request *req);
+int sm4_avx_ecb_decrypt(struct skcipher_request *req);
+
+int sm4_cbc_encrypt(struct skcipher_request *req);
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+int sm4_cfb_encrypt(struct skcipher_request *req);
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func);
+
+#endif
diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c
new file mode 100644 (file)
index 0000000..84bc718
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (c) 2021, Alibaba Group.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-avx.h"
+
+#define SM4_CRYPT16_BLOCK_SIZE (SM4_BLOCK_SIZE * 16)
+
+asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
+                                       const u8 *src, u8 *iv);
+
+static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                       unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
+static int cbc_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cbc_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_cbc_dec_blk16);
+}
+
+static int cfb_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cfb_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_cfb_dec_blk16);
+}
+
+static int ctr_crypt(struct skcipher_request *req)
+{
+       return sm4_avx_ctr_crypt(req, SM4_CRYPT16_BLOCK_SIZE,
+                               sm4_aesni_avx2_ctr_enc_blk16);
+}
+
+static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
+       {
+               .base = {
+                       .cra_name               = "__ecb(sm4)",
+                       .cra_driver_name        = "__ecb-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_avx_ecb_encrypt,
+               .decrypt        = sm4_avx_ecb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cbc(sm4)",
+                       .cra_driver_name        = "__cbc-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cbc_encrypt,
+               .decrypt        = cbc_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cfb(sm4)",
+                       .cra_driver_name        = "__cfb-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cfb_encrypt,
+               .decrypt        = cfb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__ctr(sm4)",
+                       .cra_driver_name        = "__ctr-sm4-aesni-avx2",
+                       .cra_priority           = 500,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 16 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = ctr_crypt,
+               .decrypt        = ctr_crypt,
+       }
+};
+
+static struct simd_skcipher_alg *
+simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)];
+
+static int __init sm4_init(void)
+{
+       const char *feature_name;
+
+       if (!boot_cpu_has(X86_FEATURE_AVX) ||
+           !boot_cpu_has(X86_FEATURE_AVX2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX, AVX2 or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
+               return -ENODEV;
+       }
+
+       return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx2_skciphers),
+                                       simd_sm4_aesni_avx2_skciphers);
+}
+
+static void __exit sm4_exit(void)
+{
+       simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
+                               ARRAY_SIZE(sm4_aesni_avx2_skciphers),
+                               simd_sm4_aesni_avx2_skciphers);
+}
+
+module_init(sm4_init);
+module_exit(sm4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX2 optimized");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-aesni-avx2");
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
new file mode 100644 (file)
index 0000000..7800f77
--- /dev/null
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4 Cipher Algorithm, AES-NI/AVX optimized,
+ * as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (c) 2021, Alibaba Group.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-avx.h"
+
+#define SM4_CRYPT8_BLOCK_SIZE  (SM4_BLOCK_SIZE * 8)
+
+asmlinkage void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
+                               const u8 *src, int nblocks);
+asmlinkage void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
+                               const u8 *src, int nblocks);
+asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+asmlinkage void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
+                               const u8 *src, u8 *iv);
+
+static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                       unsigned int key_len)
+{
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return sm4_expandkey(ctx, key, key_len);
+}
+
+static int ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
+{
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
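+               /*
+                * Bulk path: consume eight blocks per call while enough data
+                * remains, then finish the remainder in groups of up to four
+                * blocks.
+                */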
+               while (nbytes >= SM4_CRYPT8_BLOCK_SIZE) {
+                       sm4_aesni_avx_crypt8(rkey, dst, src, 8);
+                       dst += SM4_CRYPT8_BLOCK_SIZE;
+                       src += SM4_CRYPT8_BLOCK_SIZE;
+                       nbytes -= SM4_CRYPT8_BLOCK_SIZE;
+               }
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       unsigned int nblocks = min(nbytes >> 4, 4u);
+
+                       sm4_aesni_avx_crypt4(rkey, dst, src, nblocks);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+               kernel_fpu_end();
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+
+int sm4_avx_ecb_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return ecb_do_crypt(req, ctx->rkey_enc);
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_encrypt);
+
+int sm4_avx_ecb_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return ecb_do_crypt(req, ctx->rkey_dec);
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_decrypt);
+
+int sm4_cbc_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *iv = walk.iv;
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
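+               /*
+                * CBC encryption chains each block on the previous
+                * ciphertext, so it cannot be parallelised; use the generic
+                * single-block routine and leave the FPU untouched.
+                */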
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       crypto_xor_cpy(dst, src, iv, SM4_BLOCK_SIZE);
+                       sm4_crypt_block(ctx->rkey_enc, dst, dst);
+                       iv = dst;
+                       src += SM4_BLOCK_SIZE;
+                       dst += SM4_BLOCK_SIZE;
+                       nbytes -= SM4_BLOCK_SIZE;
+               }
+               if (iv != walk.iv)
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_cbc_encrypt);
+
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_dec, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       u8 iv[SM4_BLOCK_SIZE];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+                       int i;
+
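+                       /*
+                        * Decrypt up to eight blocks into keystream[], save
+                        * the last ciphertext block as the next IV, then XOR
+                        * in the previous ciphertext block from last to first
+                        * so that in-place (dst == src) requests do not
+                        * clobber ciphertext that is still needed.
+                        */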
+                       sm4_aesni_avx_crypt8(ctx->rkey_dec, keystream,
+                                               src, nblocks);
+
+                       src += ((int)nblocks - 2) * SM4_BLOCK_SIZE;
+                       dst += (nblocks - 1) * SM4_BLOCK_SIZE;
+                       memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE);
+
+                       for (i = nblocks - 1; i > 0; i--) {
+                               crypto_xor_cpy(dst, src,
+                                       &keystream[i * SM4_BLOCK_SIZE],
+                                       SM4_BLOCK_SIZE);
+                               src -= SM4_BLOCK_SIZE;
+                               dst -= SM4_BLOCK_SIZE;
+                       }
+                       crypto_xor_cpy(dst, walk.iv, keystream, SM4_BLOCK_SIZE);
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += (nblocks + 1) * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_cbc_decrypt);
+
+static int cbc_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cbc_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_cbc_dec_blk8);
+}
+
+int sm4_cfb_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               u8 keystream[SM4_BLOCK_SIZE];
+               const u8 *iv = walk.iv;
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       sm4_crypt_block(ctx->rkey_enc, keystream, iv);
+                       crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
+                       iv = dst;
+                       src += SM4_BLOCK_SIZE;
+                       dst += SM4_BLOCK_SIZE;
+                       nbytes -= SM4_BLOCK_SIZE;
+               }
+               if (iv != walk.iv)
+                       memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_cfb_encrypt);
+
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_enc, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+
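+                       /*
+                        * The CFB keystream is the cipher applied to the
+                        * previous ciphertext: encrypt the IV followed by the
+                        * first nblocks - 1 ciphertext blocks.  The last
+                        * ciphertext block is saved as the new IV before the
+                        * XOR so that in-place requests work.
+                        */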
+                       memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
+                       if (nblocks > 1)
+                               memcpy(&keystream[SM4_BLOCK_SIZE], src,
+                                       (nblocks - 1) * SM4_BLOCK_SIZE);
+                       memcpy(walk.iv, src + (nblocks - 1) * SM4_BLOCK_SIZE,
+                               SM4_BLOCK_SIZE);
+
+                       sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
+                                               keystream, nblocks);
+
+                       crypto_xor_cpy(dst, src, keystream,
+                                       nblocks * SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       u8 keystream[SM4_BLOCK_SIZE];
+
+                       sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt);
+
+static int cfb_decrypt(struct skcipher_request *req)
+{
+       return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_cfb_dec_blk8);
+}
+
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+                       unsigned int bsize, sm4_crypt_func func)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+
+       while ((nbytes = walk.nbytes) > 0) {
+               const u8 *src = walk.src.virt.addr;
+               u8 *dst = walk.dst.virt.addr;
+
+               kernel_fpu_begin();
+
+               while (nbytes >= bsize) {
+                       func(ctx->rkey_enc, dst, src, walk.iv);
+                       dst += bsize;
+                       src += bsize;
+                       nbytes -= bsize;
+               }
+
+               while (nbytes >= SM4_BLOCK_SIZE) {
+                       u8 keystream[SM4_BLOCK_SIZE * 8];
+                       unsigned int nblocks = min(nbytes >> 4, 8u);
+                       int i;
+
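+                       /*
+                        * Expand the counter into up to eight blocks, encrypt
+                        * them with a single call and XOR the result into the
+                        * data.
+                        */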
+                       for (i = 0; i < nblocks; i++) {
+                               memcpy(&keystream[i * SM4_BLOCK_SIZE],
+                                       walk.iv, SM4_BLOCK_SIZE);
+                               crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+                       }
+                       sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
+                                       keystream, nblocks);
+
+                       crypto_xor_cpy(dst, src, keystream,
+                                       nblocks * SM4_BLOCK_SIZE);
+                       dst += nblocks * SM4_BLOCK_SIZE;
+                       src += nblocks * SM4_BLOCK_SIZE;
+                       nbytes -= nblocks * SM4_BLOCK_SIZE;
+               }
+
+               kernel_fpu_end();
+
+               /* tail */
+               if (walk.nbytes == walk.total && nbytes > 0) {
+                       u8 keystream[SM4_BLOCK_SIZE];
+
+                       memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
+                       crypto_inc(walk.iv, SM4_BLOCK_SIZE);
+
+                       sm4_crypt_block(ctx->rkey_enc, keystream, keystream);
+
+                       crypto_xor_cpy(dst, src, keystream, nbytes);
+                       dst += nbytes;
+                       src += nbytes;
+                       nbytes = 0;
+               }
+
+               err = skcipher_walk_done(&walk, nbytes);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sm4_avx_ctr_crypt);
+
+static int ctr_crypt(struct skcipher_request *req)
+{
+       return sm4_avx_ctr_crypt(req, SM4_CRYPT8_BLOCK_SIZE,
+                               sm4_aesni_avx_ctr_enc_blk8);
+}
+
+static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
+       {
+               .base = {
+                       .cra_name               = "__ecb(sm4)",
+                       .cra_driver_name        = "__ecb-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_avx_ecb_encrypt,
+               .decrypt        = sm4_avx_ecb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cbc(sm4)",
+                       .cra_driver_name        = "__cbc-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = SM4_BLOCK_SIZE,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cbc_encrypt,
+               .decrypt        = cbc_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cfb(sm4)",
+                       .cra_driver_name        = "__cfb-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = sm4_cfb_encrypt,
+               .decrypt        = cfb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__ctr(sm4)",
+                       .cra_driver_name        = "__ctr-sm4-aesni-avx",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct sm4_ctx),
+                       .cra_module             = THIS_MODULE,
+               },
+               .min_keysize    = SM4_KEY_SIZE,
+               .max_keysize    = SM4_KEY_SIZE,
+               .ivsize         = SM4_BLOCK_SIZE,
+               .chunksize      = SM4_BLOCK_SIZE,
+               .walksize       = 8 * SM4_BLOCK_SIZE,
+               .setkey         = sm4_skcipher_setkey,
+               .encrypt        = ctr_crypt,
+               .decrypt        = ctr_crypt,
+       }
+};
+
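+/*
+ * The internal "__" algorithms above are only reachable through these simd
+ * wrappers, which defer to cryptd when the FPU cannot be used.
+ */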
+static struct simd_skcipher_alg *
+simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)];
+
+static int __init sm4_init(void)
+{
+       const char *feature_name;
+
+       if (!boot_cpu_has(X86_FEATURE_AVX) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               pr_info("AVX or AES-NI instructions are not detected.\n");
+               return -ENODEV;
+       }
+
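+       /* The AVX code relies on XSAVE-managed SSE and YMM register state. */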
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
+               return -ENODEV;
+       }
+
+       return simd_register_skciphers_compat(sm4_aesni_avx_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx_skciphers),
+                                       simd_sm4_aesni_avx_skciphers);
+}
+
+static void __exit sm4_exit(void)
+{
+       simd_unregister_skciphers(sm4_aesni_avx_skciphers,
+                                       ARRAY_SIZE(sm4_aesni_avx_skciphers),
+                                       simd_sm4_aesni_avx_skciphers);
+}
+
+module_init(sm4_init);
+module_exit(sm4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_DESCRIPTION("SM4 Cipher Algorithm, AES-NI/AVX optimized");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-aesni-avx");
index 40669eac9d6db51f09227cd5e8283e79be42bfd0..ccc9ee1971e891d2608210f61cb55d58f71583d0 100644 (file)
@@ -90,6 +90,7 @@ struct perf_ibs {
        unsigned long                   offset_mask[1];
        int                             offset_max;
        unsigned int                    fetch_count_reset_broken : 1;
+       unsigned int                    fetch_ignore_if_zero_rip : 1;
        struct cpu_perf_ibs __percpu    *pcpu;
 
        struct attribute                **format_attrs;
@@ -570,6 +571,7 @@ static struct perf_ibs perf_ibs_op = {
                .start          = perf_ibs_start,
                .stop           = perf_ibs_stop,
                .read           = perf_ibs_read,
+               .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        },
        .msr                    = MSR_AMD64_IBSOPCTL,
        .config_mask            = IBS_OP_CONFIG_MASK,
@@ -672,6 +674,10 @@ fail:
        if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
                regs.flags &= ~PERF_EFLAGS_EXACT;
        } else {
+               /* Workaround for erratum #1197 */
+               if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
+                       goto out;
+
                set_linear_ip(&regs, ibs_data.regs[1]);
                regs.flags |= PERF_EFLAGS_EXACT;
        }
@@ -769,6 +775,9 @@ static __init void perf_event_ibs_init(void)
        if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
                perf_ibs_fetch.fetch_count_reset_broken = 1;
 
+       if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
+               perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
+
        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
 
        if (ibs_caps & IBS_CAPS_OPCNT) {
index 16a2369c586e88ed382477c09d60a72c98ab4263..37d5b380516ec4ecd85a9828273b778d7de9d29b 100644 (file)
@@ -213,6 +213,7 @@ static struct pmu pmu_class = {
        .stop           = pmu_event_stop,
        .read           = pmu_event_read,
        .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .module         = THIS_MODULE,
 };
 
 static int power_cpu_exit(unsigned int cpu)
index 915847655c0652820ef9083faae49d63e85007d1..b044577785bbb20bd0b93a27caa5f6bfeabbfa7b 100644 (file)
@@ -62,7 +62,7 @@ static struct pt_cap_desc {
        PT_CAP(single_range_output,     0, CPUID_ECX, BIT(2)),
        PT_CAP(output_subsys,           0, CPUID_ECX, BIT(3)),
        PT_CAP(payloads_lip,            0, CPUID_ECX, BIT(31)),
-       PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x3),
+       PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x7),
        PT_CAP(mtc_periods,             1, CPUID_EAX, 0xffff0000),
        PT_CAP(cycle_thresholds,        1, CPUID_EBX, 0xffff),
        PT_CAP(psb_periods,             1, CPUID_EBX, 0xffff0000),
index 609c24aec71a77fef399b88bd35210caf6cedbb3..c682b09b18fa00d75adf7989e4129215e293deac 100644 (file)
@@ -4811,7 +4811,7 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
                return;
 
        pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
-       addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
+       addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
 
        pci_read_config_dword(pdev, mem_offset, &pci_dword);
        addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
index 05b48b33baf0b811b2788750d4539f18c9bd2255..ff5c7134a37aa11a6b9cfc08994cd58af76dbfbc 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _ASM_X86_KFENCE_H
 #define _ASM_X86_KFENCE_H
 
+#ifndef MODULE
+
 #include <linux/bug.h>
 #include <linux/kfence.h>
 
@@ -66,4 +68,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        return true;
 }
 
+#endif /* !MODULE */
+
 #endif /* _ASM_X86_KFENCE_H */
index 974cbfb1eefe362dcb894b000236b9e007556886..af6ce8d4c86a8e07073fe9355375bcd654ba5528 100644 (file)
@@ -1038,6 +1038,13 @@ struct kvm_arch {
        struct list_head lpage_disallowed_mmu_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
+       /*
+        * Protects marking pages unsync during page faults, as TDP MMU page
+        * faults only take mmu_lock for read.  For simplicity, the unsync
+        * pages lock is always taken when marking pages unsync regardless of
+        * whether mmu_lock is held for read or write.
+        */
+       spinlock_t mmu_unsync_pages_lock;
 
        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
index 0607ec4f50914eac4d85ac8d2fd59ba8c8636f9a..da9321548f6f1e2a54878459f3d73d580c81edca 100644 (file)
@@ -265,6 +265,7 @@ enum mcp_flags {
        MCP_TIMESTAMP   = BIT(0),       /* log time stamp */
        MCP_UC          = BIT(1),       /* log uncorrected errors */
        MCP_DONTLOG     = BIT(2),       /* only clear, don't log */
+       MCP_QUEUE_LOG   = BIT(3),       /* only queue to genpool */
 };
 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
index e322676039f4ec9e7e96d3d74f7f273392fb92e4..b00dbc5fac2b2adf83ea52736b47736497ec0ca1 100644 (file)
@@ -184,6 +184,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define V_IGN_TPR_SHIFT 20
 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
 
+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 
index d5c691a3208b69d7aba5b2321074d6175dab2dd4..39224e035e47569ac7edbf619a5b4ce17a879ec5 100644 (file)
@@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static struct irq_chip ioapic_ir_chip __read_mostly = {
@@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_set_affinity       = ioapic_set_affinity,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_get_irqchip_state  = ioapic_irq_get_chip_state,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static inline void init_IO_APIC_traps(void)
index 44ebe25e770360a4450fa6adf48b5786955a587c..dbacb9ec8843a53fac36ad58850b8008ac40f466 100644 (file)
@@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
         *   The quirk bit is not set in this case.
         * - The new vector is the same as the old vector
         * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+        * - The interrupt is not yet started up
         * - The new destination CPU is the same as the old destination CPU
         */
        if (!irqd_msi_nomask_quirk(irqd) ||
            cfg->vector == old_cfg.vector ||
            old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+           !irqd_is_started(irqd) ||
            cfg->dest_apicid == old_cfg.dest_apicid) {
                irq_msi_update_msg(irqd, cfg);
                return ret;
@@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_set_affinity       = msi_set_affinity,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = {
        .irq_mask               = pci_msi_mask_irq,
        .irq_ack                = irq_chip_ack_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static struct msi_domain_info pci_msi_ir_domain_info = {
@@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = {
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .irq_compose_msi_msg    = dmar_msi_compose_msg,
        .irq_write_msi_msg      = dmar_msi_write_msg,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static int dmar_msi_init(struct irq_domain *domain,
index 22791aadc085c31d34d2501194e14b34b8c88595..8cb7816d03b4cd27dd9a2143533e4d684e6a0823 100644 (file)
@@ -817,7 +817,10 @@ log_it:
                if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
                        goto clear_it;
 
-               mce_log(&m);
+               if (flags & MCP_QUEUE_LOG)
+                       mce_gen_pool_add(&m);
+               else
+                       mce_log(&m);
 
 clear_it:
                /*
@@ -1639,10 +1642,12 @@ static void __mcheck_cpu_init_generic(void)
                m_fl = MCP_DONTLOG;
 
        /*
-        * Log the machine checks left over from the previous reset.
+        * Log the machine checks left over from the previous reset. Log them
+        * only; do not start processing them. That will happen in mcheck_late_init()
+        * when all consumers have been registered on the notifier chain.
         */
        bitmap_fill(all_banks, MAX_NR_BANKS);
-       machine_check_poll(MCP_UC | m_fl, &all_banks);
+       machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
 
        cr4_set_bits(X86_CR4_MCE);
 
index 23001ae03e82b924d66ed56e70194d533bbff5a0..4b8813bafffdcf87cde891df773b53ed2f702de7 100644 (file)
@@ -57,128 +57,57 @@ static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
              struct rdt_resource *r);
 
-#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
 
-struct rdt_resource rdt_resources_all[] = {
+struct rdt_hw_resource rdt_resources_all[] = {
        [RDT_RESOURCE_L3] =
        {
-               .rid                    = RDT_RESOURCE_L3,
-               .name                   = "L3",
-               .domains                = domain_init(RDT_RESOURCE_L3),
-               .msr_base               = MSR_IA32_L3_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 1,
-                       .cbm_idx_offset = 0,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L3DATA] =
-       {
-               .rid                    = RDT_RESOURCE_L3DATA,
-               .name                   = "L3DATA",
-               .domains                = domain_init(RDT_RESOURCE_L3DATA),
-               .msr_base               = MSR_IA32_L3_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 0,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_L3,
+                       .name                   = "L3",
+                       .cache_level            = 3,
+                       .cache = {
+                               .min_cbm_bits   = 1,
+                       },
+                       .domains                = domain_init(RDT_RESOURCE_L3),
+                       .parse_ctrlval          = parse_cbm,
+                       .format_str             = "%d=%0*x",
+                       .fflags                 = RFTYPE_RES_CACHE,
                },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L3CODE] =
-       {
-               .rid                    = RDT_RESOURCE_L3CODE,
-               .name                   = "L3CODE",
-               .domains                = domain_init(RDT_RESOURCE_L3CODE),
                .msr_base               = MSR_IA32_L3_CBM_BASE,
                .msr_update             = cat_wrmsr,
-               .cache_level            = 3,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 1,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_L2] =
        {
-               .rid                    = RDT_RESOURCE_L2,
-               .name                   = "L2",
-               .domains                = domain_init(RDT_RESOURCE_L2),
-               .msr_base               = MSR_IA32_L2_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 1,
-                       .cbm_idx_offset = 0,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_L2,
+                       .name                   = "L2",
+                       .cache_level            = 2,
+                       .cache = {
+                               .min_cbm_bits   = 1,
+                       },
+                       .domains                = domain_init(RDT_RESOURCE_L2),
+                       .parse_ctrlval          = parse_cbm,
+                       .format_str             = "%d=%0*x",
+                       .fflags                 = RFTYPE_RES_CACHE,
                },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L2DATA] =
-       {
-               .rid                    = RDT_RESOURCE_L2DATA,
-               .name                   = "L2DATA",
-               .domains                = domain_init(RDT_RESOURCE_L2DATA),
                .msr_base               = MSR_IA32_L2_CBM_BASE,
                .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 0,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
-       },
-       [RDT_RESOURCE_L2CODE] =
-       {
-               .rid                    = RDT_RESOURCE_L2CODE,
-               .name                   = "L2CODE",
-               .domains                = domain_init(RDT_RESOURCE_L2CODE),
-               .msr_base               = MSR_IA32_L2_CBM_BASE,
-               .msr_update             = cat_wrmsr,
-               .cache_level            = 2,
-               .cache = {
-                       .min_cbm_bits   = 1,
-                       .cbm_idx_mult   = 2,
-                       .cbm_idx_offset = 1,
-               },
-               .parse_ctrlval          = parse_cbm,
-               .format_str             = "%d=%0*x",
-               .fflags                 = RFTYPE_RES_CACHE,
        },
        [RDT_RESOURCE_MBA] =
        {
-               .rid                    = RDT_RESOURCE_MBA,
-               .name                   = "MB",
-               .domains                = domain_init(RDT_RESOURCE_MBA),
-               .cache_level            = 3,
-               .parse_ctrlval          = parse_bw,
-               .format_str             = "%d=%*u",
-               .fflags                 = RFTYPE_RES_MB,
+               .r_resctrl = {
+                       .rid                    = RDT_RESOURCE_MBA,
+                       .name                   = "MB",
+                       .cache_level            = 3,
+                       .domains                = domain_init(RDT_RESOURCE_MBA),
+                       .parse_ctrlval          = parse_bw,
+                       .format_str             = "%d=%*u",
+                       .fflags                 = RFTYPE_RES_MB,
+               },
        },
 };
 
-static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
-{
-       return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
-}
-
 /*
  * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
  * as they do not have CPUID enumeration support for Cache allocation.
@@ -199,7 +128,8 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
  */
 static inline void cache_alloc_hsw_probe(void)
 {
-       struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_resource *r  = &hw_res->r_resctrl;
        u32 l, h, max_cbm = BIT_MASK(20) - 1;
 
        if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
@@ -211,7 +141,7 @@ static inline void cache_alloc_hsw_probe(void)
        if (l != max_cbm)
                return;
 
-       r->num_closid = 4;
+       hw_res->num_closid = 4;
        r->default_ctrl = max_cbm;
        r->cache.cbm_len = 20;
        r->cache.shareable_bits = 0xc0000;
@@ -225,7 +155,7 @@ static inline void cache_alloc_hsw_probe(void)
 bool is_mba_sc(struct rdt_resource *r)
 {
        if (!r)
-               return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;
+               return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
 
        return r->membw.mba_sc;
 }
@@ -253,12 +183,13 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 
 static bool __get_mem_config_intel(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx, max_delay;
 
        cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        max_delay = eax.split.max_delay + 1;
        r->default_ctrl = MAX_MBA_BW;
        r->membw.arch_needs_linear = true;
@@ -287,12 +218,13 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
 
 static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;
 
        cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        r->default_ctrl = MAX_MBA_BW_AMD;
 
        /* AMD does not use delay */
@@ -317,12 +249,13 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_1_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;
 
        cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
-       r->num_closid = edx.split.cos_max + 1;
+       hw_res->num_closid = edx.split.cos_max + 1;
        r->cache.cbm_len = eax.split.cbm_len + 1;
        r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
        r->cache.shareable_bits = ebx & r->default_ctrl;
@@ -331,43 +264,35 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
        r->alloc_enabled = true;
 }
 
-static void rdt_get_cdp_config(int level, int type)
+static void rdt_get_cdp_config(int level)
 {
-       struct rdt_resource *r_l = &rdt_resources_all[level];
-       struct rdt_resource *r = &rdt_resources_all[type];
-
-       r->num_closid = r_l->num_closid / 2;
-       r->cache.cbm_len = r_l->cache.cbm_len;
-       r->default_ctrl = r_l->default_ctrl;
-       r->cache.shareable_bits = r_l->cache.shareable_bits;
-       r->data_width = (r->cache.cbm_len + 3) / 4;
-       r->alloc_capable = true;
        /*
         * By default, CDP is disabled. CDP can be enabled by mount parameter
         * "cdp" during resctrl file system mount time.
         */
-       r->alloc_enabled = false;
+       rdt_resources_all[level].cdp_enabled = false;
+       rdt_resources_all[level].r_resctrl.cdp_capable = true;
 }
 
 static void rdt_get_cdp_l3_config(void)
 {
-       rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
-       rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
+       rdt_get_cdp_config(RDT_RESOURCE_L3);
 }
 
 static void rdt_get_cdp_l2_config(void)
 {
-       rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
-       rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
+       rdt_get_cdp_config(RDT_RESOURCE_L2);
 }
 
 static void
 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+               wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 /*
@@ -389,19 +314,23 @@ mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
                struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        /*  Write the delay values for mba. */
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
+               wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
 }
 
 static void
 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
 {
        unsigned int i;
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
        for (i = m->low; i < m->high; i++)
-               wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
+               wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
 }
 
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
@@ -417,16 +346,22 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
        return NULL;
 }
 
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
+{
+       return resctrl_to_arch_res(r)->num_closid;
+}
+
 void rdt_ctrl_update(void *arg)
 {
        struct msr_param *m = arg;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
        struct rdt_resource *r = m->res;
        int cpu = smp_processor_id();
        struct rdt_domain *d;
 
        d = get_domain_from_cpu(cpu, r);
        if (d) {
-               r->msr_update(d, m, r);
+               hw_res->msr_update(d, m, r);
                return;
        }
        pr_warn_once("cpu %d not found in any domain for resource %s\n",
@@ -468,6 +403,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
 
 void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        int i;
 
        /*
@@ -476,7 +412,7 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
         * For Memory Allocation: Set b/w requested to 100%
         * and the bandwidth in MBps to U32_MAX
         */
-       for (i = 0; i < r->num_closid; i++, dc++, dm++) {
+       for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
                *dc = r->default_ctrl;
                *dm = MBA_MAX_MBPS;
        }
@@ -484,26 +420,30 @@ void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
 
 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct msr_param m;
        u32 *dc, *dm;
 
-       dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
+       dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
+                          GFP_KERNEL);
        if (!dc)
                return -ENOMEM;
 
-       dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
+       dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
+                          GFP_KERNEL);
        if (!dm) {
                kfree(dc);
                return -ENOMEM;
        }
 
-       d->ctrl_val = dc;
-       d->mbps_val = dm;
+       hw_dom->ctrl_val = dc;
+       hw_dom->mbps_val = dm;
        setup_default_ctrlval(r, dc, dm);
 
        m.low = 0;
-       m.high = r->num_closid;
-       r->msr_update(d, &m, r);
+       m.high = hw_res->num_closid;
+       hw_res->msr_update(d, &m, r);
        return 0;
 }
 
@@ -560,6 +500,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 {
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
        struct list_head *add_pos = NULL;
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        d = rdt_find_domain(r, id, &add_pos);
@@ -575,10 +516,11 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
                return;
        }
 
-       d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
-       if (!d)
+       hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
+       if (!hw_dom)
                return;
 
+       d = &hw_dom->d_resctrl;
        d->id = id;
        cpumask_set_cpu(cpu, &d->cpu_mask);
 
@@ -607,6 +549,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 {
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        d = rdt_find_domain(r, id, NULL);
@@ -614,6 +557,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                pr_warn("Couldn't find cache id for CPU %d\n", cpu);
                return;
        }
+       hw_dom = resctrl_to_arch_dom(d);
 
        cpumask_clear_cpu(cpu, &d->cpu_mask);
        if (cpumask_empty(&d->cpu_mask)) {
@@ -646,16 +590,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                if (d->plr)
                        d->plr->d = NULL;
 
-               kfree(d->ctrl_val);
-               kfree(d->mbps_val);
+               kfree(hw_dom->ctrl_val);
+               kfree(hw_dom->mbps_val);
                bitmap_free(d->rmid_busy_llc);
                kfree(d->mbm_total);
                kfree(d->mbm_local);
-               kfree(d);
+               kfree(hw_dom);
                return;
        }
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
+       if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
                if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
                        cancel_delayed_work(&d->mbm_over);
                        mbm_setup_overflow_handler(d, 0);
@@ -732,13 +676,8 @@ static int resctrl_offline_cpu(unsigned int cpu)
 static __init void rdt_init_padding(void)
 {
        struct rdt_resource *r;
-       int cl;
 
        for_each_alloc_capable_rdt_resource(r) {
-               cl = strlen(r->name);
-               if (cl > max_name_width)
-                       max_name_width = cl;
-
                if (r->data_width > max_data_width)
                        max_data_width = r->data_width;
        }
@@ -827,19 +766,22 @@ static bool __init rdt_cpu_has(int flag)
 
 static __init bool get_mem_config(void)
 {
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
+
        if (!rdt_cpu_has(X86_FEATURE_MBA))
                return false;
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+               return __get_mem_config_intel(&hw_res->r_resctrl);
        else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-               return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+               return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
 
        return false;
 }
 
 static __init bool get_rdt_alloc_resources(void)
 {
+       struct rdt_resource *r;
        bool ret = false;
 
        if (rdt_alloc_capable)
@@ -849,14 +791,16 @@ static __init bool get_rdt_alloc_resources(void)
                return false;
 
        if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
-               rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+               r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+               rdt_get_cache_alloc_cfg(1, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L3))
                        rdt_get_cdp_l3_config();
                ret = true;
        }
        if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
                /* CPUID 0x10.2 fields are the same format as 0x10.1 */
-               rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+               r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
+               rdt_get_cache_alloc_cfg(2, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L2))
                        rdt_get_cdp_l2_config();
                ret = true;
@@ -870,6 +814,8 @@ static __init bool get_rdt_alloc_resources(void)
 
 static __init bool get_rdt_mon_resources(void)
 {
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+
        if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
                rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
        if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
@@ -880,7 +826,7 @@ static __init bool get_rdt_mon_resources(void)
        if (!rdt_mon_features)
                return false;
 
-       return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
+       return !rdt_get_mon_l3_config(r);
 }
 
 static __init void __check_quirks_intel(void)
@@ -918,42 +864,40 @@ static __init bool get_rdt_resources(void)
 
 static __init void rdt_init_res_defs_intel(void)
 {
+       struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;
 
        for_each_rdt_resource(r) {
+               hw_res = resctrl_to_arch_res(r);
+
                if (r->rid == RDT_RESOURCE_L3 ||
-                   r->rid == RDT_RESOURCE_L3DATA ||
-                   r->rid == RDT_RESOURCE_L3CODE ||
-                   r->rid == RDT_RESOURCE_L2 ||
-                   r->rid == RDT_RESOURCE_L2DATA ||
-                   r->rid == RDT_RESOURCE_L2CODE) {
+                   r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = false;
                        r->cache.arch_has_empty_bitmaps = false;
                        r->cache.arch_has_per_cpu_cfg = false;
                } else if (r->rid == RDT_RESOURCE_MBA) {
-                       r->msr_base = MSR_IA32_MBA_THRTL_BASE;
-                       r->msr_update = mba_wrmsr_intel;
+                       hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
+                       hw_res->msr_update = mba_wrmsr_intel;
                }
        }
 }
 
 static __init void rdt_init_res_defs_amd(void)
 {
+       struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;
 
        for_each_rdt_resource(r) {
+               hw_res = resctrl_to_arch_res(r);
+
                if (r->rid == RDT_RESOURCE_L3 ||
-                   r->rid == RDT_RESOURCE_L3DATA ||
-                   r->rid == RDT_RESOURCE_L3CODE ||
-                   r->rid == RDT_RESOURCE_L2 ||
-                   r->rid == RDT_RESOURCE_L2DATA ||
-                   r->rid == RDT_RESOURCE_L2CODE) {
+                   r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = true;
                        r->cache.arch_has_empty_bitmaps = true;
                        r->cache.arch_has_per_cpu_cfg = true;
                } else if (r->rid == RDT_RESOURCE_MBA) {
-                       r->msr_base = MSR_IA32_MBA_BW_BASE;
-                       r->msr_update = mba_wrmsr_amd;
+                       hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
+                       hw_res->msr_update = mba_wrmsr_amd;
                }
        }
 }
index c877642e8a147560dd561b7fd5c5236bb8cd8dcb..87666275eed920654868ccf3fd9bb2a0b6b81004 100644 (file)
@@ -57,20 +57,23 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
        return true;
 }
 
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
             struct rdt_domain *d)
 {
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        unsigned long bw_val;
 
-       if (d->have_new_ctrl) {
+       cfg = &d->staged_config[s->conf_type];
+       if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }
 
        if (!bw_validate(data->buf, &bw_val, r))
                return -EINVAL;
-       d->new_ctrl = bw_val;
-       d->have_new_ctrl = true;
+       cfg->new_ctrl = bw_val;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -125,13 +128,16 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d)
 {
        struct rdtgroup *rdtgrp = data->rdtgrp;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        u32 cbm_val;
 
-       if (d->have_new_ctrl) {
+       cfg = &d->staged_config[s->conf_type];
+       if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }
@@ -160,12 +166,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
         * The CBM may not overlap with the CBM of another closid if
         * either is exclusive.
         */
-       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
+       if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
                rdt_last_cmd_puts("Overlaps with exclusive group\n");
                return -EINVAL;
        }
 
-       if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
+       if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
                if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                        rdt_last_cmd_puts("Overlaps with other group\n");
@@ -173,8 +179,8 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
                }
        }
 
-       d->new_ctrl = cbm_val;
-       d->have_new_ctrl = true;
+       cfg->new_ctrl = cbm_val;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
@@ -185,9 +191,12 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
  * separated by ";". The "id" is in decimal, and must match one of
  * the "id"s for this resource.
  */
-static int parse_line(char *line, struct rdt_resource *r,
+static int parse_line(char *line, struct resctrl_schema *s,
                      struct rdtgroup *rdtgrp)
 {
+       enum resctrl_conf_type t = s->conf_type;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        struct rdt_parse_data data;
        char *dom = NULL, *id;
        struct rdt_domain *d;
@@ -213,9 +222,10 @@ next:
                if (d->id == dom_id) {
                        data.buf = dom;
                        data.rdtgrp = rdtgrp;
-                       if (r->parse_ctrlval(&data, r, d))
+                       if (r->parse_ctrlval(&data, s, d))
                                return -EINVAL;
                        if (rdtgrp->mode ==  RDT_MODE_PSEUDO_LOCKSETUP) {
+                               cfg = &d->staged_config[t];
                                /*
                                 * In pseudo-locking setup mode and just
                                 * parsed a valid CBM that should be
@@ -224,9 +234,9 @@ next:
                                 * the required initialization for single
                                 * region and return.
                                 */
-                               rdtgrp->plr->r = r;
+                               rdtgrp->plr->s = s;
                                rdtgrp->plr->d = d;
-                               rdtgrp->plr->cbm = d->new_ctrl;
+                               rdtgrp->plr->cbm = cfg->new_ctrl;
                                d->plr = rdtgrp->plr;
                                return 0;
                        }
@@ -236,28 +246,72 @@ next:
        return -EINVAL;
 }
 
-int update_domains(struct rdt_resource *r, int closid)
+static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
 {
+       switch (type) {
+       default:
+       case CDP_NONE:
+               return closid;
+       case CDP_CODE:
+               return closid * 2 + 1;
+       case CDP_DATA:
+               return closid * 2;
+       }
+}
+
+static bool apply_config(struct rdt_hw_domain *hw_dom,
+                        struct resctrl_staged_config *cfg, u32 idx,
+                        cpumask_var_t cpu_mask, bool mba_sc)
+{
+       struct rdt_domain *dom = &hw_dom->d_resctrl;
+       u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;
+
+       if (cfg->new_ctrl != dc[idx]) {
+               cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
+               dc[idx] = cfg->new_ctrl;
+
+               return true;
+       }
+
+       return false;
+}
+
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
+{
+       struct resctrl_staged_config *cfg;
+       struct rdt_hw_domain *hw_dom;
        struct msr_param msr_param;
+       enum resctrl_conf_type t;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        bool mba_sc;
-       u32 *dc;
        int cpu;
+       u32 idx;
 
        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       msr_param.low = closid;
-       msr_param.high = msr_param.low + 1;
-       msr_param.res = r;
-
        mba_sc = is_mba_sc(r);
+       msr_param.res = NULL;
        list_for_each_entry(d, &r->domains, list) {
-               dc = !mba_sc ? d->ctrl_val : d->mbps_val;
-               if (d->have_new_ctrl && d->new_ctrl != dc[closid]) {
-                       cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
-                       dc[closid] = d->new_ctrl;
+               hw_dom = resctrl_to_arch_dom(d);
+               for (t = 0; t < CDP_NUM_TYPES; t++) {
+                       cfg = &hw_dom->d_resctrl.staged_config[t];
+                       if (!cfg->have_new_ctrl)
+                               continue;
+
+                       idx = get_config_index(closid, t);
+                       if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
+                               continue;
+
+                       if (!msr_param.res) {
+                               msr_param.low = idx;
+                               msr_param.high = msr_param.low + 1;
+                               msr_param.res = r;
+                       } else {
+                               msr_param.low = min(msr_param.low, idx);
+                               msr_param.high = max(msr_param.high, idx + 1);
+                       }
                }
        }
 
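resctrl_arch_update_domains() now derives the hardware index from the closid and the configuration type, and widens a single MSR range to cover every staged index instead of always writing exactly one. A small compilable sketch of the index arithmetic; the even/odd CDP layout is taken from get_config_index() above, the closid value is illustrative:

#include <stdio.h>

enum resctrl_conf_type { CDP_NONE, CDP_CODE, CDP_DATA };

static unsigned int get_config_index(unsigned int closid,
                                     enum resctrl_conf_type type)
{
        switch (type) {
        case CDP_CODE:
                return closid * 2 + 1;  /* odd indices: code masks */
        case CDP_DATA:
                return closid * 2;      /* even indices: data masks */
        case CDP_NONE:
        default:
                return closid;          /* 1:1 when CDP is off */
        }
}

int main(void)
{
        /* Coalesce the staged indices for closid 2 into one [low, high)
         * range, as the msr_param handling above does. */
        unsigned int idx_data = get_config_index(2, CDP_DATA);
        unsigned int idx_code = get_config_index(2, CDP_CODE);
        unsigned int low = idx_data < idx_code ? idx_data : idx_code;
        unsigned int high = (idx_data > idx_code ? idx_data : idx_code) + 1;

        printf("closid 2 -> data %u, code %u, MSR range [%u, %u)\n",
               idx_data, idx_code, low, high);
        return 0;
}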
@@ -284,11 +338,11 @@ done:
 static int rdtgroup_parse_resource(char *resname, char *tok,
                                   struct rdtgroup *rdtgrp)
 {
-       struct rdt_resource *r;
+       struct resctrl_schema *s;
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
-                       return parse_line(tok, r, rdtgrp);
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
+                       return parse_line(tok, s, rdtgrp);
        }
        rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
        return -EINVAL;
@@ -297,6 +351,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
 {
+       struct resctrl_schema *s;
        struct rdtgroup *rdtgrp;
        struct rdt_domain *dom;
        struct rdt_resource *r;
@@ -327,9 +382,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               list_for_each_entry(dom, &r->domains, list)
-                       dom->have_new_ctrl = false;
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               list_for_each_entry(dom, &s->res->domains, list)
+                       memset(dom->staged_config, 0, sizeof(dom->staged_config));
        }
 
        while ((tok = strsep(&buf, "\n")) != NULL) {
@@ -349,8 +404,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                        goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
-               ret = update_domains(r, rdtgrp->closid);
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
+               ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret)
                        goto out;
        }
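
Nothing changes for userspace here: each line is still "<resource>:<id>=<value>;...", all lines are parsed and staged first, and only then does the loop above apply one schema at a time. A hypothetical userspace example (the group path and mask values are invented for illustration) staging an L3 and an MB line in one write:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Both lines are staged before anything is applied; an error
         * in either one rejects the whole write. */
        const char *schemata = "L3:0=0ff;1=f00\nMB:0=50;1=100\n";
        int fd = open("/sys/fs/resctrl/grp1/schemata", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, schemata, strlen(schemata)) < 0)
                perror("write");
        return close(fd);
}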
@@ -371,19 +427,31 @@ out:
        return ret ?: nbytes;
 }
 
-static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+                           u32 closid, enum resctrl_conf_type type)
+{
+       struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+       u32 idx = get_config_index(closid, type);
+
+       if (!is_mba_sc(r))
+               return hw_dom->ctrl_val[idx];
+       return hw_dom->mbps_val[idx];
+}
+
+static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
 {
+       struct rdt_resource *r = schema->res;
        struct rdt_domain *dom;
        bool sep = false;
        u32 ctrl_val;
 
-       seq_printf(s, "%*s:", max_name_width, r->name);
+       seq_printf(s, "%*s:", max_name_width, schema->name);
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_puts(s, ";");
 
-               ctrl_val = (!is_mba_sc(r) ? dom->ctrl_val[closid] :
-                           dom->mbps_val[closid]);
+               ctrl_val = resctrl_arch_get_config(r, dom, closid,
+                                                  schema->conf_type);
                seq_printf(s, r->format_str, dom->id, max_data_width,
                           ctrl_val);
                sep = true;
@@ -394,16 +462,17 @@ static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v)
 {
+       struct resctrl_schema *schema;
        struct rdtgroup *rdtgrp;
-       struct rdt_resource *r;
        int ret = 0;
        u32 closid;
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (rdtgrp) {
                if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
-                       for_each_alloc_enabled_rdt_resource(r)
-                               seq_printf(s, "%s:uninitialized\n", r->name);
+                       list_for_each_entry(schema, &resctrl_schema_all, list) {
+                               seq_printf(s, "%s:uninitialized\n", schema->name);
+                       }
                } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                        if (!rdtgrp->plr->d) {
                                rdt_last_cmd_clear();
@@ -411,15 +480,15 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
                                ret = -ENODEV;
                        } else {
                                seq_printf(s, "%s:%d=%x\n",
-                                          rdtgrp->plr->r->name,
+                                          rdtgrp->plr->s->res->name,
                                           rdtgrp->plr->d->id,
                                           rdtgrp->plr->cbm);
                        }
                } else {
                        closid = rdtgrp->closid;
-                       for_each_alloc_enabled_rdt_resource(r) {
-                               if (closid < r->num_closid)
-                                       show_doms(s, r, closid);
+                       list_for_each_entry(schema, &resctrl_schema_all, list) {
+                               if (closid < schema->num_closid)
+                                       show_doms(s, schema, closid);
                        }
                }
        } else {
@@ -449,6 +518,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 {
        struct kernfs_open_file *of = m->private;
+       struct rdt_hw_resource *hw_res;
        u32 resid, evtid, domid;
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;
@@ -468,7 +538,8 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
        domid = md.u.domid;
        evtid = md.u.evtid;
 
-       r = &rdt_resources_all[resid];
+       hw_res = &rdt_resources_all[resid];
+       r = &hw_res->r_resctrl;
        d = rdt_find_domain(r, domid, NULL);
        if (IS_ERR_OR_NULL(d)) {
                ret = -ENOENT;
@@ -482,7 +553,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
        else if (rr.val & RMID_VAL_UNAVAIL)
                seq_puts(m, "Unavailable\n");
        else
-               seq_printf(m, "%llu\n", rr.val * r->mon_scale);
+               seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);
 
 out:
        rdtgroup_kn_unlock(of->kn);
index 6a5f60a37219893111e677387510db515624be1d..1d647188a43bfc9b673dcfd55301f897bc140ffd 100644
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_RESCTRL_INTERNAL_H
 #define _ASM_X86_RESCTRL_INTERNAL_H
 
+#include <linux/resctrl.h>
 #include <linux/sched.h>
 #include <linux/kernfs.h>
 #include <linux/fs_context.h>
@@ -109,6 +110,7 @@ extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
+extern struct list_head resctrl_schema_all;
 
 enum rdt_group_type {
        RDTCTRL_GROUP = 0,
@@ -161,8 +163,8 @@ struct mongroup {
 
 /**
  * struct pseudo_lock_region - pseudo-lock region information
- * @r:                 RDT resource to which this pseudo-locked region
- *                     belongs
+ * @s:                 Resctrl schema for the resource to which this
+ *                     pseudo-locked region belongs
  * @d:                 RDT domain to which this pseudo-locked region
  *                     belongs
  * @cbm:               bitmask of the pseudo-locked region
@@ -182,7 +184,7 @@ struct mongroup {
  * @pm_reqs:           Power management QoS requests related to this region
  */
 struct pseudo_lock_region {
-       struct rdt_resource     *r;
+       struct resctrl_schema   *s;
        struct rdt_domain       *d;
        u32                     cbm;
        wait_queue_head_t       lock_thread_wq;
@@ -303,44 +305,25 @@ struct mbm_state {
 };
 
 /**
- * struct rdt_domain - group of cpus sharing an RDT resource
- * @list:      all instances of this resource
- * @id:                unique id for this instance
- * @cpu_mask:  which cpus share this resource
- * @rmid_busy_llc:
- *             bitmap of which limbo RMIDs are above threshold
- * @mbm_total: saved state for MBM total bandwidth
- * @mbm_local: saved state for MBM local bandwidth
- * @mbm_over:  worker to periodically read MBM h/w counters
- * @cqm_limbo: worker to periodically read CQM h/w counters
- * @mbm_work_cpu:
- *             worker cpu for MBM h/w counters
- * @cqm_work_cpu:
- *             worker cpu for CQM h/w counters
+ * struct rdt_hw_domain - Arch private attributes of a set of CPUs that share
+ *                       a resource
+ * @d_resctrl: Properties exposed to the resctrl file system
  * @ctrl_val:  array of cache or mem ctrl values (indexed by CLOSID)
  * @mbps_val:  When mba_sc is enabled, this holds the bandwidth in MBps
- * @new_ctrl:  new ctrl value to be loaded
- * @have_new_ctrl: did user provide new_ctrl for this domain
- * @plr:       pseudo-locked region (if any) associated with domain
+ *
+ * Members of this structure are accessed via helpers that provide abstraction.
  */
-struct rdt_domain {
-       struct list_head                list;
-       int                             id;
-       struct cpumask                  cpu_mask;
-       unsigned long                   *rmid_busy_llc;
-       struct mbm_state                *mbm_total;
-       struct mbm_state                *mbm_local;
-       struct delayed_work             mbm_over;
-       struct delayed_work             cqm_limbo;
-       int                             mbm_work_cpu;
-       int                             cqm_work_cpu;
+struct rdt_hw_domain {
+       struct rdt_domain               d_resctrl;
        u32                             *ctrl_val;
        u32                             *mbps_val;
-       u32                             new_ctrl;
-       bool                            have_new_ctrl;
-       struct pseudo_lock_region       *plr;
 };
 
+static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
+{
+       return container_of(r, struct rdt_hw_domain, d_resctrl);
+}
+
 /**
  * struct msr_param - set a range of MSRs from a domain
  * @res:       The resource to use
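
The new resctrl_to_arch_dom() helper is the standard container_of() embedding pattern: resctrl hands around the shared struct rdt_domain, and the architecture recovers its private wrapper from it. A userspace-compilable sketch of that pattern; the types here are stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rdt_domain {             /* shared, filesystem-visible part */
        int id;
};

struct rdt_hw_domain {          /* arch-private wrapper */
        struct rdt_domain d_resctrl;
        unsigned int *ctrl_val;
};

static struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *d)
{
        return container_of(d, struct rdt_hw_domain, d_resctrl);
}

int main(void)
{
        struct rdt_hw_domain hw = { .d_resctrl = { .id = 3 } };
        struct rdt_domain *d = &hw.d_resctrl;

        printf("domain %d -> hw_dom %p\n",
               d->id, (void *)resctrl_to_arch_dom(d));
        return 0;
}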
@@ -349,69 +332,8 @@ struct rdt_domain {
  */
 struct msr_param {
        struct rdt_resource     *res;
-       int                     low;
-       int                     high;
-};
-
-/**
- * struct rdt_cache - Cache allocation related data
- * @cbm_len:           Length of the cache bit mask
- * @min_cbm_bits:      Minimum number of consecutive bits to be set
- * @cbm_idx_mult:      Multiplier of CBM index
- * @cbm_idx_offset:    Offset of CBM index. CBM index is computed by:
- *                     closid * cbm_idx_multi + cbm_idx_offset
- *                     in a cache bit mask
- * @shareable_bits:    Bitmask of shareable resource with other
- *                     executing entities
- * @arch_has_sparse_bitmaps:   True if a bitmap like f00f is valid.
- * @arch_has_empty_bitmaps:    True if the '0' bitmap is valid.
- * @arch_has_per_cpu_cfg:      True if QOS_CFG register for this cache
- *                             level has CPU scope.
- */
-struct rdt_cache {
-       unsigned int    cbm_len;
-       unsigned int    min_cbm_bits;
-       unsigned int    cbm_idx_mult;
-       unsigned int    cbm_idx_offset;
-       unsigned int    shareable_bits;
-       bool            arch_has_sparse_bitmaps;
-       bool            arch_has_empty_bitmaps;
-       bool            arch_has_per_cpu_cfg;
-};
-
-/**
- * enum membw_throttle_mode - System's memory bandwidth throttling mode
- * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
- * @THREAD_THROTTLE_MAX:       Memory bandwidth is throttled at the core
- *                             always using smallest bandwidth percentage
- *                             assigned to threads, aka "max throttling"
- * @THREAD_THROTTLE_PER_THREAD:        Memory bandwidth is throttled at the thread
- */
-enum membw_throttle_mode {
-       THREAD_THROTTLE_UNDEFINED = 0,
-       THREAD_THROTTLE_MAX,
-       THREAD_THROTTLE_PER_THREAD,
-};
-
-/**
- * struct rdt_membw - Memory bandwidth allocation related data
- * @min_bw:            Minimum memory bandwidth percentage user can request
- * @bw_gran:           Granularity at which the memory bandwidth is allocated
- * @delay_linear:      True if memory B/W delay is in linear scale
- * @arch_needs_linear: True if we can't configure non-linear resources
- * @throttle_mode:     Bandwidth throttling mode when threads request
- *                     different memory bandwidths
- * @mba_sc:            True if MBA software controller(mba_sc) is enabled
- * @mb_map:            Mapping of memory B/W percentage to memory B/W delay
- */
-struct rdt_membw {
-       u32                             min_bw;
-       u32                             bw_gran;
-       u32                             delay_linear;
-       bool                            arch_needs_linear;
-       enum membw_throttle_mode        throttle_mode;
-       bool                            mba_sc;
-       u32                             *mb_map;
+       u32                     low;
+       u32                     high;
 };
 
 static inline bool is_llc_occupancy_enabled(void)
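
The struct definitions removed here and in the previous hunk (rdt_domain, rdt_cache, rdt_membw, and rdt_resource further down) are not dropped: judging by the new #include <linux/resctrl.h> at the top of this header, their filesystem-visible halves move into that shared header so the resctrl code can eventually be used from outside arch/x86, while only the hw_* wrappers, their ctrl_val/mbps_val arrays and the MSR details stay arch-private.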
@@ -446,111 +368,103 @@ struct rdt_parse_data {
 };
 
 /**
- * struct rdt_resource - attributes of an RDT resource
- * @rid:               The index of the resource
- * @alloc_enabled:     Is allocation enabled on this machine
- * @mon_enabled:       Is monitoring enabled for this feature
- * @alloc_capable:     Is allocation available on this machine
- * @mon_capable:       Is monitor feature available on this machine
- * @name:              Name to use in "schemata" file
- * @num_closid:                Number of CLOSIDs available
- * @cache_level:       Which cache level defines scope of this resource
- * @default_ctrl:      Specifies default cache cbm or memory B/W percent.
+ * struct rdt_hw_resource - arch private attributes of a resctrl resource
+ * @r_resctrl:         Attributes of the resource used directly by resctrl.
+ * @num_closid:                Maximum number of closid this hardware can support,
+ *                     regardless of CDP. This is exposed via
+ *                     resctrl_arch_get_num_closid() to avoid confusion
+ *                     with struct resctrl_schema's property of the same name,
+ *                     which has been corrected for features like CDP.
  * @msr_base:          Base MSR address for CBMs
  * @msr_update:                Function pointer to update QOS MSRs
- * @data_width:                Character width of data when displaying
- * @domains:           All domains for this resource
- * @cache:             Cache allocation related data
- * @membw:             If the component has bandwidth controls, their properties.
- * @format_str:                Per resource format string to show domain value
- * @parse_ctrlval:     Per resource function pointer to parse control values
- * @evt_list:          List of monitoring events
- * @num_rmid:          Number of RMIDs available
  * @mon_scale:         cqm counter * mon_scale = occupancy in bytes
  * @mbm_width:         Monitor width, to detect and correct for overflow.
- * @fflags:            flags to choose base and info files
+ * @cdp_enabled:       CDP state of this resource
+ *
+ * Members of this structure are either private to the architecture
+ * e.g. mbm_width, or accessed via helpers that provide abstraction. e.g.
+ * msr_update and msr_base.
  */
-struct rdt_resource {
-       int                     rid;
-       bool                    alloc_enabled;
-       bool                    mon_enabled;
-       bool                    alloc_capable;
-       bool                    mon_capable;
-       char                    *name;
-       int                     num_closid;
-       int                     cache_level;
-       u32                     default_ctrl;
+struct rdt_hw_resource {
+       struct rdt_resource     r_resctrl;
+       u32                     num_closid;
        unsigned int            msr_base;
        void (*msr_update)      (struct rdt_domain *d, struct msr_param *m,
                                 struct rdt_resource *r);
-       int                     data_width;
-       struct list_head        domains;
-       struct rdt_cache        cache;
-       struct rdt_membw        membw;
-       const char              *format_str;
-       int (*parse_ctrlval)(struct rdt_parse_data *data,
-                            struct rdt_resource *r,
-                            struct rdt_domain *d);
-       struct list_head        evt_list;
-       int                     num_rmid;
        unsigned int            mon_scale;
        unsigned int            mbm_width;
-       unsigned long           fflags;
+       bool                    cdp_enabled;
 };
 
-int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r)
+{
+       return container_of(r, struct rdt_hw_resource, r_resctrl);
+}
+
+int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
             struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
-extern struct rdt_resource rdt_resources_all[];
+extern struct rdt_hw_resource rdt_resources_all[];
 extern struct rdtgroup rdtgroup_default;
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 
 extern struct dentry *debugfs_resctrl;
 
-enum {
+enum resctrl_res_level {
        RDT_RESOURCE_L3,
-       RDT_RESOURCE_L3DATA,
-       RDT_RESOURCE_L3CODE,
        RDT_RESOURCE_L2,
-       RDT_RESOURCE_L2DATA,
-       RDT_RESOURCE_L2CODE,
        RDT_RESOURCE_MBA,
 
        /* Must be the last */
        RDT_NUM_RESOURCES,
 };
 
+static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res)
+{
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res);
+
+       hw_res++;
+       return &hw_res->r_resctrl;
+}
+
+static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
+{
+       return rdt_resources_all[l].cdp_enabled;
+}
+
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
+/*
+ * To return the common struct rdt_resource, which is contained in struct
+ * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource.
+ */
 #define for_each_rdt_resource(r)                                             \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)
+       for (r = &rdt_resources_all[0].r_resctrl;                             \
+            r <= &rdt_resources_all[RDT_NUM_RESOURCES - 1].r_resctrl;        \
+            r = resctrl_inc(r))
 
 #define for_each_capable_rdt_resource(r)                                     \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_capable || r->mon_capable)
 
 #define for_each_alloc_capable_rdt_resource(r)                               \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_capable)
 
 #define for_each_mon_capable_rdt_resource(r)                                 \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->mon_capable)
 
 #define for_each_alloc_enabled_rdt_resource(r)                               \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->alloc_enabled)
 
 #define for_each_mon_enabled_rdt_resource(r)                                 \
-       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
-            r++)                                                             \
+       for_each_rdt_resource(r)                                              \
                if (r->mon_enabled)
 
 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
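
for_each_rdt_resource() can no longer stride through the array with r++, because what is laid out contiguously is the wrapper struct: resctrl_inc() steps to the next wrapper and hands back its embedded member. A stand-alone sketch of that walk; the names mirror the hunk, the MSR numbers are illustrative, and like the kernel macro it relies on forming (but never dereferencing) a one-past-the-end pointer:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rdt_resource { const char *name; };

struct rdt_hw_resource {
        struct rdt_resource r_resctrl;
        unsigned int msr_base;
};

static struct rdt_hw_resource rdt_resources_all[] = {
        { { "L3" }, 0xc90 },
        { { "L2" }, 0xd10 },
        { { "MB" }, 0xd50 },
};
#define RDT_NUM_RESOURCES \
        (sizeof(rdt_resources_all) / sizeof(rdt_resources_all[0]))

static struct rdt_resource *resctrl_inc(struct rdt_resource *res)
{
        struct rdt_hw_resource *hw = container_of(res, struct rdt_hw_resource,
                                                  r_resctrl);
        hw++;                   /* stride by the wrapper's size */
        return &hw->r_resctrl;
}

int main(void)
{
        struct rdt_resource *r;

        for (r = &rdt_resources_all[0].r_resctrl;
             r <= &rdt_resources_all[RDT_NUM_RESOURCES - 1].r_resctrl;
             r = resctrl_inc(r))
                printf("%s\n", r->name);
        return 0;
}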
@@ -594,7 +508,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off);
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
                           unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
                                  unsigned long cbm);
@@ -609,7 +523,6 @@ void rdt_pseudo_lock_release(void);
 int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
 void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
-int update_domains(struct rdt_resource *r, int closid);
 int closids_supported(void);
 void closid_free(int closid);
 int alloc_rmid(void);
index f07c10b87a8732e0767c4261525911ea795f183c..c9f0f3d63f75fffdc54e6c39259f39e3b88c5b1d 100644
@@ -174,7 +174,7 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
        struct rdt_resource *r;
        u32 crmid = 1, nrmid;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
        /*
         * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
@@ -232,7 +232,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
        int cpu;
        u64 val;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
 
        entry->busy = 0;
        cpu = get_cpu();
@@ -285,15 +285,15 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
        return chunks >>= shift;
 }
 
-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
        struct mbm_state *m;
        u64 chunks, tval;
 
        tval = __rmid_read(rmid, rr->evtid);
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
-               rr->val = tval;
-               return -EINVAL;
+               return tval;
        }
        switch (rr->evtid) {
        case QOS_L3_OCCUP_EVENT_ID:
@@ -307,10 +307,10 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
                break;
        default:
                /*
-                * Code would never reach here because
-                * an invalid event id would fail the __rmid_read.
+                * Code would never reach here because an invalid
+                * event id would fail the __rmid_read.
                 */
-               return -EINVAL;
+               return RMID_VAL_ERROR;
        }
 
        if (rr->first) {
@@ -319,7 +319,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
                return 0;
        }
 
-       chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);
+       chunks = mbm_overflow_count(m->prev_msr, tval, hw_res->mbm_width);
        m->chunks += chunks;
        m->prev_msr = tval;
 
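The overflow arithmetic itself is untouched; only the counter width now comes from the arch-private struct. A compilable sketch of the masking trick behind mbm_overflow_count(), reconstructed from the visible "return chunks >>= shift" and the kernel's 24-bit base width; the sample values are invented:

#include <stdint.h>
#include <stdio.h>

/* Shift both samples up so the counter width occupies the top bits,
 * let unsigned wrap-around compute the delta, then shift back down. */
static uint64_t mbm_overflow_count(uint64_t prev_msr, uint64_t cur_msr,
                                   unsigned int width)
{
        uint64_t shift = 64 - width;
        uint64_t chunks = (cur_msr << shift) - (prev_msr << shift);

        return chunks >> shift;
}

int main(void)
{
        unsigned int width = 24;        /* MBM_CNTR_WIDTH_BASE */
        uint64_t max = (1ULL << width) - 1;

        /* counter wrapped: 10 ticks up to the max, one to wrap to
         * zero, five more -> 16 */
        printf("chunks=%llu\n",
               (unsigned long long)mbm_overflow_count(max - 10, 5, width));
        return 0;
}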
@@ -334,7 +334,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
  */
 static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 {
-       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(rr->r);
        struct mbm_state *m = &rr->d->mbm_local[rmid];
        u64 tval, cur_bw, chunks;
 
@@ -342,8 +342,8 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
        if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
                return;
 
-       chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
-       cur_bw = (get_corrected_mbm_count(rmid, chunks) * r->mon_scale) >> 20;
+       chunks = mbm_overflow_count(m->prev_bw_msr, tval, hw_res->mbm_width);
+       cur_bw = (get_corrected_mbm_count(rmid, chunks) * hw_res->mon_scale) >> 20;
 
        if (m->delta_comp)
                m->delta_bw = abs(cur_bw - m->prev_bw);
@@ -361,23 +361,29 @@ void mon_event_count(void *info)
        struct rdtgroup *rdtgrp, *entry;
        struct rmid_read *rr = info;
        struct list_head *head;
+       u64 ret_val;
 
        rdtgrp = rr->rgrp;
 
-       if (__mon_event_count(rdtgrp->mon.rmid, rr))
-               return;
+       ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
 
        /*
-        * For Ctrl groups read data from child monitor groups.
+        * For Ctrl groups read data from child monitor groups and
+        * add them together. Count events which are read successfully.
+        * Discard the rmid_read's reporting errors.
         */
        head = &rdtgrp->mon.crdtgrp_list;
 
        if (rdtgrp->type == RDTCTRL_GROUP) {
                list_for_each_entry(entry, head, mon.crdtgrp_list) {
-                       if (__mon_event_count(entry->mon.rmid, rr))
-                               return;
+                       if (__mon_event_count(entry->mon.rmid, rr) == 0)
+                               ret_val = 0;
                }
        }
+
+       /* Report error if none of rmid_reads are successful */
+       if (ret_val)
+               rr->val = ret_val;
 }
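
This hunk also changes the error semantics: previously one failing child read aborted the walk and discarded the totals already summed; now an error is reported only if every constituent read fails. A small compilable sketch of that aggregation rule, with pretend per-RMID reads and an illustrative error flag:

#include <stdint.h>
#include <stdio.h>

#define RMID_VAL_ERROR (1ULL << 63)     /* illustrative flag value */

static uint64_t read_one(int i)
{
        /* pretend reads for RMIDs 0..2: one failure among successes */
        return i == 1 ? RMID_VAL_ERROR : 100 * (i + 1);
}

int main(void)
{
        uint64_t sum = 0, err = 0;
        int ok = 0;
        int i;

        for (i = 0; i < 3; i++) {
                uint64_t v = read_one(i);

                if (v & RMID_VAL_ERROR) {
                        err = v;        /* remember, but keep walking */
                        continue;
                }
                sum += v;
                ok = 1;
        }
        if (ok)
                printf("count=%llu\n", (unsigned long long)sum);
        else
                printf("error=%llx\n", (unsigned long long)err);
        return 0;
}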
 
 /*
@@ -416,6 +422,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 {
        u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
        struct mbm_state *pmbm_data, *cmbm_data;
+       struct rdt_hw_resource *hw_r_mba;
+       struct rdt_hw_domain *hw_dom_mba;
        u32 cur_bw, delta_bw, user_bw;
        struct rdt_resource *r_mba;
        struct rdt_domain *dom_mba;
@@ -425,7 +433,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
        if (!is_mbm_local_enabled())
                return;
 
-       r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+       hw_r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
+       r_mba = &hw_r_mba->r_resctrl;
        closid = rgrp->closid;
        rmid = rgrp->mon.rmid;
        pmbm_data = &dom_mbm->mbm_local[rmid];
@@ -435,11 +444,16 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                pr_warn_once("Failure to get domain for MBA update\n");
                return;
        }
+       hw_dom_mba = resctrl_to_arch_dom(dom_mba);
 
        cur_bw = pmbm_data->prev_bw;
-       user_bw = dom_mba->mbps_val[closid];
+       user_bw = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
        delta_bw = pmbm_data->delta_bw;
-       cur_msr_val = dom_mba->ctrl_val[closid];
+       /*
+        * resctrl_arch_get_config() chooses the mbps/ctrl value to return
+        * based on is_mba_sc(). For now, reach into the hw_dom.
+        */
+       cur_msr_val = hw_dom_mba->ctrl_val[closid];
 
        /*
         * For Ctrl groups read data from child monitor groups.
@@ -474,9 +488,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
                return;
        }
 
-       cur_msr = r_mba->msr_base + closid;
+       cur_msr = hw_r_mba->msr_base + closid;
        wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
-       dom_mba->ctrl_val[closid] = new_msr_val;
+       hw_dom_mba->ctrl_val[closid] = new_msr_val;
 
        /*
         * Delta values are updated dynamically package wise for each
@@ -538,7 +552,7 @@ void cqm_handle_limbo(struct work_struct *work)
 
        mutex_lock(&rdtgroup_mutex);
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, cqm_limbo.work);
 
        __check_limbo(d, false);
@@ -574,7 +588,7 @@ void mbm_handle_overflow(struct work_struct *work)
        if (!static_branch_likely(&rdt_mon_enable_key))
                goto out_unlock;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
+       r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
        d = container_of(work, struct rdt_domain, mbm_over.work);
 
        list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
@@ -671,15 +685,16 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
        unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        unsigned int cl_size = boot_cpu_data.x86_cache_size;
        int ret;
 
-       r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
+       hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
        r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
-       r->mbm_width = MBM_CNTR_WIDTH_BASE;
+       hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
 
        if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
-               r->mbm_width += mbm_offset;
+               hw_res->mbm_width += mbm_offset;
        else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
                pr_warn("Ignoring impossible MBM counter offset\n");
 
@@ -693,7 +708,7 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
        resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 
        /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-       resctrl_cqm_threshold /= r->mon_scale;
+       resctrl_cqm_threshold /= hw_res->mon_scale;
 
        ret = dom_data_init(r);
        if (ret)
index 2207916cae6569d415dd3946d5859099ea13fafa..db813f819ad6cb071290eac33dc3c6ff4bcc2059 100644
@@ -250,7 +250,7 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
        plr->line_size = 0;
        kfree(plr->kmem);
        plr->kmem = NULL;
-       plr->r = NULL;
+       plr->s = NULL;
        if (plr->d)
                plr->d->plr = NULL;
        plr->d = NULL;
@@ -294,10 +294,10 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 
        ci = get_cpu_cacheinfo(plr->cpu);
 
-       plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
+       plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
 
        for (i = 0; i < ci->num_leaves; i++) {
-               if (ci->info_list[i].level == plr->r->cache_level) {
+               if (ci->info_list[i].level == plr->s->res->cache_level) {
                        plr->line_size = ci->info_list[i].coherency_line_size;
                        return 0;
                }
@@ -688,8 +688,8 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
         *   resource, the portion of cache used by it should be made
         *   unavailable to all future allocations from both resources.
         */
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled ||
-           rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) {
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
+           resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
                rdt_last_cmd_puts("CDP enabled\n");
                return -EINVAL;
        }
@@ -800,7 +800,7 @@ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm
        unsigned long cbm_b;
 
        if (d->plr) {
-               cbm_len = d->plr->r->cache.cbm_len;
+               cbm_len = d->plr->s->res->cache.cbm_len;
                cbm_b = d->plr->cbm;
                if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
index 01fd30e7829dcf8accb31891e734ea770dd68b8b..b57b3db9a6a78ffa2d58e906f2126c208d5d1260 100644
@@ -39,6 +39,9 @@ static struct kernfs_root *rdt_root;
 struct rdtgroup rdtgroup_default;
 LIST_HEAD(rdt_all_groups);
 
+/* list of entries for the schemata file */
+LIST_HEAD(resctrl_schema_all);
+
 /* Kernel fs node for "info" directory under root */
 static struct kernfs_node *kn_info;
 
@@ -100,12 +103,12 @@ int closids_supported(void)
 
 static void closid_init(void)
 {
-       struct rdt_resource *r;
-       int rdt_min_closid = 32;
+       struct resctrl_schema *s;
+       u32 rdt_min_closid = 32;
 
        /* Compute rdt_min_closid across all resources */
-       for_each_alloc_enabled_rdt_resource(r)
-               rdt_min_closid = min(rdt_min_closid, r->num_closid);
+       list_for_each_entry(s, &resctrl_schema_all, list)
+               rdt_min_closid = min(rdt_min_closid, s->num_closid);
 
        closid_free_map = BIT_MASK(rdt_min_closid) - 1;
 
@@ -842,16 +845,17 @@ static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
 static int rdt_num_closids_show(struct kernfs_open_file *of,
                                struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
 
-       seq_printf(seq, "%d\n", r->num_closid);
+       seq_printf(seq, "%u\n", s->num_closid);
        return 0;
 }
 
 static int rdt_default_ctrl_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%x\n", r->default_ctrl);
        return 0;
@@ -860,7 +864,8 @@ static int rdt_default_ctrl_show(struct kernfs_open_file *of,
 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
        return 0;
@@ -869,7 +874,8 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
 static int rdt_shareable_bits_show(struct kernfs_open_file *of,
                                   struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%x\n", r->cache.shareable_bits);
        return 0;
@@ -892,38 +898,40 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
 static int rdt_bit_usage_show(struct kernfs_open_file *of,
                              struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
        /*
         * Use unsigned long even though only 32 bits are used to ensure
         * test_bit() is used safely.
         */
        unsigned long sw_shareable = 0, hw_shareable = 0;
        unsigned long exclusive = 0, pseudo_locked = 0;
+       struct rdt_resource *r = s->res;
        struct rdt_domain *dom;
        int i, hwb, swb, excl, psl;
        enum rdtgrp_mode mode;
        bool sep = false;
-       u32 *ctrl;
+       u32 ctrl_val;
 
        mutex_lock(&rdtgroup_mutex);
        hw_shareable = r->cache.shareable_bits;
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_putc(seq, ';');
-               ctrl = dom->ctrl_val;
                sw_shareable = 0;
                exclusive = 0;
                seq_printf(seq, "%d=", dom->id);
-               for (i = 0; i < closids_supported(); i++, ctrl++) {
+               for (i = 0; i < closids_supported(); i++) {
                        if (!closid_allocated(i))
                                continue;
+                       ctrl_val = resctrl_arch_get_config(r, dom, i,
+                                                          s->conf_type);
                        mode = rdtgroup_mode_by_closid(i);
                        switch (mode) {
                        case RDT_MODE_SHAREABLE:
-                               sw_shareable |= *ctrl;
+                               sw_shareable |= ctrl_val;
                                break;
                        case RDT_MODE_EXCLUSIVE:
-                               exclusive |= *ctrl;
+                               exclusive |= ctrl_val;
                                break;
                        case RDT_MODE_PSEUDO_LOCKSETUP:
                        /*
@@ -970,7 +978,8 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
 static int rdt_min_bw_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.min_bw);
        return 0;
@@ -1001,7 +1010,8 @@ static int rdt_mon_features_show(struct kernfs_open_file *of,
 static int rdt_bw_gran_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.bw_gran);
        return 0;
@@ -1010,7 +1020,8 @@ static int rdt_bw_gran_show(struct kernfs_open_file *of,
 static int rdt_delay_linear_show(struct kernfs_open_file *of,
                             struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        seq_printf(seq, "%u\n", r->membw.delay_linear);
        return 0;
@@ -1020,8 +1031,9 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
                                  struct seq_file *seq, void *v)
 {
        struct rdt_resource *r = of->kn->parent->priv;
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
-       seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
+       seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale);
 
        return 0;
 }
@@ -1029,7 +1041,8 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
 static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
                                         struct seq_file *seq, void *v)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct resctrl_schema *s = of->kn->parent->priv;
+       struct rdt_resource *r = s->res;
 
        if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
                seq_puts(seq, "per-thread\n");
@@ -1042,7 +1055,7 @@ static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
                                       char *buf, size_t nbytes, loff_t off)
 {
-       struct rdt_resource *r = of->kn->parent->priv;
+       struct rdt_hw_resource *hw_res;
        unsigned int bytes;
        int ret;
 
@@ -1053,7 +1066,8 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
        if (bytes > (boot_cpu_data.x86_cache_size * 1024))
                return -EINVAL;
 
-       resctrl_cqm_threshold = bytes / r->mon_scale;
+       hw_res = resctrl_to_arch_res(of->kn->parent->priv);
+       resctrl_cqm_threshold = bytes / hw_res->mon_scale;
 
        return nbytes;
 }
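
The threshold is stored internally in hardware chunks and converted at the filesystem boundary, so the read side multiplies and the write side divides by mon_scale. A tiny compilable sketch of that round-trip; the scale and byte values are illustrative, not read from hardware:

#include <stdio.h>

int main(void)
{
        unsigned int mon_scale = 65536;         /* bytes per hw chunk */
        unsigned int bytes_in = 1000000;        /* user writes bytes... */
        unsigned int chunks = bytes_in / mon_scale;     /* ...stored as chunks */
        unsigned int bytes_out = chunks * mon_scale;    /* ...shown as bytes */

        /* The division rounds down, so the value read back can be
         * smaller than the value written. */
        printf("wrote %u, stored %u chunks, read back %u\n",
               bytes_in, chunks, bytes_out);
        return 0;
}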
@@ -1078,76 +1092,17 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
        return 0;
 }
 
-/**
- * rdt_cdp_peer_get - Retrieve CDP peer if it exists
- * @r: RDT resource to which RDT domain @d belongs
- * @d: Cache instance for which a CDP peer is requested
- * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
- *         Used to return the result.
- * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
- *         Used to return the result.
- *
- * RDT resources are managed independently and by extension the RDT domains
- * (RDT resource instances) are managed independently also. The Code and
- * Data Prioritization (CDP) RDT resources, while managed independently,
- * could refer to the same underlying hardware. For example,
- * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
- *
- * When provided with an RDT resource @r and an instance of that RDT
- * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
- * resource and the exact instance that shares the same hardware.
- *
- * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
- *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
- *         and @d_cdp will point to the peer RDT domain.
- */
-static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
-                           struct rdt_resource **r_cdp,
-                           struct rdt_domain **d_cdp)
+static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
 {
-       struct rdt_resource *_r_cdp = NULL;
-       struct rdt_domain *_d_cdp = NULL;
-       int ret = 0;
-
-       switch (r->rid) {
-       case RDT_RESOURCE_L3DATA:
-               _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
-               break;
-       case RDT_RESOURCE_L3CODE:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L3DATA];
-               break;
-       case RDT_RESOURCE_L2DATA:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2CODE];
-               break;
-       case RDT_RESOURCE_L2CODE:
-               _r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2DATA];
-               break;
+       switch (my_type) {
+       case CDP_CODE:
+               return CDP_DATA;
+       case CDP_DATA:
+               return CDP_CODE;
        default:
-               ret = -ENOENT;
-               goto out;
-       }
-
-       /*
-        * When a new CPU comes online and CDP is enabled then the new
-        * RDT domains (if any) associated with both CDP RDT resources
-        * are added in the same CPU online routine while the
-        * rdtgroup_mutex is held. It should thus not happen for one
-        * RDT domain to exist and be associated with its RDT CDP
-        * resource but there is no RDT domain associated with the
-        * peer RDT CDP resource. Hence the WARN.
-        */
-       _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
-       if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
-               _r_cdp = NULL;
-               _d_cdp = NULL;
-               ret = -EINVAL;
+       case CDP_NONE:
+               return CDP_NONE;
        }
-
-out:
-       *r_cdp = _r_cdp;
-       *d_cdp = _d_cdp;
-
-       return ret;
 }
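
Because both CDP halves of a cache are now a single resource, finding the "other" configuration becomes a pure enum flip rather than a cross-resource domain lookup, and the WARN for a missing peer domain disappears with it. A compilable sketch pairing the flip with the double overlap check it feeds below; the names mirror the hunk, the overlap test itself is a stub:

#include <stdio.h>

enum resctrl_conf_type { CDP_NONE, CDP_CODE, CDP_DATA };

static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
        switch (my_type) {
        case CDP_CODE:
                return CDP_DATA;
        case CDP_DATA:
                return CDP_CODE;
        default:
                return CDP_NONE;
        }
}

/* Stub: pretend only the CDP_DATA configuration overlaps. */
static int overlaps(enum resctrl_conf_type t)
{
        return t == CDP_DATA;
}

int main(void)
{
        enum resctrl_conf_type mine = CDP_CODE;

        /* Check my own configuration first, then, with CDP on, my peer. */
        if (overlaps(mine) || overlaps(resctrl_peer_type(mine)))
                printf("CBM overlaps\n");
        return 0;
}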
 
 /**
@@ -1171,11 +1126,11 @@ out:
  * Return: false if CBM does not overlap, true if it does.
  */
 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                                   unsigned long cbm, int closid, bool exclusive)
+                                   unsigned long cbm, int closid,
+                                   enum resctrl_conf_type type, bool exclusive)
 {
        enum rdtgrp_mode mode;
        unsigned long ctrl_b;
-       u32 *ctrl;
        int i;
 
        /* Check for any overlap with regions used by hardware directly */
@@ -1186,9 +1141,8 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
        }
 
        /* Check for overlap with other resource groups */
-       ctrl = d->ctrl_val;
-       for (i = 0; i < closids_supported(); i++, ctrl++) {
-               ctrl_b = *ctrl;
+       for (i = 0; i < closids_supported(); i++) {
+               ctrl_b = resctrl_arch_get_config(r, d, i, type);
                mode = rdtgroup_mode_by_closid(i);
                if (closid_allocated(i) && i != closid &&
                    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
@@ -1208,7 +1162,7 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
 
 /**
  * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
- * @r: Resource to which domain instance @d belongs.
+ * @s: Schema for the resource to which domain instance @d belongs.
  * @d: The domain instance for which @closid is being tested.
  * @cbm: Capacity bitmask being tested.
  * @closid: Intended closid for @cbm.
@@ -1226,19 +1180,19 @@ static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d
  *
  * Return: true if CBM overlap detected, false if there is no overlap
  */
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
                           unsigned long cbm, int closid, bool exclusive)
 {
-       struct rdt_resource *r_cdp;
-       struct rdt_domain *d_cdp;
+       enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+       struct rdt_resource *r = s->res;
 
-       if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
+       if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
+                                   exclusive))
                return true;
 
-       if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
+       if (!resctrl_arch_get_cdp_enabled(r->rid))
                return false;
-
-       return  __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
+       return  __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
 }
 
 /**
@@ -1256,17 +1210,21 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
 {
        int closid = rdtgrp->closid;
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        bool has_cache = false;
        struct rdt_domain *d;
+       u32 ctrl;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                if (r->rid == RDT_RESOURCE_MBA)
                        continue;
                has_cache = true;
                list_for_each_entry(d, &r->domains, list) {
-                       if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
-                                                 rdtgrp->closid, false)) {
+                       ctrl = resctrl_arch_get_config(r, d, closid,
+                                                      s->conf_type);
+                       if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
                                rdt_last_cmd_puts("Schemata overlaps\n");
                                return false;
                        }
@@ -1397,6 +1355,7 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
 static int rdtgroup_size_show(struct kernfs_open_file *of,
                              struct seq_file *s, void *v)
 {
+       struct resctrl_schema *schema;
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;
        struct rdt_domain *d;
@@ -1418,8 +1377,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
                        ret = -ENODEV;
                } else {
                        seq_printf(s, "%*s:", max_name_width,
-                                  rdtgrp->plr->r->name);
-                       size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+                                  rdtgrp->plr->s->name);
+                       size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
                                                    rdtgrp->plr->d,
                                                    rdtgrp->plr->cbm);
                        seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
@@ -1427,18 +1386,19 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
                goto out;
        }
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(schema, &resctrl_schema_all, list) {
+               r = schema->res;
                sep = false;
-               seq_printf(s, "%*s:", max_name_width, r->name);
+               seq_printf(s, "%*s:", max_name_width, schema->name);
                list_for_each_entry(d, &r->domains, list) {
                        if (sep)
                                seq_putc(s, ';');
                        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                                size = 0;
                        } else {
-                               ctrl = (!is_mba_sc(r) ?
-                                               d->ctrl_val[rdtgrp->closid] :
-                                               d->mbps_val[rdtgrp->closid]);
+                               ctrl = resctrl_arch_get_config(r, d,
+                                                              rdtgrp->closid,
+                                                              schema->conf_type);
                                if (r->rid == RDT_RESOURCE_MBA)
                                        size = ctrl;
                                else
@@ -1757,14 +1717,14 @@ int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
        return ret;
 }
 
-static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
+static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
                                      unsigned long fflags)
 {
        struct kernfs_node *kn_subdir;
        int ret;
 
        kn_subdir = kernfs_create_dir(kn_info, name,
-                                     kn_info->mode, r);
+                                     kn_info->mode, priv);
        if (IS_ERR(kn_subdir))
                return PTR_ERR(kn_subdir);
 
@@ -1781,6 +1741,7 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
 
 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
 {
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        unsigned long fflags;
        char name[32];
@@ -1795,9 +1756,11 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
        if (ret)
                goto out_destroy;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       /* loop over enabled controls, these are all alloc_enabled */
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                fflags =  r->fflags | RF_CTRL_INFO;
-               ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
+               ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
                if (ret)
                        goto out_destroy;
        }
@@ -1867,7 +1830,7 @@ static void l2_qos_cfg_update(void *arg)
 
 static inline bool is_mba_linear(void)
 {
-       return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
+       return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
 }
 
 static int set_cache_qos_cfg(int level, bool enable)
@@ -1888,7 +1851,7 @@ static int set_cache_qos_cfg(int level, bool enable)
        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       r_l = &rdt_resources_all[level];
+       r_l = &rdt_resources_all[level].r_resctrl;
        list_for_each_entry(d, &r_l->domains, list) {
                if (r_l->cache.arch_has_per_cpu_cfg)
                        /* Pick all the CPUs in the domain instance */
@@ -1914,14 +1877,16 @@ static int set_cache_qos_cfg(int level, bool enable)
 /* Restore the qos cfg state when a domain comes online */
 void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
 {
-       if (!r->alloc_capable)
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+
+       if (!r->cdp_capable)
                return;
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
-               l2_qos_cfg_update(&r->alloc_enabled);
+       if (r->rid == RDT_RESOURCE_L2)
+               l2_qos_cfg_update(&hw_res->cdp_enabled);
 
-       if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
-               l3_qos_cfg_update(&r->alloc_enabled);
+       if (r->rid == RDT_RESOURCE_L3)
+               l3_qos_cfg_update(&hw_res->cdp_enabled);
 }
 
 /*
@@ -1932,7 +1897,8 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
  */
 static int set_mba_sc(bool mba_sc)
 {
-       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
+       struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;
 
        if (!is_mbm_enabled() || !is_mba_linear() ||
@@ -1940,73 +1906,60 @@ static int set_mba_sc(bool mba_sc)
                return -EINVAL;
 
        r->membw.mba_sc = mba_sc;
-       list_for_each_entry(d, &r->domains, list)
-               setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);
+       list_for_each_entry(d, &r->domains, list) {
+               hw_dom = resctrl_to_arch_dom(d);
+               setup_default_ctrlval(r, hw_dom->ctrl_val, hw_dom->mbps_val);
+       }
 
        return 0;
 }
 
-static int cdp_enable(int level, int data_type, int code_type)
+static int cdp_enable(int level)
 {
-       struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
-       struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
-       struct rdt_resource *r_l = &rdt_resources_all[level];
+       struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
        int ret;
 
-       if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
-           !r_lcode->alloc_capable)
+       if (!r_l->alloc_capable)
                return -EINVAL;
 
        ret = set_cache_qos_cfg(level, true);
-       if (!ret) {
-               r_l->alloc_enabled = false;
-               r_ldata->alloc_enabled = true;
-               r_lcode->alloc_enabled = true;
-       }
+       if (!ret)
+               rdt_resources_all[level].cdp_enabled = true;
+
        return ret;
 }
 
-static int cdpl3_enable(void)
+static void cdp_disable(int level)
 {
-       return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
-                         RDT_RESOURCE_L3CODE);
-}
+       struct rdt_hw_resource *r_hw = &rdt_resources_all[level];
 
-static int cdpl2_enable(void)
-{
-       return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
-                         RDT_RESOURCE_L2CODE);
+       if (r_hw->cdp_enabled) {
+               set_cache_qos_cfg(level, false);
+               r_hw->cdp_enabled = false;
+       }
 }
 
-static void cdp_disable(int level, int data_type, int code_type)
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
 {
-       struct rdt_resource *r = &rdt_resources_all[level];
+       struct rdt_hw_resource *hw_res = &rdt_resources_all[l];
 
-       r->alloc_enabled = r->alloc_capable;
+       if (!hw_res->r_resctrl.cdp_capable)
+               return -EINVAL;
 
-       if (rdt_resources_all[data_type].alloc_enabled) {
-               rdt_resources_all[data_type].alloc_enabled = false;
-               rdt_resources_all[code_type].alloc_enabled = false;
-               set_cache_qos_cfg(level, false);
-       }
-}
+       if (enable)
+               return cdp_enable(l);
 
-static void cdpl3_disable(void)
-{
-       cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
-}
+       cdp_disable(l);
 
-static void cdpl2_disable(void)
-{
-       cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
+       return 0;
 }
 
 static void cdp_disable_all(void)
 {
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
-               cdpl3_disable();
-       if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
-               cdpl2_disable();
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
+               resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
+               resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
 }
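
The rewritten flow above collapses the old per-DATA/CODE alloc_enabled juggling into a single cdp_enabled flag per hardware resource, toggled through one arch helper. A minimal userspace model of that state machine (simplified stand-ins for the kernel structures, not the resctrl code itself):

#include <stdbool.h>
#include <stdio.h>

enum res_level { RES_L2, RES_L3, RES_NUM };

struct hw_resource {
        bool cdp_capable;
        bool cdp_enabled;
};

static struct hw_resource resources[RES_NUM] = {
        [RES_L2] = { .cdp_capable = true },
        [RES_L3] = { .cdp_capable = true },
};

static int set_cdp_enabled(enum res_level l, bool enable)
{
        if (!resources[l].cdp_capable)
                return -1;              /* -EINVAL in the kernel */
        resources[l].cdp_enabled = enable;
        return 0;
}

static void disable_all(void)
{
        if (resources[RES_L3].cdp_enabled)
                set_cdp_enabled(RES_L3, false);
        if (resources[RES_L2].cdp_enabled)
                set_cdp_enabled(RES_L2, false);
}

int main(void)
{
        set_cdp_enabled(RES_L3, true);
        disable_all();
        printf("L3 CDP: %d\n", resources[RES_L3].cdp_enabled); /* prints 0 */
        return 0;
}
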
 
 /*
@@ -2084,10 +2037,10 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
        int ret = 0;
 
        if (ctx->enable_cdpl2)
-               ret = cdpl2_enable();
+               ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
 
        if (!ret && ctx->enable_cdpl3)
-               ret = cdpl3_enable();
+               ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
 
        if (!ret && ctx->enable_mba_mbps)
                ret = set_mba_sc(true);
@@ -2095,6 +2048,92 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx)
        return ret;
 }
 
+static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
+{
+       struct resctrl_schema *s;
+       const char *suffix = "";
+       int ret, cl;
+
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       s->res = r;
+       s->num_closid = resctrl_arch_get_num_closid(r);
+       if (resctrl_arch_get_cdp_enabled(r->rid))
+               s->num_closid /= 2;
+
+       s->conf_type = type;
+       switch (type) {
+       case CDP_CODE:
+               suffix = "CODE";
+               break;
+       case CDP_DATA:
+               suffix = "DATA";
+               break;
+       case CDP_NONE:
+               suffix = "";
+               break;
+       }
+
+       ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
+       if (ret >= sizeof(s->name)) {
+               kfree(s);
+               return -EINVAL;
+       }
+
+       cl = strlen(s->name);
+
+       /*
+        * If CDP is supported by this resource, but not enabled,
+        * reserve space for the suffix anyway. This ensures the tabular
+        * format of the schemata file does not change between mounts of
+        * the filesystem.
+        */
+       if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
+               cl += 4;
+
+       if (cl > max_name_width)
+               max_name_width = cl;
+
+       INIT_LIST_HEAD(&s->list);
+       list_add(&s->list, &resctrl_schema_all);
+
+       return 0;
+}
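
The width bookkeeping above keeps the schemata file tabular across remounts: a CDP-capable resource mounted without CDP still reserves the four columns its CODE/DATA variants would occupy. A standalone sketch of that rule (illustrative values, not the kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char name[8];
        int max_name_width = 0, cl;
        int cdp_capable = 1, cdp_enabled = 0;

        snprintf(name, sizeof(name), "%s%s", "L3", cdp_enabled ? "CODE" : "");
        cl = strlen(name);
        if (cdp_capable && !cdp_enabled)
                cl += 4;                        /* strlen("CODE") */
        if (cl > max_name_width)
                max_name_width = cl;

        printf("%*s: ...\n", max_name_width, name);     /* "    L3: ..." */
        return 0;
}
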
+
+static int schemata_list_create(void)
+{
+       struct rdt_resource *r;
+       int ret = 0;
+
+       for_each_alloc_enabled_rdt_resource(r) {
+               if (resctrl_arch_get_cdp_enabled(r->rid)) {
+                       ret = schemata_list_add(r, CDP_CODE);
+                       if (ret)
+                               break;
+
+                       ret = schemata_list_add(r, CDP_DATA);
+               } else {
+                       ret = schemata_list_add(r, CDP_NONE);
+               }
+
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static void schemata_list_destroy(void)
+{
+       struct resctrl_schema *s, *tmp;
+
+       list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
+               list_del(&s->list);
+               kfree(s);
+       }
+}
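
schemata_list_destroy() frees the node it is standing on, which is why it uses the _safe iterator that caches the next pointer before the body runs. The same rule in plain C with a hand-rolled list (illustrative, not the kernel list API):

#include <stdlib.h>

struct node { struct node *next; };

static void destroy(struct node *head)
{
        struct node *n, *tmp;

        for (n = head; n; n = tmp) {
                tmp = n->next;          /* load the link before freeing the node */
                free(n);
        }
}

int main(void)
{
        struct node *a = malloc(sizeof(*a));
        struct node *b = malloc(sizeof(*b));

        a->next = b;
        b->next = NULL;
        destroy(a);
        return 0;
}
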
+
 static int rdt_get_tree(struct fs_context *fc)
 {
        struct rdt_fs_context *ctx = rdt_fc2context(fc);
@@ -2116,11 +2155,17 @@ static int rdt_get_tree(struct fs_context *fc)
        if (ret < 0)
                goto out_cdp;
 
+       ret = schemata_list_create();
+       if (ret) {
+               schemata_list_destroy();
+               goto out_mba;
+       }
+
        closid_init();
 
        ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
        if (ret < 0)
-               goto out_mba;
+               goto out_schemata_free;
 
        if (rdt_mon_capable) {
                ret = mongroup_create_dir(rdtgroup_default.kn,
@@ -2153,7 +2198,7 @@ static int rdt_get_tree(struct fs_context *fc)
                static_branch_enable_cpuslocked(&rdt_enable_key);
 
        if (is_mbm_enabled()) {
-               r = &rdt_resources_all[RDT_RESOURCE_L3];
+               r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
                list_for_each_entry(dom, &r->domains, list)
                        mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
        }
@@ -2170,6 +2215,8 @@ out_mongrp:
                kernfs_remove(kn_mongrp);
 out_info:
        kernfs_remove(kn_info);
+out_schemata_free:
+       schemata_list_destroy();
 out_mba:
        if (ctx->enable_mba_mbps)
                set_mba_sc(false);
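
The new out_schemata_free label slots into the usual kernel unwind chain: each failing setup step jumps to a label that undoes everything done so far, in reverse order. A generic sketch of the idiom (hypothetical resources, not the resctrl code):

#include <stdlib.h>

static int setup(void **pa, void **pb)
{
        void *a, *b;

        a = malloc(16);
        if (!a)
                goto out;
        b = malloc(16);
        if (!b)
                goto out_free_a;

        *pa = a;
        *pb = b;
        return 0;

out_free_a:
        free(a);
out:
        return -1;              /* the kernel would return -ENOMEM here */
}

int main(void)
{
        void *a, *b;

        if (!setup(&a, &b)) {
                free(b);
                free(a);
        }
        return 0;
}
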
@@ -2257,6 +2304,8 @@ static int rdt_init_fs_context(struct fs_context *fc)
 
 static int reset_all_ctrls(struct rdt_resource *r)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+       struct rdt_hw_domain *hw_dom;
        struct msr_param msr_param;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
@@ -2267,7 +2316,7 @@ static int reset_all_ctrls(struct rdt_resource *r)
 
        msr_param.res = r;
        msr_param.low = 0;
-       msr_param.high = r->num_closid;
+       msr_param.high = hw_res->num_closid;
 
        /*
         * Disable resource control for this resource by setting all
@@ -2275,10 +2324,11 @@ static int reset_all_ctrls(struct rdt_resource *r)
         * from each domain to update the MSRs below.
         */
        list_for_each_entry(d, &r->domains, list) {
+               hw_dom = resctrl_to_arch_dom(d);
                cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
 
-               for (i = 0; i < r->num_closid; i++)
-                       d->ctrl_val[i] = r->default_ctrl;
+               for (i = 0; i < hw_res->num_closid; i++)
+                       hw_dom->ctrl_val[i] = r->default_ctrl;
        }
        cpu = get_cpu();
        /* Update CBM on this cpu if it's in cpu_mask. */
@@ -2408,6 +2458,7 @@ static void rdt_kill_sb(struct super_block *sb)
        rmdir_all_sub();
        rdt_pseudo_lock_release();
        rdtgroup_default.mode = RDT_MODE_SHAREABLE;
+       schemata_list_destroy();
        static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
        static_branch_disable_cpuslocked(&rdt_mon_enable_key);
        static_branch_disable_cpuslocked(&rdt_enable_key);
@@ -2642,23 +2693,24 @@ static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
  * Set the RDT domain up to start off with all usable allocations. That is,
  * all shareable and unused bits. All-zero CBM is invalid.
  */
-static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
                                 u32 closid)
 {
-       struct rdt_resource *r_cdp = NULL;
-       struct rdt_domain *d_cdp = NULL;
+       enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+       enum resctrl_conf_type t = s->conf_type;
+       struct resctrl_staged_config *cfg;
+       struct rdt_resource *r = s->res;
        u32 used_b = 0, unused_b = 0;
        unsigned long tmp_cbm;
        enum rdtgrp_mode mode;
-       u32 peer_ctl, *ctrl;
+       u32 peer_ctl, ctrl_val;
        int i;
 
-       rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
-       d->have_new_ctrl = false;
-       d->new_ctrl = r->cache.shareable_bits;
+       cfg = &d->staged_config[t];
+       cfg->have_new_ctrl = false;
+       cfg->new_ctrl = r->cache.shareable_bits;
        used_b = r->cache.shareable_bits;
-       ctrl = d->ctrl_val;
-       for (i = 0; i < closids_supported(); i++, ctrl++) {
+       for (i = 0; i < closids_supported(); i++) {
                if (closid_allocated(i) && i != closid) {
                        mode = rdtgroup_mode_by_closid(i);
                        if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2673,35 +2725,38 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
                         * usage to ensure there is no overlap
                         * with an exclusive group.
                         */
-                       if (d_cdp)
-                               peer_ctl = d_cdp->ctrl_val[i];
+                       if (resctrl_arch_get_cdp_enabled(r->rid))
+                               peer_ctl = resctrl_arch_get_config(r, d, i,
+                                                                  peer_type);
                        else
                                peer_ctl = 0;
-                       used_b |= *ctrl | peer_ctl;
+                       ctrl_val = resctrl_arch_get_config(r, d, i,
+                                                          s->conf_type);
+                       used_b |= ctrl_val | peer_ctl;
                        if (mode == RDT_MODE_SHAREABLE)
-                               d->new_ctrl |= *ctrl | peer_ctl;
+                               cfg->new_ctrl |= ctrl_val | peer_ctl;
                }
        }
        if (d->plr && d->plr->cbm > 0)
                used_b |= d->plr->cbm;
        unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
        unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
-       d->new_ctrl |= unused_b;
+       cfg->new_ctrl |= unused_b;
        /*
         * Force the initial CBM to be valid, user can
         * modify the CBM based on system availability.
         */
-       d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r);
+       cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
        /*
         * Assign the u32 CBM to an unsigned long to ensure that
         * bitmap_weight() does not access out-of-bound memory.
         */
-       tmp_cbm = d->new_ctrl;
+       tmp_cbm = cfg->new_ctrl;
        if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
-               rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+               rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
                return -ENOSPC;
        }
-       d->have_new_ctrl = true;
+       cfg->have_new_ctrl = true;
 
        return 0;
 }
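
The CBM arithmetic above, in isolation: collect every bit other groups use, invert within the cbm_len-wide mask to find the free bits, and seed the new group with the shareable plus unused bits. A standalone example with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int cbm_len = 11;              /* e.g. an 11-bit CBM */
        uint32_t full = (1u << cbm_len) - 1;    /* BIT_MASK(cbm_len) - 1 */
        uint32_t shareable = 0x600;             /* r->cache.shareable_bits */
        uint32_t used_b = shareable | 0x0ff;    /* bits taken by other closids */
        uint32_t unused_b = (used_b ^ full) & full;
        uint32_t new_ctrl = shareable | unused_b;

        printf("new_ctrl = 0x%x\n", new_ctrl);  /* 0x700 */
        return 0;
}
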
@@ -2716,13 +2771,13 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
  * If there are no more shareable bits available on any domain then
  * the entire allocation will fail.
  */
-static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
 {
        struct rdt_domain *d;
        int ret;
 
-       list_for_each_entry(d, &r->domains, list) {
-               ret = __init_one_rdt_domain(d, r, closid);
+       list_for_each_entry(d, &s->res->domains, list) {
+               ret = __init_one_rdt_domain(d, s, closid);
                if (ret < 0)
                        return ret;
        }
@@ -2733,30 +2788,34 @@ static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
 /* Initialize MBA resource with default values. */
 static void rdtgroup_init_mba(struct rdt_resource *r)
 {
+       struct resctrl_staged_config *cfg;
        struct rdt_domain *d;
 
        list_for_each_entry(d, &r->domains, list) {
-               d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
-               d->have_new_ctrl = true;
+               cfg = &d->staged_config[CDP_NONE];
+               cfg->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+               cfg->have_new_ctrl = true;
        }
 }
 
 /* Initialize the RDT group's allocations. */
 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
+       struct resctrl_schema *s;
        struct rdt_resource *r;
        int ret;
 
-       for_each_alloc_enabled_rdt_resource(r) {
+       list_for_each_entry(s, &resctrl_schema_all, list) {
+               r = s->res;
                if (r->rid == RDT_RESOURCE_MBA) {
                        rdtgroup_init_mba(r);
                } else {
-                       ret = rdtgroup_init_cat(r, rdtgrp->closid);
+                       ret = rdtgroup_init_cat(s, rdtgrp->closid);
                        if (ret < 0)
                                return ret;
                }
 
-               ret = update_domains(r, rdtgrp->closid);
+               ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret < 0) {
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
                        return ret;
@@ -3124,13 +3183,13 @@ out:
 
 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
 {
-       if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
                seq_puts(seq, ",cdp");
 
-       if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+       if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
                seq_puts(seq, ",cdpl2");
 
-       if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
+       if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
                seq_puts(seq, ",mba_MBps");
 
        return 0;
index 08651a4e6aa0f4ece403b5680e909c7ced76eb9a..42fc41dd0e1f17058724bbba75dfa1ede83994c8 100644 (file)
@@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
        .irq_set_affinity = msi_domain_set_affinity,
        .irq_retrigger = irq_chip_retrigger_hierarchy,
        .irq_write_msi_msg = hpet_msi_write_msg,
-       .flags = IRQCHIP_SKIP_SET_WAKE,
+       .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
 };
 
 static int hpet_msi_init(struct irq_domain *domain,
index 739be5da3bca788515358f77cb2d39551990138d..fe03bd978761eb34a1b5230303236c5e746a807d 100644 (file)
@@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_after_set_cpuid(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-       return host_efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct kvm_cpuid_entry2 *e, *entry;
-
-       entry = NULL;
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               e = &vcpu->arch.cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
-               cpuid_entry_clear(entry, X86_FEATURE_NX);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
@@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = cpuid->nent;
 
-       cpuid_fix_nx_cap(vcpu);
        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);
 
@@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
 
 void kvm_set_cpu_caps(void)
 {
-       unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
 #ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
@@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
-               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );
index 0b38f944c6b6f634b16d6bc2aba4fd1db6cbf001..41d2a53c5dea0bdec953d9f4ba597373c10e38fb 100644 (file)
@@ -1933,7 +1933,7 @@ ret_success:
 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *entry;
-       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+       struct kvm_vcpu_hv *hv_vcpu;
 
        entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
        if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
index c4f4fa23320ee7f4326a6ea3bf237e1be84c14f5..47b7652702397b6ade0fc57b36b8de0fb063bb1f 100644 (file)
@@ -2535,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 {
        struct kvm_mmu_page *sp;
+       bool locked = false;
 
        /*
         * Force write-protection if the page is being tracked.  Note, the page
@@ -2557,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
                if (sp->unsync)
                        continue;
 
+               /*
+                * TDP MMU page faults require an additional spinlock as they
+                * run with mmu_lock held for read, not write, and the unsync
+                * logic is not thread safe.  Take the spinlock regardless of
+                * the MMU type to avoid extra conditionals/parameters, there's
+                * no meaningful penalty if mmu_lock is held for write.
+                */
+               if (!locked) {
+                       locked = true;
+                       spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+                       /*
+                        * Recheck after taking the spinlock, a different vCPU
+                        * may have since marked the page unsync.  A false
+                        * positive on the unprotected check above is not
+                        * possible as clearing sp->unsync _must_ hold mmu_lock
+                        * for write, i.e. unsync cannot transition from 0->1
+                        * while this CPU holds mmu_lock for read (or write).
+                        */
+                       if (READ_ONCE(sp->unsync))
+                               continue;
+               }
+
                WARN_ON(sp->role.level != PG_LEVEL_4K);
                kvm_unsync_page(vcpu, sp);
        }
+       if (locked)
+               spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
 
        /*
         * We need to ensure that the marking of unsync pages is visible
@@ -5537,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 
+       spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
        if (!kvm_mmu_init_tdp_mmu(kvm))
                /*
                 * No smp_load/store wrappers needed here as we are in
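
The recheck of sp->unsync after taking mmu_unsync_pages_lock is the classic check/lock/re-check pattern. A pthread sketch of that shape (the flag and lock names are illustrative, not KVM's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool unsync;

static void mark_unsync(void)
{
        if (atomic_load(&unsync))       /* cheap unlocked check (READ_ONCE in KVM) */
                return;
        pthread_mutex_lock(&lock);
        if (!atomic_load(&unsync)) {    /* re-check: another thread may have won */
                /* ... the one-time unsync work goes here ... */
                atomic_store(&unsync, true);
        }
        pthread_mutex_unlock(&lock);
}
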
index 0853370bd811eb5977eb1aab5473330bcf2d1e30..d80cb122b5f386d0ad3369a69caaf38ef5bcda79 100644 (file)
@@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        if (!kvm->arch.tdp_mmu_enabled)
                return;
 
+       WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 
        /*
@@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
@@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
        list_del_rcu(&root->link);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+       zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
 
        call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }
@@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield, bool flush,
                          bool shared)
 {
+       gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+       bool zap_all = (start == 0 && end >= max_gfn_host);
        struct tdp_iter iter;
 
+       /*
+        * No need to try to step down in the iterator when zapping all SPTEs;
+        * zapping the top-level non-leaf SPTEs will recurse on their children.
+        */
+       int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
+
+       /*
+        * Bound the walk at host.MAXPHYADDR; guest accesses beyond that will
+        * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
+        * and so KVM will never install a SPTE for such addresses.
+        */
+       end = min(end, max_gfn_host);
+
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        rcu_read_lock();
 
-       tdp_root_for_each_pte(iter, root, start, end) {
+       for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+                                  min_level, start, end) {
 retry:
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@@ -744,9 +759,10 @@ retry:
                /*
                 * If this is a non-last-level SPTE that covers a larger range
                 * than should be zapped, continue, and zap the mappings at a
-                * lower level.
+                * lower level, except when zapping all SPTEs.
                 */
-               if ((iter.gfn < start ||
+               if (!zap_all &&
+                   (iter.gfn < start ||
                     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
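
Two ideas drive the hunk above: clamp the walk at the host-MAXPHYADDR-derived GFN limit, and when the caller asked for everything (-1ull), start at the root level so zapping a top-level SPTE implicitly drops its whole subtree. The arithmetic as standalone C (example values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PG_LEVEL_4K     1

int main(void)
{
        unsigned int shadow_phys_bits = 46;     /* example host MAXPHYADDR */
        uint64_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        uint64_t start = 0, end = ~0ULL;        /* -1ull: "zap all" */
        int root_level = 4;

        bool zap_all = (start == 0 && end >= max_gfn_host);
        int min_level = zap_all ? root_level : PG_LEVEL_4K;

        if (end > max_gfn_host)
                end = max_gfn_host;             /* end = min(end, max_gfn_host) */

        printf("walk levels %d..%d, gfns [%llu, %llu)\n", root_level, min_level,
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}
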
@@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        bool flush = false;
        int i;
 
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
                                                  flush, false);
 
        if (flush)
@@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
  */
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        struct kvm_mmu_page *next_root;
        struct kvm_mmu_page *root;
        bool flush = false;
@@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
                rcu_read_unlock();
 
-               flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
-                                     true);
+               flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
 
                /*
                 * Put the reference acquired in
index 61738ff8ef33b6c85eac131273e83c4415910961..e5515477c30a6152046e74e5b0e9e9a4f574f7a2 100644 (file)
@@ -158,6 +158,9 @@ void recalc_intercepts(struct vcpu_svm *svm)
        /* If SMI is not intercepted, ignore guest SMI intercept as well  */
        if (!intercept_smi)
                vmcb_clr_intercept(c, INTERCEPT_SMI);
+
+       vmcb_set_intercept(c, INTERCEPT_VMLOAD);
+       vmcb_set_intercept(c, INTERCEPT_VMSAVE);
 }
 
 static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -503,7 +506,11 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 {
-       const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+       const u32 int_ctl_vmcb01_bits =
+               V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+
+       const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
+
        struct kvm_vcpu *vcpu = &svm->vcpu;
 
        /*
@@ -535,8 +542,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
                vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
 
        svm->vmcb->control.int_ctl             =
-               (svm->nested.ctl.int_ctl & ~mask) |
-               (svm->vmcb01.ptr->control.int_ctl & mask);
+               (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+               (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
        svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
index e8ccab50ebf6f1cfa0e795ddf04c893012a329c1..69639f9624f56ddd961440754cdcc4b5740105b1 100644 (file)
@@ -1589,17 +1589,18 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-       const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
        svm_clr_intercept(svm, INTERCEPT_VINTR);
 
        /* Drop int_ctl fields related to VINTR injection.  */
-       svm->vmcb->control.int_ctl &= mask;
+       svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
        if (is_guest_mode(&svm->vcpu)) {
-               svm->vmcb01.ptr->control.int_ctl &= mask;
+               svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
 
                WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
                        (svm->nested.ctl.int_ctl & V_TPR_MASK));
-               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+
+               svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+                       V_IRQ_INJECTION_BITS_MASK;
        }
 
        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
index 1a52134b0c42af69a375996086eb2aa8e968b03a..b3f77d18eb5aadebb89f5e72726f3e0fd619b06d 100644 (file)
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
        vcpu_put(vcpu);
 }
 
+#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+       return VALID_PAGE(root_hpa) &&
+              ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+                                      gpa_t addr)
+{
+       uint i;
+       struct kvm_mmu_root_info *cached_root;
+
+       WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+               cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+               if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+                                           eptp))
+                       vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+       }
+}
+
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                struct x86_exception *fault)
 {
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                vm_exit_reason = EXIT_REASON_PML_FULL;
                vmx->nested.pml_full = false;
                exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-       } else if (fault->error_code & PFERR_RSVD_MASK)
-               vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-       else
-               vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+       } else {
+               if (fault->error_code & PFERR_RSVD_MASK)
+                       vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+               else
+                       vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+               /*
+                * Although the caller (kvm_inject_emulated_page_fault) would
+                * have already synced the faulting address in the shadow EPT
+                * tables for the current EPTP12, we also need to sync it for
+                * any other cached EPTP02s based on the same EP4TA, since the
+                * TLB associates mappings to the EP4TA rather than the full EPTP.
+                */
+               nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+                                          fault->address);
+       }
 
        nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
        vmcs12->guest_physical_address = fault->address;
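
nested_ept_root_matches() compares only bits 51:12 of the two EPT pointers, i.e. the EP4TA, ignoring the memtype and page-walk attribute bits in 11:0. The masking as standalone C (example values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* GENMASK_ULL(51, 12): bits 51..12 set */
#define EPTP_PA_MASK (((1ULL << 52) - 1) & ~((1ULL << 12) - 1))

static bool ept_root_matches(uint64_t root_eptp, uint64_t eptp)
{
        return (root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK);
}

int main(void)
{
        uint64_t a = 0x12345000ULL | 0x5e;      /* same EP4TA, different attrs */
        uint64_t b = 0x12345000ULL | 0x1e;

        printf("%d\n", ept_root_matches(a, b)); /* prints 1 */
        return 0;
}
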
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        return nested_vmx_succeed(vcpu);
 }
 
-#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
-       return VALID_PAGE(root_hpa) &&
-               ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
 /* Emulate the INVEPT instruction */
 static int handle_invept(struct kvm_vcpu *vcpu)
 {
@@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                if (is_nmi(intr_info))
                        return true;
                else if (is_page_fault(intr_info))
-                       return vcpu->arch.apf.host_apf_flags || !enable_ept;
+                       return vcpu->arch.apf.host_apf_flags ||
+                              vmx_need_pf_intercept(vcpu);
                else if (is_debug(intr_info) &&
                         vcpu->guest_debug &
                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
index db88ed4f2121c128936a6bc81d13072ea820b688..17a1cb4b059dfdb3097964853a777176677bef50 100644 (file)
@@ -522,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
-       return vmx->secondary_exec_control &
+       return secondary_exec_controls_get(vmx) &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }
 
index fd1ab80be0dece39b8f99bdb1f5a0b16e06e7de5..a4cf678cf5c80e2ccb2391e18ba801bab73bb124 100644 (file)
@@ -10,6 +10,7 @@ BEGIN {
 
 /^GNU objdump/ {
        verstr = ""
+       gsub(/\(.*\)/, "");
        for (i = 3; i <= NF; i++)
                if (match($(i), "^[0-9]")) {
                        verstr = $(i);
index 9ba700dc47de415629fdf5a05fcca80178295e7e..27c82207d387813ecf7fdbb39eafd221a482935b 100644 (file)
@@ -26,6 +26,9 @@ static struct relocs relocs32;
 #if ELF_BITS == 64
 static struct relocs relocs32neg;
 static struct relocs relocs64;
+#define FMT PRIu64
+#else
+#define FMT PRIu32
 #endif
 
 struct section {
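
The FMT macro picks the inttypes.h conversion matching the width of the ELF64 vs ELF32 offset types, which is what fixes the %d-versus-64-bit mismatches in the die() calls below. Minimal standalone use:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t off64 = 0x12345678ULL;
        uint32_t off32 = 0x1234;

        printf("Seek to %" PRIu64 " failed\n", off64);
        printf("Seek to %" PRIu32 " failed\n", off32);
        return 0;
}
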
@@ -389,7 +392,7 @@ static void read_ehdr(FILE *fp)
                Elf_Shdr shdr;
 
                if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
-                       die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n", ehdr.e_shoff, strerror(errno));
 
                if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
                        die("Cannot read initial ELF section header: %s\n", strerror(errno));
@@ -412,17 +415,17 @@ static void read_shdrs(FILE *fp)
 
        secs = calloc(shnum, sizeof(struct section));
        if (!secs) {
-               die("Unable to allocate %d section headers\n",
+               die("Unable to allocate %ld section headers\n",
                    shnum);
        }
        if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
-               die("Seek to %d failed: %s\n",
-                       ehdr.e_shoff, strerror(errno));
+               die("Seek to %" FMT " failed: %s\n",
+                   ehdr.e_shoff, strerror(errno));
        }
        for (i = 0; i < shnum; i++) {
                struct section *sec = &secs[i];
                if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
-                       die("Cannot read ELF section headers %d/%d: %s\n",
+                       die("Cannot read ELF section headers %d/%ld: %s\n",
                            i, shnum, strerror(errno));
                sec->shdr.sh_name      = elf_word_to_cpu(shdr.sh_name);
                sec->shdr.sh_type      = elf_word_to_cpu(shdr.sh_type);
@@ -450,12 +453,12 @@ static void read_strtabs(FILE *fp)
                }
                sec->strtab = malloc(sec->shdr.sh_size);
                if (!sec->strtab) {
-                       die("malloc of %d bytes for strtab failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for strtab failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
@@ -475,12 +478,12 @@ static void read_symtabs(FILE *fp)
                }
                sec->symtab = malloc(sec->shdr.sh_size);
                if (!sec->symtab) {
-                       die("malloc of %d bytes for symtab failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for symtab failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
@@ -508,12 +511,12 @@ static void read_relocs(FILE *fp)
                }
                sec->reltab = malloc(sec->shdr.sh_size);
                if (!sec->reltab) {
-                       die("malloc of %d bytes for relocs failed\n",
-                               sec->shdr.sh_size);
+                       die("malloc of %" FMT " bytes for relocs failed\n",
+                           sec->shdr.sh_size);
                }
                if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
-                       die("Seek to %d failed: %s\n",
-                               sec->shdr.sh_offset, strerror(errno));
+                       die("Seek to %" FMT " failed: %s\n",
+                           sec->shdr.sh_offset, strerror(errno));
                }
                if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
                    != sec->shdr.sh_size) {
index 43c83c0fd22c80e2e77b7a53fe363a4ce2bc5f54..4c49c82446eb52ce95bc4996acd36cdfb1691a32 100644 (file)
@@ -17,6 +17,7 @@
 #include <regex.h>
 #include <tools/le_byteshift.h>
 
+__attribute__((__format__(printf, 1, 2)))
 void die(char *fmt, ...) __attribute__((noreturn));
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
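
With the printf-format attribute added above, the compiler checks die()'s variadic arguments against the format string exactly as it does for printf, catching mismatches like the ones fixed in relocs.c. An illustrative standalone version:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

__attribute__((__format__(printf, 1, 2), __noreturn__))
static void die(const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        exit(1);
}

int main(void)
{
        die("Seek to %lu failed\n", 42UL);      /* "%d" with 42UL would warn */
}
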
index 64053d67a97b783795bd9c6b3067f34dfea910f3..2f2158e05a91c1eaa85bdaa142e33a1748441d65 100644 (file)
@@ -9,12 +9,6 @@ config MQ_IOSCHED_DEADLINE
        help
          MQ version of the deadline IO scheduler.
 
-config MQ_IOSCHED_DEADLINE_CGROUP
-       tristate
-       default y
-       depends on MQ_IOSCHED_DEADLINE
-       depends on BLK_CGROUP
-
 config MQ_IOSCHED_KYBER
        tristate "Kyber I/O scheduler"
        default y
index bfbe4e13ca1ef244b0b97718a8e1be9ae2db119f..1e1afa10f869d192100ec8764800c911f690bedc 100644 (file)
@@ -22,8 +22,6 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO)       += blk-ioprio.o
 obj-$(CONFIG_BLK_CGROUP_IOLATENCY)     += blk-iolatency.o
 obj-$(CONFIG_BLK_CGROUP_IOCOST)        += blk-iocost.o
 obj-$(CONFIG_MQ_IOSCHED_DEADLINE)      += mq-deadline.o
-mq-deadline-y += mq-deadline-main.o
-mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o
 obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
 bfq-y                          := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ)      += bfq.o
index 575d7a2e7203b87a6df7b360cef002b143dc8a19..31fe9be179d99b8a0fd07feb44cc47459428c205 100644 (file)
@@ -790,6 +790,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                struct blkcg_gq *parent = blkg->parent;
                struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
                struct blkg_iostat cur, delta;
+               unsigned long flags;
                unsigned int seq;
 
                /* fetch the current per-cpu values */
@@ -799,21 +800,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
                } while (u64_stats_fetch_retry(&bisc->sync, seq));
 
                /* propagate percpu delta to global */
-               u64_stats_update_begin(&blkg->iostat.sync);
+               flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                blkg_iostat_set(&delta, &cur);
                blkg_iostat_sub(&delta, &bisc->last);
                blkg_iostat_add(&blkg->iostat.cur, &delta);
                blkg_iostat_add(&bisc->last, &delta);
-               u64_stats_update_end(&blkg->iostat.sync);
+               u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 
                /* propagate global delta to parent (unless that's root) */
                if (parent && parent->parent) {
-                       u64_stats_update_begin(&parent->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
                        blkg_iostat_set(&delta, &blkg->iostat.cur);
                        blkg_iostat_sub(&delta, &blkg->iostat.last);
                        blkg_iostat_add(&parent->iostat.cur, &delta);
                        blkg_iostat_add(&blkg->iostat.last, &delta);
-                       u64_stats_update_end(&parent->iostat.sync);
+                       u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
                }
        }
 
@@ -848,6 +849,7 @@ static void blkcg_fill_root_iostats(void)
                memset(&tmp, 0, sizeof(tmp));
                for_each_possible_cpu(cpu) {
                        struct disk_stats *cpu_dkstats;
+                       unsigned long flags;
 
                        cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
                        tmp.ios[BLKG_IOSTAT_READ] +=
@@ -864,9 +866,9 @@ static void blkcg_fill_root_iostats(void)
                        tmp.bytes[BLKG_IOSTAT_DISCARD] +=
                                cpu_dkstats->sectors[STAT_DISCARD] << 9;
 
-                       u64_stats_update_begin(&blkg->iostat.sync);
+                       flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                        blkg_iostat_set(&blkg->iostat.cur, &tmp);
-                       u64_stats_update_end(&blkg->iostat.sync);
+                       u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
                }
        }
 }
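
u64_stats_update_begin/end wrap a seqcount: the writer bumps the sequence to an odd value, updates the 64-bit counters, then bumps it back to even; readers retry if they observed an odd or changed sequence. The _irqsave variants used above additionally mask interrupts so an irq cannot nest a second writer on the same CPU. A single-writer userspace sketch of the seqcount half only (seq_cst atomics for simplicity; interrupt masking has no userspace analogue):

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint seq;
static atomic_uint_least64_t bytes;     /* the protected 64-bit counter */

static void write_stats(uint64_t delta) /* assumes a single writer */
{
        atomic_fetch_add(&seq, 1);      /* odd: update in progress */
        atomic_store(&bytes, atomic_load(&bytes) + delta);
        atomic_fetch_add(&seq, 1);      /* even: update done */
}

static uint64_t read_stats(void)
{
        unsigned int s;
        uint64_t v;

        do {
                s = atomic_load(&seq);
                v = atomic_load(&bytes);
        } while ((s & 1) || atomic_load(&seq) != s);

        return v;
}
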
index 04477697ee4b4df8de9edb403ffa45b8286fe345..4f8449b29b2146ae88e1536131388f9a5937564a 100644 (file)
@@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->internal_tag = BLK_MQ_NO_TAG;
        rq->start_time_ns = ktime_get_ns();
        rq->part = NULL;
-       refcount_set(&rq->ref, 1);
        blk_crypto_rq_set_defaults(rq);
 }
 EXPORT_SYMBOL(blk_rq_init);
index 1002f6c581816d111f28b085fb647931d591b32d..4201728bf3a5a8144e9c0bf76c87d69898f1bf37 100644 (file)
@@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
+bool is_flush_rq(struct request *rq)
+{
+       return rq->end_io == flush_end_io;
+}
+
 /**
  * blk_kick_flush - consider issuing flush request
  * @q: request_queue being kicked
@@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
+       /*
+        * Order the write of ->end_io against the write of rq->ref. This
+        * pairs with the barrier implied by refcount_inc_not_zero() in
+        * blk_mq_find_and_get_req(), which orders the WRITE/READ of
+        * flush_rq->ref against the READ of flush_rq->end_io.
+        */
+       smp_wmb();
+       refcount_set(&flush_rq->ref, 1);
 
        blk_flush_queue_rq(flush_rq, false);
 }
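
The smp_wmb() publishes ->end_io before the refcount becomes visible as nonzero, pairing with the ordering a successful refcount_inc_not_zero() provides on the reader side. The same publication pattern with C11 atomics (struct and function names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct req {
        void (*end_io)(struct req *);
        atomic_uint ref;
};

static void publish(struct req *rq, void (*fn)(struct req *))
{
        rq->end_io = fn;                        /* plain write ... */
        atomic_store_explicit(&rq->ref, 1,
                              memory_order_release);    /* ... ordered before ref = 1 */
}

static bool try_get(struct req *rq)
{
        unsigned int v = atomic_load_explicit(&rq->ref, memory_order_acquire);

        while (v) {
                /* inc_not_zero: only take a reference if one already exists */
                if (atomic_compare_exchange_weak_explicit(&rq->ref, &v, v + 1,
                                                          memory_order_acquire,
                                                          memory_order_acquire))
                        return true;    /* rq->end_io is now safe to read */
        }
        return false;
}
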
index 5fac3757e6e052d4b0da81c8bd090a9111043349..0e56557cacf266bc83a33e30c84af452577c876a 100644 (file)
@@ -3061,19 +3061,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
                if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
                        return -EINVAL;
 
-               spin_lock(&blkcg->lock);
+               spin_lock_irq(&blkcg->lock);
                iocc->dfl_weight = v * WEIGHT_ONE;
                hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                        struct ioc_gq *iocg = blkg_to_iocg(blkg);
 
                        if (iocg) {
-                               spin_lock_irq(&iocg->ioc->lock);
+                               spin_lock(&iocg->ioc->lock);
                                ioc_now(iocg->ioc, &now);
                                weight_updated(iocg, &now);
-                               spin_unlock_irq(&iocg->ioc->lock);
+                               spin_unlock(&iocg->ioc->lock);
                        }
                }
-               spin_unlock(&blkcg->lock);
+               spin_unlock_irq(&blkcg->lock);
 
                return nbytes;
        }
index 2c4ac51e54ebae329e239a431244c57a06519b64..9d4fdc2be88a5e5eb7d995fc1592cbd932eed2f9 100644 (file)
@@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-       if (is_flush_rq(rq, rq->mq_hctx))
+       if (is_flush_rq(rq))
                rq->end_io(rq, 0);
        else if (refcount_dec_and_test(&rq->ref))
                __blk_mq_free_request(rq);
@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
        unsigned long *next = priv;
 
        /*
-        * Just do a quick check if it is expired before locking the request in
-        * so we're not unnecessarilly synchronizing across CPUs.
-        */
-       if (!blk_mq_req_expired(rq, next))
-               return true;
-
-       /*
-        * We have reason to believe the request may be expired. Take a
-        * reference on the request to lock this request lifetime into its
-        * currently allocated context to prevent it from being reallocated in
-        * the event the completion by-passes this timeout handler.
-        *
-        * If the reference was already released, then the driver beat the
-        * timeout handler to posting a natural completion.
-        */
-       if (!refcount_inc_not_zero(&rq->ref))
-               return true;
-
-       /*
-        * The request is now locked and cannot be reallocated underneath the
-        * timeout handler's processing. Re-verify this exact request is truly
-        * expired; if it is not expired, then the request was completed and
-        * reallocated as a new request.
+        * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+        * be reallocated underneath the timeout handler's processing, so
+        * the expire check is reliable. If the request is not expired, then
+        * it was completed and reallocated as a new request after returning
+        * from blk_mq_check_expired().
         */
        if (blk_mq_req_expired(rq, next))
                blk_mq_rq_timed_out(rq, reserved);
-
-       blk_mq_put_rq_ref(rq);
        return true;
 }
 
@@ -2994,10 +2974,12 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared)
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (shared)
+               if (shared) {
                        hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
-               else
+               } else {
+                       blk_mq_tag_idle(hctx);
                        hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
+               }
        }
 }
 
index 4b885c0f6708d96d73441006a4f87820e13c576a..cb01429c162c6f950a3edc3b4274f282f2e648b4 100644 (file)
@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
        kobject_get(&q->kobj);
 }
 
-static inline bool
-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
-{
-       return hctx->fq->flush_rq == req;
-}
+bool is_flush_rq(struct request *req);
 
 struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags);
diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c
deleted file mode 100644 (file)
index 3b4bfdd..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/blk-cgroup.h>
-#include <linux/ioprio.h>
-
-#include "mq-deadline-cgroup.h"
-
-static struct blkcg_policy dd_blkcg_policy;
-
-static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp)
-{
-       struct dd_blkcg *pd;
-
-       pd = kzalloc(sizeof(*pd), gfp);
-       if (!pd)
-               return NULL;
-       pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
-                                    GFP_KERNEL | __GFP_ZERO);
-       if (!pd->stats) {
-               kfree(pd);
-               return NULL;
-       }
-       return &pd->cpd;
-}
-
-static void dd_cpd_free(struct blkcg_policy_data *cpd)
-{
-       struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd);
-
-       free_percpu(dd_blkcg->stats);
-       kfree(dd_blkcg);
-}
-
-static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd)
-{
-       return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy),
-                           struct dd_blkcg, cpd);
-}
-
-/*
- * Convert an association between a block cgroup and a request queue into a
- * pointer to the mq-deadline information associated with a (blkcg, queue) pair.
- */
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
-       struct blkg_policy_data *pd;
-
-       pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy);
-       if (!pd)
-               return NULL;
-
-       return dd_blkcg_from_pd(pd);
-}
-
-static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
-{
-       static const char *const prio_class_name[] = {
-               [IOPRIO_CLASS_NONE]     = "NONE",
-               [IOPRIO_CLASS_RT]       = "RT",
-               [IOPRIO_CLASS_BE]       = "BE",
-               [IOPRIO_CLASS_IDLE]     = "IDLE",
-       };
-       struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
-       int res = 0;
-       u8 prio;
-
-       for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
-               res += scnprintf(buf + res, size - res,
-                       " [%s] dispatched=%u inserted=%u merged=%u",
-                       prio_class_name[prio],
-                       ddcg_sum(blkcg, dispatched, prio) +
-                       ddcg_sum(blkcg, merged, prio) -
-                       ddcg_sum(blkcg, completed, prio),
-                       ddcg_sum(blkcg, inserted, prio) -
-                       ddcg_sum(blkcg, completed, prio),
-                       ddcg_sum(blkcg, merged, prio));
-
-       return res;
-}
-
-static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
-                                           struct blkcg *blkcg)
-{
-       struct dd_blkg *pd;
-
-       pd = kzalloc(sizeof(*pd), gfp);
-       if (!pd)
-               return NULL;
-       return &pd->pd;
-}
-
-static void dd_pd_free(struct blkg_policy_data *pd)
-{
-       struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd);
-
-       kfree(dd_blkg);
-}
-
-static struct blkcg_policy dd_blkcg_policy = {
-       .cpd_alloc_fn           = dd_cpd_alloc,
-       .cpd_free_fn            = dd_cpd_free,
-
-       .pd_alloc_fn            = dd_pd_alloc,
-       .pd_free_fn             = dd_pd_free,
-       .pd_stat_fn             = dd_pd_stat,
-};
-
-int dd_activate_policy(struct request_queue *q)
-{
-       return blkcg_activate_policy(q, &dd_blkcg_policy);
-}
-
-void dd_deactivate_policy(struct request_queue *q)
-{
-       blkcg_deactivate_policy(q, &dd_blkcg_policy);
-}
-
-int __init dd_blkcg_init(void)
-{
-       return blkcg_policy_register(&dd_blkcg_policy);
-}
-
-void __exit dd_blkcg_exit(void)
-{
-       blkcg_policy_unregister(&dd_blkcg_policy);
-}
diff --git a/block/mq-deadline-cgroup.h b/block/mq-deadline-cgroup.h
deleted file mode 100644 (file)
index 0143fd7..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#if !defined(_MQ_DEADLINE_CGROUP_H_)
-#define _MQ_DEADLINE_CGROUP_H_
-
-#include <linux/blk-cgroup.h>
-
-struct request_queue;
-
-/**
- * struct io_stats_per_prio - I/O statistics per I/O priority class.
- * @inserted: Number of inserted requests.
- * @merged: Number of merged requests.
- * @dispatched: Number of dispatched requests.
- * @completed: Number of I/O completions.
- */
-struct io_stats_per_prio {
-       local_t inserted;
-       local_t merged;
-       local_t dispatched;
-       local_t completed;
-};
-
-/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */
-struct blkcg_io_stats {
-       struct io_stats_per_prio stats[4];
-};
-
-/**
- * struct dd_blkcg - Per cgroup data.
- * @cpd: blkcg_policy_data structure.
- * @stats: I/O statistics.
- */
-struct dd_blkcg {
-       struct blkcg_policy_data cpd;   /* must be the first member */
-       struct blkcg_io_stats __percpu *stats;
-};
-
-/*
- * Count one event of type 'event_type' and with I/O priority class
- * 'prio_class'.
- */
-#define ddcg_count(ddcg, event_type, prio_class) do {                  \
-if (ddcg) {                                                            \
-       struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats);   \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));          \
-       BUILD_BUG_ON(!__same_type((prio_class), u8));                   \
-       local_inc(&io_stats->stats[(prio_class)].event_type);           \
-       put_cpu_ptr(io_stats);                                          \
-}                                                                      \
-} while (0)
-
-/*
- * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls
- * across all CPUs. No locking or barriers since it is fine if the returned
- * sum is slightly outdated.
- */
-#define ddcg_sum(ddcg, event_type, prio) ({                            \
-       unsigned int cpu;                                               \
-       u32 sum = 0;                                                    \
-                                                                       \
-       BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *));          \
-       BUILD_BUG_ON(!__same_type((prio), u8));                         \
-       for_each_present_cpu(cpu)                                       \
-               sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)->    \
-                                 stats[(prio)].event_type);            \
-       sum;                                                            \
-})
-
-#ifdef CONFIG_BLK_CGROUP
-
-/**
- * struct dd_blkg - Per (cgroup, request queue) data.
- * @pd: blkg_policy_data structure.
- */
-struct dd_blkg {
-       struct blkg_policy_data pd;     /* must be the first member */
-};
-
-struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
-int dd_activate_policy(struct request_queue *q);
-void dd_deactivate_policy(struct request_queue *q);
-int __init dd_blkcg_init(void);
-void __exit dd_blkcg_exit(void);
-
-#else /* CONFIG_BLK_CGROUP */
-
-static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
-{
-       return NULL;
-}
-
-static inline int dd_activate_policy(struct request_queue *q)
-{
-       return 0;
-}
-
-static inline void dd_deactivate_policy(struct request_queue *q)
-{
-}
-
-static inline int dd_blkcg_init(void)
-{
-       return 0;
-}
-
-static inline void dd_blkcg_exit(void)
-{
-}
-
-#endif /* CONFIG_BLK_CGROUP */
-
-#endif /* _MQ_DEADLINE_CGROUP_H_ */
similarity index 91%
rename from block/mq-deadline-main.c
rename to block/mq-deadline.c
index 6f612e6dc82b6e165f804267d167857be863d8a4..36920670dccc35f3c4d019423a53754400c3a40a 100644 (file)
 #include "blk-mq-debugfs.h"
 #include "blk-mq-tag.h"
 #include "blk-mq-sched.h"
-#include "mq-deadline-cgroup.h"
 
 /*
  * See Documentation/block/deadline-iosched.rst
  */
 static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-/*
- * Time after which to dispatch lower priority requests even if higher
- * priority requests are pending.
- */
-static const int aging_expire = 10 * HZ;
 static const int writes_starved = 2;    /* max times reads can starve a write */
 static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                     by the above parameters. For throughput. */
@@ -57,6 +51,14 @@ enum dd_prio {
 
 enum { DD_PRIO_COUNT = 3 };
 
+/* I/O statistics per I/O priority. */
+struct io_stats_per_prio {
+       local_t inserted;
+       local_t merged;
+       local_t dispatched;
+       local_t completed;
+};
+
 /* I/O statistics for all I/O priorities (enum dd_prio). */
 struct io_stats {
        struct io_stats_per_prio stats[DD_PRIO_COUNT];
@@ -79,9 +81,6 @@ struct deadline_data {
         * run time data
         */
 
-       /* Request queue that owns this data structure. */
-       struct request_queue *queue;
-
        struct dd_per_prio per_prio[DD_PRIO_COUNT];
 
        /* Data direction of latest dispatched request. */
@@ -99,7 +98,6 @@ struct deadline_data {
        int writes_starved;
        int front_merges;
        u32 async_depth;
-       int aging_expire;
 
        spinlock_t lock;
        spinlock_t zone_lock;
@@ -234,10 +232,8 @@ static void dd_merged_requests(struct request_queue *q, struct request *req,
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(next);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-       struct dd_blkcg *blkcg = next->elv.priv[0];
 
        dd_count(dd, merged, prio);
-       ddcg_count(blkcg, merged, ioprio_class);
 
        /*
         * if next expires before rq, assign its expire time to rq
@@ -367,15 +363,13 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 
 /*
  * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc and with a start time <= @latest.
+ * read/write expire, fifo_batch, etc.
  */
 static struct request *__dd_dispatch_request(struct deadline_data *dd,
-                                            struct dd_per_prio *per_prio,
-                                            u64 latest_start_ns)
+                                            struct dd_per_prio *per_prio)
 {
        struct request *rq, *next_rq;
        enum dd_data_dir data_dir;
-       struct dd_blkcg *blkcg;
        enum dd_prio prio;
        u8 ioprio_class;
 
@@ -384,8 +378,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
        if (!list_empty(&per_prio->dispatch)) {
                rq = list_first_entry(&per_prio->dispatch, struct request,
                                      queuelist);
-               if (rq->start_time_ns > latest_start_ns)
-                       return NULL;
                list_del_init(&rq->queuelist);
                goto done;
        }
@@ -463,8 +455,6 @@ dispatch_find_request:
        dd->batching = 0;
 
 dispatch_request:
-       if (rq->start_time_ns > latest_start_ns)
-               return NULL;
        /*
         * rq is the selected appropriate request.
         */
@@ -474,8 +464,6 @@ done:
        ioprio_class = dd_rq_ioclass(rq);
        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, dispatched, prio);
-       blkcg = rq->elv.priv[0];
-       ddcg_count(blkcg, dispatched, ioprio_class);
        /*
         * If the request needs its target zone locked, do it.
         */
@@ -495,32 +483,15 @@ done:
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-       const u64 now_ns = ktime_get_ns();
-       struct request *rq = NULL;
+       struct request *rq;
        enum dd_prio prio;
 
        spin_lock(&dd->lock);
-       /*
-        * Start with dispatching requests whose deadline expired more than
-        * aging_expire jiffies ago.
-        */
-       for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
-                                          jiffies_to_nsecs(dd->aging_expire));
-               if (rq)
-                       goto unlock;
-       }
-       /*
-        * Next, dispatch requests in priority order. Ignore lower priority
-        * requests if any higher priority requests are pending.
-        */
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
-               if (rq || dd_queued(dd, prio))
+               rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
+               if (rq)
                        break;
        }
-
-unlock:
        spin_unlock(&dd->lock);
 
        return rq;
@@ -569,8 +540,6 @@ static void dd_exit_sched(struct elevator_queue *e)
        struct deadline_data *dd = e->elevator_data;
        enum dd_prio prio;
 
-       dd_deactivate_policy(dd->queue);
-
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
@@ -584,7 +553,7 @@ static void dd_exit_sched(struct elevator_queue *e)
 }
 
 /*
- * Initialize elevator private data (deadline_data) and associate with blkcg.
+ * Initialize elevator private data (deadline_data).
  */
 static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 {
@@ -593,12 +562,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
        enum dd_prio prio;
        int ret = -ENOMEM;
 
-       /*
-        * Initialization would be very tricky if the queue is not frozen,
-        * hence the warning statement below.
-        */
-       WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));
-
        eq = elevator_alloc(q, e);
        if (!eq)
                return ret;
@@ -614,8 +577,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
        if (!dd->stats)
                goto free_dd;
 
-       dd->queue = q;
-
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
@@ -631,21 +592,12 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
        dd->front_merges = 1;
        dd->last_dir = DD_WRITE;
        dd->fifo_batch = fifo_batch;
-       dd->aging_expire = aging_expire;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
 
-       ret = dd_activate_policy(q);
-       if (ret)
-               goto free_stats;
-
-       ret = 0;
        q->elevator = eq;
        return 0;
 
-free_stats:
-       free_percpu(dd->stats);
-
 free_dd:
        kfree(dd);
 
@@ -718,7 +670,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
        u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
        struct dd_per_prio *per_prio;
        enum dd_prio prio;
-       struct dd_blkcg *blkcg;
        LIST_HEAD(free);
 
        lockdep_assert_held(&dd->lock);
@@ -729,18 +680,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
         */
        blk_req_zone_write_unlock(rq);
 
-       /*
-        * If a block cgroup has been associated with the submitter and if an
-        * I/O priority has been set in the associated block cgroup, use the
-        * lowest of the cgroup priority and the request priority for the
-        * request. If no priority has been set in the request, use the cgroup
-        * priority.
-        */
        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, inserted, prio);
-       blkcg = dd_blkcg_from_bio(rq->bio);
-       ddcg_count(blkcg, inserted, ioprio_class);
-       rq->elv.priv[0] = blkcg;
+       rq->elv.priv[0] = (void *)(uintptr_t)1;
 
        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                blk_mq_free_requests(&free);
@@ -815,13 +757,18 @@ static void dd_finish_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
        struct deadline_data *dd = q->elevator->elevator_data;
-       struct dd_blkcg *blkcg = rq->elv.priv[0];
        const u8 ioprio_class = dd_rq_ioclass(rq);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
-       dd_count(dd, completed, prio);
-       ddcg_count(blkcg, completed, ioprio_class);
+       /*
+        * The block layer core may call dd_finish_request() without having
+        * called dd_insert_requests(). Hence only update statistics for
+        * requests for which dd_insert_requests() has been called. See also
+        * blk_mq_request_bypass_insert().
+        */
+       if (rq->elv.priv[0])
+               dd_count(dd, completed, prio);
 
        if (blk_queue_is_zoned(q)) {
                unsigned long flags;
@@ -866,7 +813,6 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page)         \
 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
-SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
 SHOW_INT(deadline_async_depth_show, dd->async_depth);
@@ -896,7 +842,6 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
-STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
 STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
@@ -915,7 +860,6 @@ static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(front_merges),
        DD_ATTR(async_depth),
        DD_ATTR(fifo_batch),
-       DD_ATTR(aging_expire),
        __ATTR_NULL
 };
 
@@ -1144,26 +1088,11 @@ MODULE_ALIAS("mq-deadline-iosched");
 
 static int __init deadline_init(void)
 {
-       int ret;
-
-       ret = elv_register(&mq_deadline);
-       if (ret)
-               goto out;
-       ret = dd_blkcg_init();
-       if (ret)
-               goto unreg;
-
-out:
-       return ret;
-
-unreg:
-       elv_unregister(&mq_deadline);
-       goto out;
+       return elv_register(&mq_deadline);
 }
 
 static void __exit deadline_exit(void)
 {
-       dd_blkcg_exit();
        elv_unregister(&mq_deadline);
 }
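
Net user-visible effect of the revert: the aging_expire tunable disappears along with the cgroup accounting, so /sys/block/<dev>/queue/iosched/ again exposes only read_expire, write_expire, writes_starved, front_merges, async_depth and fifo_batch.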
 
index f4e61116f94ed9bf9394645e04ad90c9c10a565c..ae7f2e876a31b584ad43172c226a6be19704fd9a 100644 (file)
@@ -15,6 +15,32 @@ config MODULE_SIG_KEY
          then the kernel will automatically generate the private key and
          certificate as described in Documentation/admin-guide/module-signing.rst
 
+choice
+       prompt "Type of module signing key to be generated"
+       default MODULE_SIG_KEY_TYPE_RSA
+       help
+        The type of module signing key to generate. This option
+        does not apply if a PKCS#11 URI is used.
+
+config MODULE_SIG_KEY_TYPE_RSA
+       bool "RSA"
+       depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
+       help
+        Use an RSA key for module signing.
+
+config MODULE_SIG_KEY_TYPE_ECDSA
+       bool "ECDSA"
+       select CRYPTO_ECDSA
+       depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
+       help
+        Use an elliptic curve key (NIST P-384) for module signing. Consider
+        using a strong hash like sha256 or sha384 for hashing modules.
+
+        Note: Remove all ECDSA signing keys, e.g. certs/signing_key.pem,
+        when falling back to building Linux 5.14 or older kernels.
+
+endchoice
+
 config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
        depends on KEYS
index 359239a0ee9e32b7bd11cfa08a9f1224f2a3156b..279433783b10af16ead0e84e2cf6fd4be65193e2 100644 (file)
@@ -57,11 +57,31 @@ endif
 redirect_openssl       = 2>&1
 quiet_redirect_openssl = 2>&1
 silent_redirect_openssl = 2>/dev/null
+openssl_available       = $(shell openssl help 2>/dev/null && echo yes)
 
 # We do it this way rather than having a boolean option for enabling an
 # external private key, because 'make randconfig' might enable such a
 # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
 ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
+
+ifeq ($(openssl_available),yes)
+X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
+endif
+
+# Support user changing key type
+ifdef CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
+keytype_openssl = -newkey ec -pkeyopt ec_paramgen_curve:secp384r1
+ifeq ($(openssl_available),yes)
+$(if $(findstring id-ecPublicKey,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
+endif
+endif # CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
+
+ifdef CONFIG_MODULE_SIG_KEY_TYPE_RSA
+ifeq ($(openssl_available),yes)
+$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
+endif
+endif # CONFIG_MODULE_SIG_KEY_TYPE_RSA
+
 $(obj)/signing_key.pem: $(obj)/x509.genkey
        @$(kecho) "###"
        @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
@@ -75,6 +95,7 @@ $(obj)/signing_key.pem: $(obj)/x509.genkey
                -batch -x509 -config $(obj)/x509.genkey \
                -outform PEM -out $(obj)/signing_key.pem \
                -keyout $(obj)/signing_key.pem \
+               $(keytype_openssl) \
                $($(quiet)redirect_openssl)
        @$(kecho) "###"
        @$(kecho) "### Key pair generated."
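
In short: when the selected key type changes, the autogenerated certs/signing_key.pem is inspected via 'openssl x509 -text' and deleted if its algorithm marker (id-ecPublicKey vs. rsaEncryption) no longer matches, so the next build regenerates a key of the chosen type through the extra $(keytype_openssl) arguments.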
index ca3b02dcbbfac7ecb5daf3c4035205ffc733372f..536df4b6b825c58b2b2e10157aec9810493209ed 100644 (file)
@@ -1547,6 +1547,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
 config CRYPTO_SM4
        tristate "SM4 cipher algorithm"
        select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
        help
          SM4 cipher algorithms (OSCCA GB/T 32907-2016).
 
@@ -1569,6 +1570,49 @@ config CRYPTO_SM4
 
          If unsure, say N.
 
+config CRYPTO_SM4_AESNI_AVX_X86_64
+       tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX)"
+       depends on X86 && 64BIT
+       select CRYPTO_SKCIPHER
+       select CRYPTO_SIMD
+       select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
+       help
+         SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX).
+
+         SM4 (GB/T 32907-2016) is a cryptographic standard issued by the
+         Organization of State Commercial Administration of China (OSCCA)
+         as an authorized cryptographic algorithm for use within China.
+
+         This is an SM4 implementation optimized for x86_64 with the
+         AES-NI and AVX instruction set extensions. Through two affine
+         transforms, the AES S-Box can be used to simulate the SM4 S-Box,
+         so the block cipher benefits from AES instruction acceleration.
+
+         If unsure, say N.
+
+config CRYPTO_SM4_AESNI_AVX2_X86_64
+       tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX2)"
+       depends on X86 && 64BIT
+       select CRYPTO_SKCIPHER
+       select CRYPTO_SIMD
+       select CRYPTO_ALGAPI
+       select CRYPTO_LIB_SM4
+       select CRYPTO_SM4_AESNI_AVX_X86_64
+       help
+         SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2).
+
+         SM4 (GB/T 32907-2016) is a cryptographic standard issued by the
+         Organization of State Commercial Administration of China (OSCCA)
+         as an authorized cryptographic algorithm for use within China.
+
+         This is an SM4 implementation optimized for x86_64 with the
+         AES-NI and AVX2 instruction set extensions. Through two affine
+         transforms, the AES S-Box can be used to simulate the SM4 S-Box,
+         so the block cipher benefits from AES instruction acceleration.
+
+         If unsure, say N.
+
 config CRYPTO_TEA
        tristate "TEA, XTEA and XETA cipher algorithms"
        depends on CRYPTO_USER_API_ENABLE_OBSOLETE
@@ -1768,7 +1812,7 @@ config CRYPTO_DRBG_HMAC
        bool
        default y
        select CRYPTO_HMAC
-       select CRYPTO_SHA256
+       select CRYPTO_SHA512
 
 config CRYPTO_DRBG_HASH
        bool "Enable Hash DRBG"
index 10526d4559b802b27224215ba1ac32b3cacc9abe..c633f15a048136805970627be563680c8d3fdb06 100644 (file)
@@ -74,7 +74,6 @@ obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
 obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
-obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
 obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
index 967329e0a07b7958a60b334d24d89273868cb5ba..6592279d839aff6d267cd225e5e51f0e6bbe0ea2 100644 (file)
@@ -269,6 +269,14 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
                ctx->sinfo->sig->pkey_algo = "rsa";
                ctx->sinfo->sig->encoding = "pkcs1";
                break;
+       case OID_id_ecdsa_with_sha1:
+       case OID_id_ecdsa_with_sha224:
+       case OID_id_ecdsa_with_sha256:
+       case OID_id_ecdsa_with_sha384:
+       case OID_id_ecdsa_with_sha512:
+               ctx->sinfo->sig->pkey_algo = "ecdsa";
+               ctx->sinfo->sig->encoding = "x962";
+               break;
        default:
                printk("Unsupported pkey algo: %u\n", ctx->last_oid);
                return -ENOPKG;
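
With these OID mappings, PKCS#7/CMS verification hands ECDSA SignerInfo to the public-key layer with pkey_algo "ecdsa" and the X9.62 ("x962") signature encoding, which is what lets ECDSA-signed modules verify.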
index a006132646a4389e74bdadb1a27ac998cea97993..1350e8eb6ac238d5bc67c785d4253fa887c587d8 100644 (file)
@@ -27,6 +27,7 @@
 #define _CRYPTO_ECC_H
 
 #include <crypto/ecc_curve.h>
+#include <asm/unaligned.h>
 
 /* One digit is u64 qword. */
 #define ECC_CURVE_NIST_P192_DIGITS  3
  * @out:      Output array
  * @ndigits:  Number of digits to copy
  */
-static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
 {
        const __be64 *src = (__force __be64 *)in;
        int i;
 
        for (i = 0; i < ndigits; i++)
-               out[i] = be64_to_cpu(src[ndigits - 1 - i]);
+               out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
 }
 
 /**
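
Since the helper now loads through get_unaligned_be64(), callers may hand it a raw big-endian byte buffer of unknown alignment. A short sketch under that assumption (the payload pointer is hypothetical):

/* Unpack a wire-format (big-endian, possibly unaligned) NIST P-192
 * coordinate into the little-endian u64 digits the ECC code uses. */
static void unpack_p192_coord(const u8 *payload, u64 *digits)
{
        ecc_swap_digits(payload, digits, ECC_CURVE_NIST_P192_DIGITS);
}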
index c72d72ad828e1ff2c09eb2fd02e2270a9a4650ca..be70e76d6d8623ac5ec58990b024b4c03c78859d 100644 (file)
@@ -143,9 +143,6 @@ sha512_transform(u64 *state, const u8 *input)
 
        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;
-
-       /* erase our data */
-       a = b = c = d = e = f = g = h = t1 = t2 = 0;
 }
 
 static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
index a1537624541607c8095d627c505f0485d9b07155..418211180ceec379570c7de99ef43d86861b7c5a 100644 (file)
@@ -431,7 +431,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-       if (WARN_ON_ONCE(in_irq()))
+       if (WARN_ON_ONCE(in_hardirq()))
                return -EDEADLK;
 
        walk->buffer = NULL;
index 016dbc595705968cf94d4343fdac464ac097993f..4a6480a27fee51869331f74e64a329c00ec912ef 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
-static const u32 fk[4] = {
-       0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
-};
-
-static const u8 sbox[256] = {
-       0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
-       0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
-       0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
-       0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
-       0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
-       0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
-       0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
-       0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
-       0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
-       0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
-       0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
-       0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
-       0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
-       0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
-       0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
-       0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
-       0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
-       0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
-       0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
-       0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
-       0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
-       0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
-       0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
-       0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
-       0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
-       0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
-       0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
-       0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
-       0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
-       0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
-       0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
-       0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
-};
-
-static const u32 ck[] = {
-       0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
-       0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
-       0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
-       0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
-       0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
-       0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
-       0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
-       0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
-};
-
-static u32 sm4_t_non_lin_sub(u32 x)
-{
-       int i;
-       u8 *b = (u8 *)&x;
-
-       for (i = 0; i < 4; ++i)
-               b[i] = sbox[b[i]];
-
-       return x;
-}
-
-static u32 sm4_key_lin_sub(u32 x)
-{
-       return x ^ rol32(x, 13) ^ rol32(x, 23);
-
-}
-
-static u32 sm4_enc_lin_sub(u32 x)
-{
-       return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
-}
-
-static u32 sm4_key_sub(u32 x)
-{
-       return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static u32 sm4_enc_sub(u32 x)
-{
-       return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
-}
-
-static u32 sm4_round(const u32 *x, const u32 rk)
-{
-       return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk);
-}
-
-
 /**
- * crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016
- * @ctx:       The location where the computed key will be stored.
- * @in_key:    The supplied key.
- * @key_len:   The length of the supplied key.
- *
- * Returns 0 on success. The function fails only if an invalid key size (or
- * pointer) is supplied.
- */
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
-                         unsigned int key_len)
-{
-       u32 rk[4], t;
-       const u32 *key = (u32 *)in_key;
-       int i;
-
-       if (key_len != SM4_KEY_SIZE)
-               return -EINVAL;
-
-       for (i = 0; i < 4; ++i)
-               rk[i] = get_unaligned_be32(&key[i]) ^ fk[i];
-
-       for (i = 0; i < 32; ++i) {
-               t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
-               ctx->rkey_enc[i] = t;
-               rk[0] = rk[1];
-               rk[1] = rk[2];
-               rk[2] = rk[3];
-               rk[3] = t;
-       }
-
-       for (i = 0; i < 32; ++i)
-               ctx->rkey_dec[i] = ctx->rkey_enc[31 - i];
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
-
-/**
- * crypto_sm4_set_key - Set the SM4 key.
+ * sm4_setkey - Set the SM4 key.
  * @tfm:       The %crypto_tfm that is used in the context.
  * @in_key:    The input key.
  * @key_len:   The size of the key.
  *
- * This function uses crypto_sm4_expand_key() to expand the key.
- * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
+ * This function uses sm4_expandkey() to expand the key.
+ * &sm4_ctx _must_ be the private data embedded in @tfm which is
  * retrieved with crypto_tfm_ctx().
  *
  * Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
  */
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int sm4_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
 {
-       struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       return crypto_sm4_expand_key(ctx, in_key, key_len);
-}
-EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
-
-static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
-{
-       u32 x[4], i, t;
-
-       for (i = 0; i < 4; ++i)
-               x[i] = get_unaligned_be32(&in[i]);
-
-       for (i = 0; i < 32; ++i) {
-               t = sm4_round(x, rk[i]);
-               x[0] = x[1];
-               x[1] = x[2];
-               x[2] = x[3];
-               x[3] = t;
-       }
+       struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       for (i = 0; i < 4; ++i)
-               put_unaligned_be32(x[3 - i], &out[i]);
+       return sm4_expandkey(ctx, in_key, key_len);
 }
 
 /* encrypt a block of text */
 
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
+       sm4_crypt_block(ctx->rkey_enc, out, in);
 }
-EXPORT_SYMBOL_GPL(crypto_sm4_encrypt);
 
 /* decrypt a block of text */
 
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
-       const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
+       sm4_crypt_block(ctx->rkey_dec, out, in);
 }
-EXPORT_SYMBOL_GPL(crypto_sm4_decrypt);
 
 static struct crypto_alg sm4_alg = {
        .cra_name               =       "sm4",
@@ -208,15 +60,15 @@ static struct crypto_alg sm4_alg = {
        .cra_priority           =       100,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       SM4_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct crypto_sm4_ctx),
+       .cra_ctxsize            =       sizeof(struct sm4_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       SM4_KEY_SIZE,
                        .cia_max_keysize        =       SM4_KEY_SIZE,
-                       .cia_setkey             =       crypto_sm4_set_key,
-                       .cia_encrypt            =       crypto_sm4_encrypt,
-                       .cia_decrypt            =       crypto_sm4_decrypt
+                       .cia_setkey             =       sm4_setkey,
+                       .cia_encrypt            =       sm4_encrypt,
+                       .cia_decrypt            =       sm4_decrypt
                }
        }
 };
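
After this conversion the SM4 primitives live in a small shared library (selected by CRYPTO_LIB_SM4) so the generic cipher above and the new AES-NI/AVX drivers share one implementation. A minimal sketch of the library API as used here (the demo wrapper is hypothetical):

#include <crypto/sm4.h>

/* Hypothetical demo: expand a key and encrypt one 16-byte block. */
static int sm4_demo_encrypt_block(const u8 key[SM4_KEY_SIZE],
                                  const u8 in[SM4_BLOCK_SIZE],
                                  u8 out[SM4_BLOCK_SIZE])
{
        struct sm4_ctx ctx;
        int err;

        err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
        if (err)
                return err;     /* only fails for a bad key length */

        sm4_crypt_block(ctx.rkey_enc, out, in);
        return 0;
}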
index f8d06da78e4f3a9161043d7e8d767caa35075a16..82b0400985a51bb309e2f5bee9d025d4bbf46e65 100644 (file)
@@ -77,7 +77,7 @@ static const char *check[] = {
        NULL
 };
 
-static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 };
+static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 };
 static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
 
 #define XBUFSIZE 8
@@ -290,6 +290,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
        }
 
        ret = crypto_aead_setauthsize(tfm, authsize);
+       if (ret) {
+               pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
+                      ret);
+               goto out_free_tfm;
+       }
 
        for (i = 0; i < num_mb; ++i)
                if (testmgr_alloc_buf(data[i].xbuf)) {
@@ -315,7 +320,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
        for (i = 0; i < num_mb; ++i) {
                data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
                if (!data[i].req) {
-                       pr_err("alg: skcipher: Failed to allocate request for %s\n",
+                       pr_err("alg: aead: Failed to allocate request for %s\n",
                               algo);
                        while (i--)
                                aead_request_free(data[i].req);
@@ -567,13 +572,19 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
        sgout = &sg[9];
 
        tfm = crypto_alloc_aead(algo, 0, 0);
-
        if (IS_ERR(tfm)) {
                pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
                       PTR_ERR(tfm));
                goto out_notfm;
        }
 
+       ret = crypto_aead_setauthsize(tfm, authsize);
+       if (ret) {
+               pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
+                      ret);
+               goto out_noreq;
+       }
+
        crypto_init_wait(&wait);
        printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
                        get_driver_name(crypto_aead, tfm), e);
@@ -611,8 +622,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                                        break;
                                }
                        }
+
                        ret = crypto_aead_setkey(tfm, key, *keysize);
-                       ret = crypto_aead_setauthsize(tfm, authsize);
+                       if (ret) {
+                               pr_err("setkey() failed flags=%x: %d\n",
+                                       crypto_aead_get_flags(tfm), ret);
+                               goto out;
+                       }
 
                        iv_len = crypto_aead_ivsize(tfm);
                        if (iv_len)
@@ -622,15 +638,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                        printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
                                        i, *keysize * 8, bs);
 
-
                        memset(tvmem[0], 0xff, PAGE_SIZE);
 
-                       if (ret) {
-                               pr_err("setkey() failed flags=%x\n",
-                                               crypto_aead_get_flags(tfm));
-                               goto out;
-                       }
-
                        sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
                                     assoc, aad_size);
 
@@ -1907,6 +1916,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                ret += tcrypt_test("streebog512");
                break;
 
+       case 55:
+               ret += tcrypt_test("gcm(sm4)");
+               break;
+
+       case 56:
+               ret += tcrypt_test("ccm(sm4)");
+               break;
+
        case 100:
                ret += tcrypt_test("hmac(md5)");
                break;
@@ -1998,6 +2015,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
        case 157:
                ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
                break;
+
+       case 158:
+               ret += tcrypt_test("cbcmac(sm4)");
+               break;
+
+       case 159:
+               ret += tcrypt_test("cmac(sm4)");
+               break;
+
        case 181:
                ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
                break;
@@ -2031,6 +2057,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
        case 191:
                ret += tcrypt_test("ecb(sm4)");
                ret += tcrypt_test("cbc(sm4)");
+               ret += tcrypt_test("cfb(sm4)");
                ret += tcrypt_test("ctr(sm4)");
                break;
        case 200:
@@ -2289,6 +2316,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                speed_template_16);
                test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
                                speed_template_16);
+               test_cipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_cipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
                test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
                                speed_template_16);
                test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
@@ -2322,6 +2353,34 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                NULL, 0, 16, 8, speed_template_16);
                break;
 
+       case 222:
+               test_aead_speed("gcm(sm4)", ENCRYPT, sec,
+                               NULL, 0, 16, 8, speed_template_16);
+               test_aead_speed("gcm(sm4)", DECRYPT, sec,
+                               NULL, 0, 16, 8, speed_template_16);
+               break;
+
+       case 223:
+               test_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec,
+                               NULL, 0, 16, 16, aead_speed_template_19);
+               test_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec,
+                               NULL, 0, 16, 16, aead_speed_template_19);
+               break;
+
+       case 224:
+               test_mb_aead_speed("gcm(sm4)", ENCRYPT, sec, NULL, 0, 16, 8,
+                                  speed_template_16, num_mb);
+               test_mb_aead_speed("gcm(sm4)", DECRYPT, sec, NULL, 0, 16, 8,
+                                  speed_template_16, num_mb);
+               break;
+
+       case 225:
+               test_mb_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec, NULL, 0,
+                                  16, 16, aead_speed_template_19, num_mb);
+               test_mb_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec, NULL, 0,
+                                  16, 16, aead_speed_template_19, num_mb);
+               break;
+
        case 300:
                if (alg) {
                        test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -2757,6 +2816,25 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
                                   speed_template_8_32);
                break;
 
+       case 518:
+               test_acipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
+                               speed_template_16);
+               test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
+                               speed_template_16);
+               break;
+
        case 600:
                test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
                                       speed_template_16_24_32, num_mb);
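
As with the existing modes, these are exercised by loading the module with the mode number, e.g. 'modprobe tcrypt mode=222 sec=1' for the gcm(sm4) speed test; tcrypt deliberately refuses to stay loaded and reports its results through the kernel log.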
index c978e41f11a11b5d5ee1a211be55720605d27511..70f69f0910c9ea198bbe3d4bf77a3192f13ef630 100644 (file)
@@ -4450,6 +4450,12 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .hash = __VECS(aes_cbcmac_tv_template)
                }
+       }, {
+               .alg = "cbcmac(sm4)",
+               .test = alg_test_hash,
+               .suite = {
+                       .hash = __VECS(sm4_cbcmac_tv_template)
+               }
        }, {
                .alg = "ccm(aes)",
                .generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
@@ -4461,6 +4467,16 @@ static const struct alg_test_desc alg_test_descs[] = {
                                .einval_allowed = 1,
                        }
                }
+       }, {
+               .alg = "ccm(sm4)",
+               .generic_driver = "ccm_base(ctr(sm4-generic),cbcmac(sm4-generic))",
+               .test = alg_test_aead,
+               .suite = {
+                       .aead = {
+                               ____VECS(sm4_ccm_tv_template),
+                               .einval_allowed = 1,
+                       }
+               }
        }, {
                .alg = "cfb(aes)",
                .test = alg_test_skcipher,
@@ -4494,6 +4510,12 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .hash = __VECS(des3_ede_cmac64_tv_template)
                }
+       }, {
+               .alg = "cmac(sm4)",
+               .test = alg_test_hash,
+               .suite = {
+                       .hash = __VECS(sm4_cmac128_tv_template)
+               }
        }, {
                .alg = "compress_null",
                .test = alg_test_null,
@@ -4967,6 +4989,13 @@ static const struct alg_test_desc alg_test_descs[] = {
                .suite = {
                        .aead = __VECS(aes_gcm_tv_template)
                }
+       }, {
+               .alg = "gcm(sm4)",
+               .generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)",
+               .test = alg_test_aead,
+               .suite = {
+                       .aead = __VECS(sm4_gcm_tv_template)
+               }
        }, {
                .alg = "ghash",
                .test = alg_test_hash,
index 3ed6ab34ab512b5e7304035c67c541fdb2c14f8d..e6fca34b5b257ccdac6e9d91cf24a6e52e4a9b85 100644 (file)
@@ -13328,6 +13328,154 @@ static const struct cipher_testvec sm4_cfb_tv_template[] = {
        }
 };
 
+static const struct aead_testvec sm4_gcm_tv_template[] = {
+       { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.1 */
+               .key    = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+               .klen   = 16,
+               .iv     = "\x00\x00\x12\x34\x56\x78\x00\x00"
+                         "\x00\x00\xAB\xCD",
+               .ptext  = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+                         "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB"
+                         "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC"
+                         "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+               .plen   = 64,
+               .assoc  = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xAB\xAD\xDA\xD2",
+               .alen   = 20,
+               .ctext  = "\x17\xF3\x99\xF0\x8C\x67\xD5\xEE"
+                         "\x19\xD0\xDC\x99\x69\xC4\xBB\x7D"
+                         "\x5F\xD4\x6F\xD3\x75\x64\x89\x06"
+                         "\x91\x57\xB2\x82\xBB\x20\x07\x35"
+                         "\xD8\x27\x10\xCA\x5C\x22\xF0\xCC"
+                         "\xFA\x7C\xBF\x93\xD4\x96\xAC\x15"
+                         "\xA5\x68\x34\xCB\xCF\x98\xC3\x97"
+                         "\xB4\x02\x4A\x26\x91\x23\x3B\x8D"
+                         "\x83\xDE\x35\x41\xE4\xC2\xB5\x81"
+                         "\x77\xE0\x65\xA9\xBF\x7B\x62\xEC",
+               .clen   = 80,
+       }
+};
+
+static const struct aead_testvec sm4_ccm_tv_template[] = {
+       { /* From https://datatracker.ietf.org/doc/html/rfc8998#appendix-A.2 */
+               .key    = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+               .klen   = 16,
+               .iv     = "\x02\x00\x00\x12\x34\x56\x78\x00"
+                         "\x00\x00\x00\xAB\xCD\x00\x00\x00",
+               .ptext  = "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+                         "\xBB\xBB\xBB\xBB\xBB\xBB\xBB\xBB"
+                         "\xCC\xCC\xCC\xCC\xCC\xCC\xCC\xCC"
+                         "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
+                         "\xEE\xEE\xEE\xEE\xEE\xEE\xEE\xEE"
+                         "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+               .plen   = 64,
+               .assoc  = "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xFE\xED\xFA\xCE\xDE\xAD\xBE\xEF"
+                         "\xAB\xAD\xDA\xD2",
+               .alen   = 20,
+               .ctext  = "\x48\xAF\x93\x50\x1F\xA6\x2A\xDB"
+                         "\xCD\x41\x4C\xCE\x60\x34\xD8\x95"
+                         "\xDD\xA1\xBF\x8F\x13\x2F\x04\x20"
+                         "\x98\x66\x15\x72\xE7\x48\x30\x94"
+                         "\xFD\x12\xE5\x18\xCE\x06\x2C\x98"
+                         "\xAC\xEE\x28\xD9\x5D\xF4\x41\x6B"
+                         "\xED\x31\xA2\xF0\x44\x76\xC1\x8B"
+                         "\xB4\x0C\x84\xA7\x4B\x97\xDC\x5B"
+                         "\x16\x84\x2D\x4F\xA1\x86\xF5\x6A"
+                         "\xB3\x32\x56\x97\x1F\xA1\x10\xF4",
+               .clen   = 80,
+       }
+};
+
+static const struct hash_testvec sm4_cbcmac_tv_template[] = {
+       {
+               .key            = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88"
+                                 "\x77\x66\x55\x44\x33\x22\x11\x00",
+               .plaintext      = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+               .digest         = "\x97\xb4\x75\x8f\x84\x92\x3d\x3f"
+                                 "\x86\x81\x0e\x0e\xea\x14\x6d\x73",
+               .psize          = 16,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+                                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+                                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                                 "\xee",
+               .digest         = "\xc7\xdb\x17\x71\xa1\x5c\x0d\x22"
+                                 "\xa3\x39\x3a\x31\x88\x91\x49\xa1",
+               .psize          = 33,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16"
+                                 "\xf9\xdd\xbe\x91\x73\x53\x37\x1a"
+                                 "\xfe\xdd\xba\x97\x7e\x53\x3c\x1c"
+                                 "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11"
+                                 "\xf0\xd8\xbc\x96\x73\x5c\x34\x11"
+                                 "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f"
+                                 "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17"
+                                 "\xfd\xdb\xb1\x9b\x76\x5c\x37",
+               .digest         = "\x9b\x07\x88\x7f\xd5\x95\x23\x12"
+                                 "\x64\x0a\x66\x7f\x4e\x25\xca\xd0",
+               .psize          = 63,
+               .ksize          = 16,
+       }
+};
+
+static const struct hash_testvec sm4_cmac128_tv_template[] = {
+       {
+               .key            = "\xff\xee\xdd\xcc\xbb\xaa\x99\x88"
+                                 "\x77\x66\x55\x44\x33\x22\x11\x00",
+               .plaintext      = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
+               .digest         = "\x00\xd4\x63\xb4\x9a\xf3\x52\xe2"
+                                 "\x74\xa9\x00\x55\x13\x54\x2a\xd1",
+               .psize          = 16,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+                                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+                                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                                 "\xee",
+               .digest         = "\x8a\x8a\xe9\xc0\xc8\x97\x0e\x85"
+                                 "\x21\x57\x02\x10\x1a\xbf\x9c\xc6",
+               .psize          = 33,
+               .ksize          = 16,
+       }, {
+               .key            = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+                                 "\xfe\xdc\xBA\x98\x76\x54\x32\x10",
+               .plaintext      = "\xfb\xd1\xbe\x92\x7e\x50\x3f\x16"
+                                 "\xf9\xdd\xbe\x91\x73\x53\x37\x1a"
+                                 "\xfe\xdd\xba\x97\x7e\x53\x3c\x1c"
+                                 "\xfe\xd7\xbf\x9c\x75\x5f\x3e\x11"
+                                 "\xf0\xd8\xbc\x96\x73\x5c\x34\x11"
+                                 "\xf5\xdb\xb1\x99\x7a\x5a\x32\x1f"
+                                 "\xf6\xdf\xb4\x95\x7f\x5f\x3b\x17"
+                                 "\xfd\xdb\xb1\x9b\x76\x5c\x37",
+               .digest         = "\x5f\x14\xc9\xa9\x20\xb2\xb4\xf0"
+                                 "\x76\xe0\xd8\xd6\xdc\x4f\xe1\xbc",
+               .psize          = 63,
+               .ksize          = 16,
+       }
+};
+
 /* Cast6 test vectors from RFC 2612 */
 static const struct cipher_testvec cast6_tv_template[] = {
        {
index bf79fbb2340fa5632b6a96979769a0718c2d3ac7..5e820afa3c7858e19958dc8cb6216539314111b9 100644 (file)
@@ -775,7 +775,7 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = {
        0xca2dbf07ad5a8333ULL,
 };
 
-/**
+/*
  * The core Whirlpool transform.
  */
 
index 23d9a09d7060445be83e153ba483a16fe1fb3a51..a3ef6cce644cc071e5d58e5698cc1bf52947b6df 100644 (file)
@@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
                struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
                struct nd_mapping_desc *mapping;
 
+               /* range index 0 == unmapped in SPA or invalid-SPA */
+               if (memdev->range_index == 0 || spa->range_index == 0)
+                       continue;
                if (memdev->range_index != spa->range_index)
                        continue;
                if (count >= ND_MAX_MAPPINGS) {
index 31cf9aee5edd4ccb2973d6b5bdcdd62af1048747..1f6007abcf18eda3259ae274d08a6b1f48cc5dde 100644 (file)
@@ -292,6 +292,12 @@ void __init init_prmt(void)
        int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
                                          sizeof (struct acpi_table_prmt_header),
                                          0, acpi_parse_prmt, 0);
+       /*
+        * Return immediately if the PRMT table is not present or no PRM
+        * module was found.
+        */
+       if (mc <= 0)
+               return;
+
        pr_info("PRM: found %u modules\n", mc);
 
        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
index fbdbef0ab5529e4f37e4fe8dd7af470dc5a808a1..3a308461246a8dca19be628490718c40e6286e88 100644 (file)
@@ -452,7 +452,7 @@ int acpi_s2idle_prepare_late(void)
        if (lps0_dsm_func_mask_microsoft > 0) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
@@ -479,7 +479,7 @@ void acpi_s2idle_restore_early(void)
        if (lps0_dsm_func_mask_microsoft > 0) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
                                lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
index f6360490a4a30de4c6454ac7cd1253ee551f9e58..6c0ef9d55a343a463b4d8596e550c036db56593e 100644 (file)
@@ -2837,6 +2837,7 @@ void device_initialize(struct device *dev)
        device_pm_init(dev);
        set_dev_node(dev, -1);
 #ifdef CONFIG_GENERIC_MSI_IRQ
+       raw_spin_lock_init(&dev->msi_lock);
        INIT_LIST_HEAD(&dev->msi_list);
 #endif
        INIT_LIST_HEAD(&dev->links.consumers);
index a934c679e6ce6a01f68c46ee47eb65d95617f23d..f10688e83226c04166db7164385406cc2f3abdc6 100644 (file)
@@ -435,7 +435,7 @@ static void genpd_restore_performance_state(struct device *dev,
 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 {
        struct generic_pm_domain *genpd;
-       int ret;
+       int ret = 0;
 
        genpd = dev_to_genpd_safe(dev);
        if (!genpd)
@@ -446,7 +446,13 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
                return -EINVAL;
 
        genpd_lock(genpd);
-       ret = genpd_set_performance_state(dev, state);
+       if (pm_runtime_suspended(dev)) {
+               dev_gpd_data(dev)->rpm_pstate = state;
+       } else {
+               ret = genpd_set_performance_state(dev, state);
+               if (!ret)
+                       dev_gpd_data(dev)->rpm_pstate = 0;
+       }
        genpd_unlock(genpd);
 
        return ret;
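
A performance-state vote may now be cast while the device is runtime-suspended: the value is parked in rpm_pstate and re-applied by genpd on runtime resume (see genpd_restore_performance_state() above). A hypothetical caller sketch:

#include <linux/pm_domain.h>

/* Hypothetical consumer: vote for the performance state the next
 * transfer needs. Safe while runtime-suspended; genpd defers the vote. */
static int foo_request_perf(struct device *dev, unsigned int state)
{
        return dev_pm_genpd_set_performance_state(dev, state);
}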
index 0097696c31de2d81cb2c578c0a5aca3a035d3729..b1905916f7af80f19e091a9b6b50fe08bb5b0985 100644 (file)
@@ -53,6 +53,10 @@ struct regmap {
                        spinlock_t spinlock;
                        unsigned long spinlock_flags;
                };
+               struct {
+                       raw_spinlock_t raw_spinlock;
+                       unsigned long raw_spinlock_flags;
+               };
        };
        regmap_lock lock;
        regmap_unlock unlock;
index 211a335a608d7acc7cda755a45302b5544c25464..ad684d37c2dae37ad467cccf4893d0efc9359910 100644 (file)
@@ -368,7 +368,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
        char *buf;
        char *entry;
        int ret;
-       unsigned entry_len;
+       unsigned int entry_len;
 
        if (*ppos < 0 || !count)
                return -EINVAL;
index f9cd51afb9d2c58aa95c30ac5cb9613ae36d3f1f..71f16be7e7177a1d716613ab9fec6c9f2ceb8286 100644 (file)
@@ -15,7 +15,7 @@
 
 struct regmap_mmio_context {
        void __iomem *regs;
-       unsigned val_bytes;
+       unsigned int val_bytes;
        bool relaxed_mmio;
 
        bool attached_clk;
index fe3e38dd5324f7735bc24547b30a683be8423d5e..21a0c2562ec0685a712b19c7e0969fabf878268b 100644 (file)
@@ -533,6 +533,23 @@ __releases(&map->spinlock)
        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 }
 
+static void regmap_lock_raw_spinlock(void *__map)
+__acquires(&map->raw_spinlock)
+{
+       struct regmap *map = __map;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&map->raw_spinlock, flags);
+       map->raw_spinlock_flags = flags;
+}
+
+static void regmap_unlock_raw_spinlock(void *__map)
+__releases(&map->raw_spinlock)
+{
+       struct regmap *map = __map;
+       raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
+}
+
 static void dev_get_regmap_release(struct device *dev, void *res)
 {
        /*
@@ -770,11 +787,19 @@ struct regmap *__regmap_init(struct device *dev,
        } else {
                if ((bus && bus->fast_io) ||
                    config->fast_io) {
-                       spin_lock_init(&map->spinlock);
-                       map->lock = regmap_lock_spinlock;
-                       map->unlock = regmap_unlock_spinlock;
-                       lockdep_set_class_and_name(&map->spinlock,
-                                                  lock_key, lock_name);
+                       if (config->use_raw_spinlock) {
+                               raw_spin_lock_init(&map->raw_spinlock);
+                               map->lock = regmap_lock_raw_spinlock;
+                               map->unlock = regmap_unlock_raw_spinlock;
+                               lockdep_set_class_and_name(&map->raw_spinlock,
+                                                          lock_key, lock_name);
+                       } else {
+                               spin_lock_init(&map->spinlock);
+                               map->lock = regmap_lock_spinlock;
+                               map->unlock = regmap_unlock_spinlock;
+                               lockdep_set_class_and_name(&map->spinlock,
+                                                          lock_key, lock_name);
+                       }
                } else {
                        mutex_init(&map->mutex);
                        map->lock = regmap_lock_mutex;
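
Drivers that must touch registers from raw-IRQ context (e.g. irqchips on PREEMPT_RT) can now opt in through the config; a hypothetical MMIO setup:

#include <linux/regmap.h>

/* Hypothetical regmap that has to be usable in raw-IRQ context. */
static const struct regmap_config foo_regmap_config = {
        .reg_bits         = 32,
        .val_bits         = 32,
        .reg_stride       = 4,
        .fast_io          = true,  /* take a spinlock, not a mutex */
        .use_raw_spinlock = true,  /* ...and make it a raw one */
};

The config is then passed to devm_regmap_init_mmio() as usual.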
@@ -1126,10 +1151,10 @@ skip_format_initialization:
                /* Make sure, that this register range has no selector
                   or data window within its boundary */
                for (j = 0; j < config->num_ranges; j++) {
-                       unsigned sel_reg = config->ranges[j].selector_reg;
-                       unsigned win_min = config->ranges[j].window_start;
-                       unsigned win_max = win_min +
-                                          config->ranges[j].window_len - 1;
+                       unsigned int sel_reg = config->ranges[j].selector_reg;
+                       unsigned int win_min = config->ranges[j].window_start;
+                       unsigned int win_max = win_min +
+                                              config->ranges[j].window_len - 1;
 
                        /* Allow data window inside its own virtual range */
                        if (j == i)
@@ -1298,7 +1323,7 @@ EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
  */
 int regmap_field_bulk_alloc(struct regmap *regmap,
                            struct regmap_field **rm_field,
-                           struct reg_field *reg_field,
+                           const struct reg_field *reg_field,
                            int num_fields)
 {
        struct regmap_field *rf;
@@ -1334,7 +1359,7 @@ EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
 int devm_regmap_field_bulk_alloc(struct device *dev,
                                 struct regmap *regmap,
                                 struct regmap_field **rm_field,
-                                struct reg_field *reg_field,
+                                const struct reg_field *reg_field,
                                 int num_fields)
 {
        struct regmap_field *rf;
@@ -1667,7 +1692,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
                        if (ret) {
                                dev_err(map->dev,
                                        "Error in caching of register: %x ret: %d\n",
-                                       reg + i, ret);
+                                       reg + regmap_get_offset(map, i), ret);
                                return ret;
                        }
                }
index 63056cfd4b62c7995daafdb827b1661875321102..fbb3a558139fca22b174e90bd665fe6113b3ad51 100644 (file)
@@ -213,7 +213,7 @@ config BLK_DEV_LOOP_MIN_COUNT
          dynamically allocated with the /dev/loop-control interface.
 
 config BLK_DEV_CRYPTOLOOP
-       tristate "Cryptoloop Support"
+       tristate "Cryptoloop Support (DEPRECATED)"
        select CRYPTO
        select CRYPTO_CBC
        depends on BLK_DEV_LOOP
@@ -225,7 +225,7 @@ config BLK_DEV_CRYPTOLOOP
          WARNING: This device is not safe for journaled file systems like
          ext3 or Reiserfs. Please use the Device Mapper crypto module
          instead, which can be configured to be on-disk compatible with the
-         cryptoloop device.
+         cryptoloop device.  cryptoloop support will be removed in Linux 5.16.
 
 source "drivers/block/drbd/Kconfig"
 
index 3cabc335ae7448fc671642df86caa4fe2c3d3bf9..f0a91faa43a89ff71cddf8c26c9aa2c72f3d3654 100644 (file)
@@ -189,6 +189,8 @@ init_cryptoloop(void)
 
        if (rc)
                printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
+       else
+               pr_warn("the cryptoloop driver has been deprecated and will be removed in Linux 5.16\n");
        return rc;
 }
 
index c38317979f74ec42a92885d7f963a49526bd8637..19f5d5a8b16a3cf7e57df67c50cf729d8b349b64 100644 (file)
@@ -818,6 +818,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+       /* don't abort a completed request */
+       if (blk_mq_request_completed(req))
+               return true;
+
        mutex_lock(&cmd->lock);
        cmd->status = BLK_STS_IOERR;
        mutex_unlock(&cmd->lock);
@@ -2004,15 +2008,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 {
        mutex_lock(&nbd->config_lock);
        nbd_disconnect(nbd);
-       nbd_clear_sock(nbd);
-       mutex_unlock(&nbd->config_lock);
+       sock_shutdown(nbd);
        /*
         * Make sure recv thread has finished, so it does not drop the last
         * config ref and try to destroy the workqueue from inside the work
-        * queue.
+        * queue. This also ensures that we can safely call nbd_clear_que()
+        * to cancel the in-flight I/Os.
         */
        if (nbd->recv_workq)
                flush_workqueue(nbd->recv_workq);
+       nbd_clear_que(nbd);
+       nbd->task_setup = NULL;
+       mutex_unlock(&nbd->config_lock);
+
        if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
index 9b3298926356d81617e61c815c7c279759db3464..675327df6aff98cf63b9c2f824d4bc11023135cb 100644 (file)
@@ -892,7 +892,7 @@ static void pd_probe_drive(struct pd_unit *disk)
                return;
 
        p = blk_mq_alloc_disk(&disk->tag_set, disk);
-       if (!p) {
+       if (IS_ERR(p)) {
                blk_mq_free_tag_set(&disk->tag_set);
                return;
        }
index 4b49df2dfd23f5af748286248daeb121722230f5..afb37aac09e88896050061c379b5a5a8c31f1149 100644 (file)
@@ -692,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
 static unsigned int virtblk_queue_depth;
 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
 
+static int virtblk_validate(struct virtio_device *vdev)
+{
+       u32 blk_size;
+
+       if (!vdev->config->get) {
+               dev_err(&vdev->dev, "%s failure: config access disabled\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
+               return 0;
+
+       blk_size = virtio_cread32(vdev,
+                       offsetof(struct virtio_blk_config, blk_size));
+
+       if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
+               __virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);
+
+       return 0;
+}
+
 static int virtblk_probe(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk;
@@ -703,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev)
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;
 
-       if (!vdev->config->get) {
-               dev_err(&vdev->dev, "%s failure: config access disabled\n",
-                       __func__);
-               return -EINVAL;
-       }
-
        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
@@ -823,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev)
        else
                blk_size = queue_logical_block_size(q);
 
+       if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
+               dev_err(&vdev->dev,
+                       "block size is changed unexpectedly, now is %u\n",
+                       blk_size);
+               err = -EINVAL;
+               goto err_cleanup_disk;
+       }
+
        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
@@ -881,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev)
        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;
 
+err_cleanup_disk:
+       blk_cleanup_disk(vblk->disk);
 out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
 out_free_vq:
@@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
+       .validate                       = virtblk_validate,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
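
Two layers of defence are added here: virtblk_validate() clears VIRTIO_BLK_F_BLK_SIZE before feature negotiation when the advertised size is unusable, and probe re-checks the value in case a misbehaving device changed it afterwards. A minimal sketch of the range test, assuming the usual 512-byte sector and 4 KiB page:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512u        /* assumed: the kernel's fixed sector size */
#define PAGE_SIZE   4096u       /* assumed: a typical page size */

/* A device-advertised logical block size is only usable if it fits
 * between one sector and one page; outside that range the feature
 * bit is cleared instead of failing probe outright. */
static bool blk_size_usable(uint32_t blk_size)
{
        return blk_size >= SECTOR_SIZE && blk_size <= PAGE_SIZE;
}

int main(void)
{
        uint32_t sizes[] = { 0, 512, 4096, 65536 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("blk_size %5u -> %s\n", (unsigned int)sizes[i],
                       blk_size_usable(sizes[i]) ? "accept" : "clear feature bit");
        return 0;
}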
index bc239a11aa69873420d93b4cf5f175591e2a20b3..5b9ea66b92dc32fb7dce14f4a4db5ea7419839af 100644 (file)
@@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
                      struct image_info *img_info);
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-                       struct mhi_chan *mhi_chan, unsigned int flags);
+                       struct mhi_chan *mhi_chan);
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
                       struct mhi_chan *mhi_chan);
 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
index 84448233f64c5faa57626d17380fd8f737283bd4..fc9196f11cb7dbdc68756651944c42a8eded8764 100644 (file)
@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
 }
 
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-                       struct mhi_chan *mhi_chan, unsigned int flags)
+                       struct mhi_chan *mhi_chan)
 {
        int ret = 0;
        struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,9 +1455,6 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
        if (ret)
                goto error_pm_state;
 
-       if (mhi_chan->dir == DMA_FROM_DEVICE)
-               mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
-       
        /* Pre-allocate buffer for xfer ring */
        if (mhi_chan->pre_alloc) {
                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1613,7 +1610,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 }
 
 /* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
 {
        int ret, dir;
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1624,7 +1621,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
                if (!mhi_chan)
                        continue;
 
-               ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
+               ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
                if (ret)
                        goto error_open_chan;
        }
index 0ef98e3ba3410bc595dae526afd835d088e33d1d..148a4dd8cb9acc76e31537a488bc5e140509e9c1 100644 (file)
@@ -3097,8 +3097,10 @@ static int sysc_probe(struct platform_device *pdev)
                return error;
 
        error = sysc_check_active_timer(ddata);
-       if (error == -EBUSY)
+       if (error == -ENXIO)
                ddata->reserved = true;
+       else if (error)
+               return error;
 
        error = sysc_get_clocks(ddata);
        if (error)
index 3f166c8a4099132bdcfb06281c82578f85c81781..239eca4d68055a3195253b2daf8de207f6d06f09 100644 (file)
@@ -524,6 +524,20 @@ config HW_RANDOM_XIPHERA
          To compile this driver as a module, choose M here: the
          module will be called xiphera-trng.
 
+config HW_RANDOM_ARM_SMCCC_TRNG
+       tristate "Arm SMCCC TRNG firmware interface support"
+       depends on HAVE_ARM_SMCCC_DISCOVERY
+       default HW_RANDOM
+       help
+         Say 'Y' to enable the True Random Number Generator driver using
+         the Arm SMCCC TRNG firmware interface. This reads entropy from
+         higher exception levels (firmware, hypervisor). It uses SMCCC to
+         communicate with the firmware:
+         https://developer.arm.com/documentation/den0098/latest/
+
+         To compile this driver as a module, choose M here: the
+         module will be called arm_smccc_trng.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
index 8933fada74f2fb9488b6b6531c9e2197dc3d15b0..a5a1c765a3946e0d80ba73d145718c6ceffaf842 100644 (file)
@@ -45,3 +45,4 @@ obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
 obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
 obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
 obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
+obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
index d8d4ef5214a19ea4068a4bdb2bc1ef61dca6e733..c22d4184bb612a61c83762e0b0feaf21f118fe15 100644 (file)
@@ -124,7 +124,7 @@ static struct hwrng amd_rng = {
        .read           = amd_rng_read,
 };
 
-static int __init mod_init(void)
+static int __init amd_rng_mod_init(void)
 {
        int err;
        struct pci_dev *pdev = NULL;
@@ -188,7 +188,7 @@ out:
        return err;
 }
 
-static void __exit mod_exit(void)
+static void __exit amd_rng_mod_exit(void)
 {
        struct amd768_priv *priv;
 
@@ -203,8 +203,8 @@ static void __exit mod_exit(void)
        kfree(priv);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(amd_rng_mod_init);
+module_exit(amd_rng_mod_exit);
 
 MODULE_AUTHOR("The Linux Kernel team");
 MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
new file mode 100644 (file)
index 0000000..b24ac39
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Randomness driver for the ARM SMCCC TRNG Firmware Interface
+ * https://developer.arm.com/documentation/den0098/latest/
+ *
+ *  Copyright (C) 2020 Arm Ltd.
+ *
+ * The ARM TRNG firmware interface specifies a protocol to read entropy
+ * from a higher exception level, to abstract away any machine-specific
+ * implementations and to allow easier use in hypervisors.
+ *
+ * The firmware interface is realised using the SMCCC specification.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/hw_random.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+
+#ifdef CONFIG_ARM64
+#define ARM_SMCCC_TRNG_RND     ARM_SMCCC_TRNG_RND64
+#define MAX_BITS_PER_CALL      (3 * 64UL)
+#else
+#define ARM_SMCCC_TRNG_RND     ARM_SMCCC_TRNG_RND32
+#define MAX_BITS_PER_CALL      (3 * 32UL)
+#endif
+
+/* We don't want to allow the firmware to stall us forever. */
+#define SMCCC_TRNG_MAX_TRIES   20
+
+#define SMCCC_RET_TRNG_INVALID_PARAMETER       -2
+#define SMCCC_RET_TRNG_NO_ENTROPY              -3
+
+static int copy_from_registers(char *buf, struct arm_smccc_res *res,
+                              size_t bytes)
+{
+       unsigned int chunk, copied;
+
+       if (bytes == 0)
+               return 0;
+
+       chunk = min(bytes, sizeof(long));
+       memcpy(buf, &res->a3, chunk);
+       copied = chunk;
+       if (copied >= bytes)
+               return copied;
+
+       chunk = min((bytes - copied), sizeof(long));
+       memcpy(&buf[copied], &res->a2, chunk);
+       copied += chunk;
+       if (copied >= bytes)
+               return copied;
+
+       chunk = min((bytes - copied), sizeof(long));
+       memcpy(&buf[copied], &res->a1, chunk);
+
+       return copied + chunk;
+}
+
+static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       struct arm_smccc_res res;
+       u8 *buf = data;
+       unsigned int copied = 0;
+       int tries = 0;
+
+       while (copied < max) {
+               size_t bits = min_t(size_t, (max - copied) * BITS_PER_BYTE,
+                                 MAX_BITS_PER_CALL);
+
+               arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
+               if ((int)res.a0 < 0)
+                       return (int)res.a0;
+
+               switch ((int)res.a0) {
+               case SMCCC_RET_SUCCESS:
+                       copied += copy_from_registers(buf + copied, &res,
+                                                     bits / BITS_PER_BYTE);
+                       tries = 0;
+                       break;
+               case SMCCC_RET_TRNG_NO_ENTROPY:
+                       if (!wait)
+                               return copied;
+                       tries++;
+                       if (tries >= SMCCC_TRNG_MAX_TRIES)
+                               return copied;
+                       cond_resched();
+                       break;
+               }
+       }
+
+       return copied;
+}
+
+static int smccc_trng_probe(struct platform_device *pdev)
+{
+       struct hwrng *trng;
+
+       trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+       if (!trng)
+               return -ENOMEM;
+
+       trng->name = "smccc_trng";
+       trng->read = smccc_trng_read;
+
+       platform_set_drvdata(pdev, trng);
+
+       return devm_hwrng_register(&pdev->dev, trng);
+}
+
+static struct platform_driver smccc_trng_driver = {
+       .driver = {
+               .name           = "smccc_trng",
+       },
+       .probe          = smccc_trng_probe,
+};
+module_platform_driver(smccc_trng_driver);
+
+MODULE_ALIAS("platform:smccc_trng");
+MODULE_AUTHOR("Andre Przywara");
+MODULE_LICENSE("GPL");
index e1d421a36a138d6a5e30b16bf62208846b5372ab..138ce434f86b20b7552ec962cb8267115b97c059 100644 (file)
@@ -83,7 +83,7 @@ static struct hwrng geode_rng = {
 };
 
 
-static int __init mod_init(void)
+static int __init geode_rng_init(void)
 {
        int err = -ENODEV;
        struct pci_dev *pdev = NULL;
@@ -124,7 +124,7 @@ err_unmap:
        goto out;
 }
 
-static void __exit mod_exit(void)
+static void __exit geode_rng_exit(void)
 {
        void __iomem *mem = (void __iomem *)geode_rng.priv;
 
@@ -132,8 +132,8 @@ static void __exit mod_exit(void)
        iounmap(mem);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(geode_rng_init);
+module_exit(geode_rng_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
 MODULE_LICENSE("GPL");
index d740b8814bf3fe1ea02c8bc4747920e51ec60a22..7b171cb3b825f3eeb97369644bbf544084ea01f4 100644 (file)
@@ -325,7 +325,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
 }
 
 
-static int __init mod_init(void)
+static int __init intel_rng_mod_init(void)
 {
        int err = -ENODEV;
        int i;
@@ -403,7 +403,7 @@ out:
 
 }
 
-static void __exit mod_exit(void)
+static void __exit intel_rng_mod_exit(void)
 {
        void __iomem *mem = (void __iomem *)intel_rng.priv;
 
@@ -411,8 +411,8 @@ static void __exit mod_exit(void)
        iounmap(mem);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(intel_rng_mod_init);
+module_exit(intel_rng_mod_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
 MODULE_LICENSE("GPL");
index 39943bc3651a61e10a634dbbbc3e5ee980e2506a..7444cc146e86a8d7b2341fe815a65e1338afa0dc 100644 (file)
@@ -192,7 +192,7 @@ static struct hwrng via_rng = {
 };
 
 
-static int __init mod_init(void)
+static int __init via_rng_mod_init(void)
 {
        int err;
 
@@ -209,13 +209,13 @@ static int __init mod_init(void)
 out:
        return err;
 }
-module_init(mod_init);
+module_init(via_rng_mod_init);
 
-static void __exit mod_exit(void)
+static void __exit via_rng_mod_exit(void)
 {
        hwrng_unregister(&via_rng);
 }
-module_exit(mod_exit);
+module_exit(via_rng_mod_exit);
 
 static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
index 4308f9ca7a43d1bd53dfc4cb9b1129f221216cb0..d6ba644f6b00a58a8dc67c545f968f3c4b0a692e 100644 (file)
@@ -89,7 +89,6 @@ config TCG_TIS_SYNQUACER
 config TCG_TIS_I2C_CR50
        tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
        depends on I2C
-       select TCG_CR50
        help
          This is a driver for the Google cr50 I2C TPM interface which is a
          custom microcontroller and requires a custom i2c protocol interface
index 903604769de99cf2aa98e68145a3caa9f8a23e30..3af4c07a9342ff115a2e3de3561c2b09793a6a6f 100644 (file)
@@ -106,17 +106,12 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        u16 len;
-       int sig;
 
        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }
 
-       sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
-       if (sig)
-               return -EINTR;
-
        len = ibmvtpm->res_len;
 
        if (count < len) {
@@ -237,7 +232,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
         * set the processing flag before the Hcall, since we may get the
         * result (interrupt) before even being able to check rc.
         */
-       ibmvtpm->tpm_processing_cmd = true;
+       ibmvtpm->tpm_processing_cmd = 1;
 
 again:
        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
@@ -255,7 +250,7 @@ again:
                        goto again;
                }
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
-               ibmvtpm->tpm_processing_cmd = false;
+               ibmvtpm->tpm_processing_cmd = 0;
        }
 
        spin_unlock(&ibmvtpm->rtce_lock);
@@ -269,7 +264,9 @@ static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
 
 static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
 {
-       return 0;
+       struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+
+       return ibmvtpm->tpm_processing_cmd;
 }
 
 /**
@@ -457,7 +454,7 @@ static const struct tpm_class_ops tpm_ibmvtpm = {
        .send = tpm_ibmvtpm_send,
        .cancel = tpm_ibmvtpm_cancel,
        .status = tpm_ibmvtpm_status,
-       .req_complete_mask = 0,
+       .req_complete_mask = 1,
        .req_complete_val = 0,
        .req_canceled = tpm_ibmvtpm_req_canceled,
 };
@@ -550,7 +547,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                case VTPM_TPM_COMMAND_RES:
                        /* len of the data in rtce buffer */
                        ibmvtpm->res_len = be16_to_cpu(crq->len);
-                       ibmvtpm->tpm_processing_cmd = false;
+                       ibmvtpm->tpm_processing_cmd = 0;
                        wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
@@ -688,8 +685,15 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto init_irq_cleanup;
        }
 
-       if (!strcmp(id->compat, "IBM,vtpm20")) {
+
+       if (!strcmp(id->compat, "IBM,vtpm20"))
                chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+       rc = tpm_get_timeouts(chip);
+       if (rc)
+               goto init_irq_cleanup;
+
+       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
                rc = tpm2_get_cc_attrs_tbl(chip);
                if (rc)
                        goto init_irq_cleanup;
index b92aa7d3e93e7d47b38117ed8fa3077ae8a16a1c..51198b137461e903ca4a4abfec75279ee5bc7fac 100644 (file)
@@ -41,7 +41,7 @@ struct ibmvtpm_dev {
        wait_queue_head_t wq;
        u16 res_len;
        u32 vtpm_version;
-       bool tpm_processing_cmd;
+       u8 tpm_processing_cmd;
 };
 
 #define CRQ_RES_BUF_SIZE       PAGE_SIZE
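
The widened tpm_processing_cmd byte is what makes the new req_complete_mask work: the TPM core considers a command finished when (status & req_complete_mask) == req_complete_val, so with mask 1 and value 0 it polls until the driver clears the flag, replacing the removed interruptible wait in the receive path. A sketch of that completion test:

#include <stdio.h>

struct tpm_ops { unsigned char req_complete_mask, req_complete_val; };

/* The TPM core's generic test: a command is complete when the masked
 * status equals the expected value. */
static int request_complete(const struct tpm_ops *ops, unsigned char status)
{
        return (status & ops->req_complete_mask) == ops->req_complete_val;
}

int main(void)
{
        struct tpm_ops ibmvtpm = { .req_complete_mask = 1, .req_complete_val = 0 };

        printf("status 1 (processing): %s\n",
               request_complete(&ibmvtpm, 1) ? "complete" : "pending");
        printf("status 0 (idle):       %s\n",
               request_complete(&ibmvtpm, 0) ? "complete" : "pending");
        return 0;
}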
index 44dde2fbe2fbcce1372579d63f78bb44b424e8aa..c89278103703759c434e75c97112dd07dc4b81a3 100644 (file)
@@ -639,12 +639,6 @@ static const struct tpm_class_ops cr50_i2c = {
        .req_canceled = &tpm_cr50_i2c_req_canceled,
 };
 
-static const struct i2c_device_id cr50_i2c_table[] = {
-       {"cr50_i2c", 0},
-       {}
-};
-MODULE_DEVICE_TABLE(i2c, cr50_i2c_table);
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id cr50_i2c_acpi_id[] = {
        { "GOOG0005", 0 },
@@ -670,8 +664,7 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
  * - 0:                Success.
  * - -errno:   A POSIX error code.
  */
-static int tpm_cr50_i2c_probe(struct i2c_client *client,
-                             const struct i2c_device_id *id)
+static int tpm_cr50_i2c_probe(struct i2c_client *client)
 {
        struct tpm_i2c_cr50_priv_data *priv;
        struct device *dev = &client->dev;
@@ -774,8 +767,7 @@ static int tpm_cr50_i2c_remove(struct i2c_client *client)
 static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
 
 static struct i2c_driver cr50_i2c_driver = {
-       .id_table = cr50_i2c_table,
-       .probe = tpm_cr50_i2c_probe,
+       .probe_new = tpm_cr50_i2c_probe,
        .remove = tpm_cr50_i2c_remove,
        .driver = {
                .name = "cr50_i2c",
index 496900de0b0bbf6eff60a28f7f37344a0f3a63d8..de36f58d551c0b920b0375360318f3f38c093528 100644 (file)
@@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                               hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
        }
 
-       imx_register_uart_clocks(1);
+       imx_register_uart_clocks(2);
 }
 CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
index 51ed640e527b4871eb007b6994ddfc8a725ae6dd..4ece326ea233e90e51c7dd4e5eecf48b8d6e424d 100644 (file)
@@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc)
        if (on < 0)
                return on;
 
-       /*
-        * Votable GDSCs can be ON due to Vote from other masters.
-        * If a Votable GDSC is ON, make sure we have a Vote.
-        */
-       if ((sc->flags & VOTABLE) && on)
-               gdsc_enable(&sc->pd);
+       if (on) {
+               /* The regulator must be on, sync the kernel state */
+               if (sc->rsupply) {
+                       ret = regulator_enable(sc->rsupply);
+                       if (ret < 0)
+                               return ret;
+               }
 
-       /*
-        * Make sure the retain bit is set if the GDSC is already on, otherwise
-        * we end up turning off the GDSC and destroying all the register
-        * contents that we thought we were saving.
-        */
-       if ((sc->flags & RETAIN_FF_ENABLE) && on)
-               gdsc_retain_ff_on(sc);
+               /*
+                * Votable GDSCs can be ON due to Vote from other masters.
+                * If a Votable GDSC is ON, make sure we have a Vote.
+                */
+               if (sc->flags & VOTABLE) {
+                       ret = regmap_update_bits(sc->regmap, sc->gdscr,
+                                                SW_COLLAPSE_MASK, val);
+                       if (ret)
+                               return ret;
+               }
+
+               /* Turn on HW trigger mode if supported */
+               if (sc->flags & HW_CTRL) {
+                       ret = gdsc_hwctrl(sc, true);
+                       if (ret < 0)
+                               return ret;
+               }
 
-       /* If ALWAYS_ON GDSCs are not ON, turn them ON */
-       if (sc->flags & ALWAYS_ON) {
-               if (!on)
-                       gdsc_enable(&sc->pd);
+               /*
+                * Make sure the retain bit is set if the GDSC is already on,
+                * otherwise we end up turning off the GDSC and destroying all
+                * the register contents that we thought we were saving.
+                */
+               if (sc->flags & RETAIN_FF_ENABLE)
+                       gdsc_retain_ff_on(sc);
+       } else if (sc->flags & ALWAYS_ON) {
+               /* If ALWAYS_ON GDSCs are not ON, turn them ON */
+               gdsc_enable(&sc->pd);
                on = true;
-               sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        }
 
        if (on || (sc->pwrsts & PWRSTS_RET))
@@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc)
        else
                gdsc_clear_mem_on(sc);
 
+       if (sc->flags & ALWAYS_ON)
+               sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
        if (!sc->pd.power_off)
                sc->pd.power_off = gdsc_disable;
        if (!sc->pd.power_on)
index 9fb79bd794350578297df9c6282e658a6e904fc2..684d8937965e066abc512739182616ed9324f008 100644 (file)
@@ -187,7 +187,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
        init.ops = &usb2_clock_sel_clock_ops;
        priv->hw.init = &init;
 
-       ret = devm_clk_hw_register(NULL, &priv->hw);
+       ret = devm_clk_hw_register(dev, &priv->hw);
        if (ret)
                goto pm_put;
 
index 3fc98a3ffd91ec0d084e16276991246f9acb04e2..c10fc33b29b181aa725c202ca734b95227836533 100644 (file)
@@ -104,7 +104,11 @@ struct armada_37xx_dvfs {
 };
 
 static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
-       {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+       /*
+        * The cpufreq scaling for the 1.2 GHz variant of the SoC is currently
+        * unstable because we do not know how to configure it properly.
+        */
+       /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
        {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
        {.cpu_freq_max = 800*1000*1000,  .divider = {1, 2, 3, 4} },
        {.cpu_freq_max = 600*1000*1000,  .divider = {2, 4, 5, 6} },
index bef7528aecd313aabaff829fbfe5937ad1dd5836..231e585f6ba2cc458b6548b63fb107f97570b620 100644 (file)
@@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "qcom,qcs404", },
        { .compatible = "qcom,sc7180", },
        { .compatible = "qcom,sc7280", },
+       { .compatible = "qcom,sc8180x", },
        { .compatible = "qcom,sdm845", },
+       { .compatible = "qcom,sm8150", },
 
        { .compatible = "st,stih407", },
        { .compatible = "st,stih410", },
index ec9a87ca2dbb8997c5ef3dbc9da3cf49e1fa9201..75f818d04b481a1d0f250457f0557de464cf908a 100644 (file)
@@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
        }
 
        if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
-               ret = -ENOMEM;
+               return -ENOMEM;
 
        /* Obtain CPUs that share SCMI performance controls */
        ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
index cd1baee424a18a6fd56187f64a399d631e2db3d9..b3a9bbfb8831f3f1704528b84f504d971558de9f 100644 (file)
@@ -26,8 +26,7 @@ void sun8i_ce_prng_exit(struct crypto_tfm *tfm)
 {
        struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memzero_explicit(ctx->seed, ctx->slen);
-       kfree(ctx->seed);
+       kfree_sensitive(ctx->seed);
        ctx->seed = NULL;
        ctx->slen = 0;
 }
@@ -38,8 +37,7 @@ int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed,
        struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
 
        if (ctx->seed && ctx->slen != slen) {
-               memzero_explicit(ctx->seed, ctx->slen);
-               kfree(ctx->seed);
+               kfree_sensitive(ctx->seed);
                ctx->slen = 0;
                ctx->seed = NULL;
        }
@@ -157,9 +155,8 @@ err_dst:
                memcpy(dst, d, dlen);
                memcpy(ctx->seed, d + dlen, ctx->slen);
        }
-       memzero_explicit(d, todo);
 err_iv:
-       kfree(d);
+       kfree_sensitive(d);
 err_mem:
        return err;
 }
index 5b7af4498bd5a6f937a5ae15caced491237f8c8e..19cd2e52f89d40076bde59538e678d127bbfdf28 100644 (file)
@@ -95,9 +95,8 @@ err_pm:
                memcpy(data, d, max);
                err = max;
        }
-       memzero_explicit(d, todo);
 err_dst:
-       kfree(d);
+       kfree_sensitive(d);
        return err;
 }
 
index 3191527928e4163df5c0017029076413977c8322..246a6782674c35311709eedd38fe85a6156af5e7 100644 (file)
@@ -20,8 +20,7 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
        struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
 
        if (ctx->seed && ctx->slen != slen) {
-               memzero_explicit(ctx->seed, ctx->slen);
-               kfree(ctx->seed);
+               kfree_sensitive(ctx->seed);
                ctx->slen = 0;
                ctx->seed = NULL;
        }
@@ -48,8 +47,7 @@ void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
 {
        struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       memzero_explicit(ctx->seed, ctx->slen);
-       kfree(ctx->seed);
+       kfree_sensitive(ctx->seed);
        ctx->seed = NULL;
        ctx->slen = 0;
 }
@@ -167,9 +165,8 @@ err_iv:
                /* Update seed */
                memcpy(ctx->seed, d + dlen, ctx->slen);
        }
-       memzero_explicit(d, todo);
 err_free:
-       kfree(d);
+       kfree_sensitive(d);
 
        return err;
 }
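
kfree_sensitive() fuses the memzero_explicit()/kfree() pair (deriving the length from the allocation itself), so seed material cannot be freed without first being wiped. A userspace sketch of the same contract, using explicit_bzero() (glibc/BSD) as the stand-in and passing the length explicitly:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kfree_sensitive(): wipe the buffer with a
 * call the compiler may not elide, then release it. */
static void free_sensitive(void *p, size_t len)
{
        if (!p)
                return;
        explicit_bzero(p, len); /* memzero_explicit() analogue */
        free(p);
}

int main(void)
{
        size_t slen = 32;
        unsigned char *seed = malloc(slen);

        if (!seed)
                return 1;
        memset(seed, 0xA5, slen);       /* stand-in key material */
        free_sensitive(seed, slen);     /* wiped before release */
        puts("seed wiped and freed");
        return 0;
}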
index b1d2860042958bb2ca103e33bf2d4d60041a557e..9391ccc03382d13649a8c07ce26ad2758725941f 100644 (file)
@@ -143,6 +143,7 @@ struct atmel_aes_xts_ctx {
        struct atmel_aes_base_ctx       base;
 
        u32                     key2[AES_KEYSIZE_256 / sizeof(u32)];
+       struct crypto_skcipher *fallback_tfm;
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -155,6 +156,7 @@ struct atmel_aes_authenc_ctx {
 struct atmel_aes_reqctx {
        unsigned long           mode;
        u8                      lastc[AES_BLOCK_SIZE];
+       struct skcipher_request fallback_req;
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -418,24 +420,15 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
        return len ? block_size - len : 0;
 }
 
-static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
+static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
 {
-       struct atmel_aes_dev *aes_dd = NULL;
-       struct atmel_aes_dev *tmp;
+       struct atmel_aes_dev *aes_dd;
 
        spin_lock_bh(&atmel_aes.lock);
-       if (!ctx->dd) {
-               list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
-                       aes_dd = tmp;
-                       break;
-               }
-               ctx->dd = aes_dd;
-       } else {
-               aes_dd = ctx->dd;
-       }
-
+       /* One AES IP per SoC. */
+       aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
+                                         struct atmel_aes_dev, list);
        spin_unlock_bh(&atmel_aes.lock);
-
        return aes_dd;
 }
 
@@ -967,7 +960,6 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
        ctx = crypto_tfm_ctx(areq->tfm);
 
        dd->areq = areq;
-       dd->ctx = ctx;
        start_async = (areq != new_areq);
        dd->is_async = start_async;
 
@@ -1083,12 +1075,48 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
        return atmel_aes_ctr_transfer(dd);
 }
 
+static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
+{
+       struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
+       struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
+                       crypto_skcipher_reqtfm(req));
+
+       skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+       skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+                                     req->base.complete, req->base.data);
+       skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
+                                  req->cryptlen, req->iv);
+
+       return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+                    crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
 static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
 {
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct atmel_aes_reqctx *rctx;
-       struct atmel_aes_dev *dd;
+       u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
+
+       if (opmode == AES_FLAGS_XTS) {
+               if (req->cryptlen < XTS_BLOCK_SIZE)
+                       return -EINVAL;
+
+               if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
+                       return atmel_aes_xts_fallback(req,
+                                                     mode & AES_FLAGS_ENCRYPT);
+       }
+
+       /*
+        * ECB, CBC, CFB, OFB and CTR modes require the plaintext and ciphertext
+        * to have a positive integer length.
+        */
+       if (!req->cryptlen && opmode != AES_FLAGS_XTS)
+               return 0;
+
+       if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
+           !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
+               return -EINVAL;
 
        switch (mode & AES_FLAGS_OPMODE_MASK) {
        case AES_FLAGS_CFB8:
@@ -1113,14 +1141,10 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
        }
        ctx->is_aead = false;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
        rctx = skcipher_request_ctx(req);
        rctx->mode = mode;
 
-       if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB &&
+       if (opmode != AES_FLAGS_ECB &&
            !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
                unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 
@@ -1130,7 +1154,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
                                                 ivsize, 0);
        }
 
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -1242,8 +1266,15 @@ static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
 static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_start;
 
        return 0;
@@ -1252,8 +1283,15 @@ static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
 static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_ctr_start;
 
        return 0;
@@ -1290,7 +1328,7 @@ static struct skcipher_alg aes_algs[] = {
 {
        .base.cra_name          = "ofb(aes)",
        .base.cra_driver_name   = "atmel-ofb-aes",
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
+       .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct atmel_aes_ctx),
 
        .init                   = atmel_aes_init_tfm,
@@ -1691,20 +1729,15 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
 {
        struct atmel_aes_base_ctx *ctx;
        struct atmel_aes_reqctx *rctx;
-       struct atmel_aes_dev *dd;
 
        ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        ctx->block_size = AES_BLOCK_SIZE;
        ctx->is_aead = true;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
        rctx = aead_request_ctx(req);
        rctx->mode = AES_FLAGS_GCM | mode;
 
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1742,8 +1775,15 @@ static int atmel_aes_gcm_decrypt(struct aead_request *req)
 static int atmel_aes_gcm_init(struct crypto_aead *tfm)
 {
        struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_gcm_start;
 
        return 0;
@@ -1819,12 +1859,8 @@ static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
         * the order of the ciphered tweak bytes needs to be reversed before
         * writing them into the ODATARx registers.
         */
-       for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
-               u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
-
-               tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
-               tweak_bytes[i] = tmp;
-       }
+       for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
+               swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
 
        /* Process the data. */
        atmel_aes_write_ctrl(dd, use_dma, NULL);
@@ -1849,6 +1885,13 @@ static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
        if (err)
                return err;
 
+       crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+       crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
+                                 CRYPTO_TFM_REQ_MASK);
+       err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+       if (err)
+               return err;
+
        memcpy(ctx->base.key, key, keylen/2);
        memcpy(ctx->key2, key + keylen/2, keylen/2);
        ctx->base.keylen = keylen/2;
@@ -1869,18 +1912,40 @@ static int atmel_aes_xts_decrypt(struct skcipher_request *req)
 static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct atmel_aes_dev *dd;
+       const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
 
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
+
+       ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
+                                                 CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback_tfm))
+               return PTR_ERR(ctx->fallback_tfm);
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
+                                   crypto_skcipher_reqsize(ctx->fallback_tfm));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_xts_start;
 
        return 0;
 }
 
+static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+       struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       crypto_free_skcipher(ctx->fallback_tfm);
+}
+
 static struct skcipher_alg aes_xts_alg = {
        .base.cra_name          = "xts(aes)",
        .base.cra_driver_name   = "atmel-xts-aes",
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct atmel_aes_xts_ctx),
+       .base.cra_flags         = CRYPTO_ALG_NEED_FALLBACK,
 
        .min_keysize            = 2 * AES_MIN_KEY_SIZE,
        .max_keysize            = 2 * AES_MAX_KEY_SIZE,
@@ -1889,6 +1954,7 @@ static struct skcipher_alg aes_xts_alg = {
        .encrypt                = atmel_aes_xts_encrypt,
        .decrypt                = atmel_aes_xts_decrypt,
        .init                   = atmel_aes_xts_init_tfm,
+       .exit                   = atmel_aes_xts_exit_tfm,
 };
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
@@ -2075,6 +2141,11 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
 {
        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
+       struct atmel_aes_dev *dd;
+
+       dd = atmel_aes_dev_alloc(&ctx->base);
+       if (!dd)
+               return -ENODEV;
 
        ctx->auth = atmel_sha_authenc_spawn(auth_mode);
        if (IS_ERR(ctx->auth))
@@ -2082,6 +2153,8 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
 
        crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
                                      auth_reqsize));
+       ctx->base.dd = dd;
+       ctx->base.dd->ctx = &ctx->base;
        ctx->base.start = atmel_aes_authenc_start;
 
        return 0;
@@ -2127,7 +2200,6 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
        struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
        u32 authsize = crypto_aead_authsize(tfm);
        bool enc = (mode & AES_FLAGS_ENCRYPT);
-       struct atmel_aes_dev *dd;
 
        /* Compute text length. */
        if (!enc && req->cryptlen < authsize)
@@ -2146,11 +2218,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
        ctx->block_size = AES_BLOCK_SIZE;
        ctx->is_aead = true;
 
-       dd = atmel_aes_find_dev(ctx);
-       if (!dd)
-               return -ENODEV;
-
-       return atmel_aes_handle_queue(dd, &req->base);
+       return atmel_aes_handle_queue(ctx->dd, &req->base);
 }
 
 static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
@@ -2358,7 +2426,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 
 static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
 {
-       alg->cra_flags = CRYPTO_ALG_ASYNC;
+       alg->cra_flags |= CRYPTO_ALG_ASYNC;
        alg->cra_alignmask = 0xf;
        alg->cra_priority = ATMEL_AES_PRIORITY;
        alg->cra_module = THIS_MODULE;
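
The XTS path above now rejects requests shorter than one block and routes block-unaligned lengths (the ciphertext-stealing case the IP cannot handle) to a software fallback allocated with CRYPTO_ALG_NEED_FALLBACK. A generic sketch of the dispatch decision, with hypothetical hw/sw stand-ins rather than the real crypto API calls shown above:

#include <stdio.h>

#define XTS_BLOCK_SIZE 16u      /* the AES block size used by xts(aes) */

static int hw_xts_crypt(unsigned int len)
{
        printf("hw path: %u bytes\n", len);     /* aligned: the IP handles it */
        return 0;
}

static int sw_xts_crypt(unsigned int len)
{
        printf("sw fallback: %u bytes\n", len); /* ciphertext stealing */
        return 0;
}

static int xts_crypt(unsigned int cryptlen)
{
        if (cryptlen < XTS_BLOCK_SIZE)
                return -22;                     /* -EINVAL: below one block */
        if (cryptlen % XTS_BLOCK_SIZE)
                return sw_xts_crypt(cryptlen);
        return hw_xts_crypt(cryptlen);
}

int main(void)
{
        unsigned int lens[] = { 8, 16, 25, 64 };

        for (unsigned int i = 0; i < 4; i++)
                printf("len %2u -> rc %d\n", lens[i], xts_crypt(lens[i]));
        return 0;
}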
index 6f01c51e3c37644d903ba9b2b0a94336fa92cb77..e30786ec9f2d4d268e4fde6f9a710998f3a8291d 100644 (file)
@@ -196,23 +196,15 @@ static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
                atmel_tdes_write(dd, offset, *value);
 }
 
-static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+static struct atmel_tdes_dev *atmel_tdes_dev_alloc(void)
 {
-       struct atmel_tdes_dev *tdes_dd = NULL;
-       struct atmel_tdes_dev *tmp;
+       struct atmel_tdes_dev *tdes_dd;
 
        spin_lock_bh(&atmel_tdes.lock);
-       if (!ctx->dd) {
-               list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
-                       tdes_dd = tmp;
-                       break;
-               }
-               ctx->dd = tdes_dd;
-       } else {
-               tdes_dd = ctx->dd;
-       }
+       /* One TDES IP per SoC. */
+       tdes_dd = list_first_entry_or_null(&atmel_tdes.dev_list,
+                                          struct atmel_tdes_dev, list);
        spin_unlock_bh(&atmel_tdes.lock);
-
        return tdes_dd;
 }
 
@@ -320,7 +312,7 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                if (count != dd->dma_size) {
                        err = -EINVAL;
-                       pr_err("not all data converted: %zu\n", count);
+                       dev_dbg(dd->dev, "not all data converted: %zu\n", count);
                }
        }
 
@@ -337,24 +329,24 @@ static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
        dd->buflen &= ~(DES_BLOCK_SIZE - 1);
 
        if (!dd->buf_in || !dd->buf_out) {
-               dev_err(dd->dev, "unable to alloc pages.\n");
+               dev_dbg(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }
 
        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                                        dd->buflen, DMA_TO_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
-               err = -EINVAL;
+       err = dma_mapping_error(dd->dev, dd->dma_addr_in);
+       if (err) {
+               dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
                goto err_map_in;
        }
 
        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                                        dd->buflen, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
-               err = -EINVAL;
+       err = dma_mapping_error(dd->dev, dd->dma_addr_out);
+       if (err) {
+               dev_dbg(dd->dev, "dma %zd bytes error\n", dd->buflen);
                goto err_map_out;
        }
 
@@ -367,8 +359,6 @@ err_map_in:
 err_alloc:
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
-       if (err)
-               pr_err("error: %d\n", err);
        return err;
 }
 
@@ -520,14 +510,14 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
 
                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_dbg(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }
 
                err = dma_map_sg(dd->dev, dd->out_sg, 1,
                                DMA_FROM_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_dbg(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1,
                                DMA_TO_DEVICE);
                        return -EINVAL;
@@ -646,7 +636,6 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
        rctx->mode &= TDES_FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
        dd->ctx = ctx;
-       ctx->dd = dd;
 
        err = atmel_tdes_write_ctrl(dd);
        if (!err)
@@ -679,7 +668,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                        if (count != dd->dma_size) {
                                err = -EINVAL;
-                               pr_err("not all data converted: %zu\n", count);
+                               dev_dbg(dd->dev, "not all data converted: %zu\n", count);
                        }
                }
        }
@@ -691,11 +680,15 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
        struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
+       struct device *dev = ctx->dd->dev;
+
+       if (!req->cryptlen)
+               return 0;
 
        switch (mode & TDES_FLAGS_OPMODE_MASK) {
        case TDES_FLAGS_CFB8:
                if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB8 blocks\n");
+                       dev_dbg(dev, "request size is not an exact number of CFB8 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB8_BLOCK_SIZE;
@@ -703,7 +696,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        case TDES_FLAGS_CFB16:
                if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB16 blocks\n");
+                       dev_dbg(dev, "request size is not an exact number of CFB16 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB16_BLOCK_SIZE;
@@ -711,7 +704,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        case TDES_FLAGS_CFB32:
                if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of CFB32 blocks\n");
+                       dev_dbg(dev, "request size is not an exact number of CFB32 blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB32_BLOCK_SIZE;
@@ -719,7 +712,7 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
 
        default:
                if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
-                       pr_err("request size is not exact amount of DES blocks\n");
+                       dev_dbg(dev, "request size is not an exact number of DES blocks\n");
                        return -EINVAL;
                }
                ctx->block_size = DES_BLOCK_SIZE;
@@ -897,14 +890,13 @@ static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
 static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
 {
        struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       struct atmel_tdes_dev *dd;
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
 
-       dd = atmel_tdes_find_dev(ctx);
-       if (!dd)
+       ctx->dd = atmel_tdes_dev_alloc();
+       if (!ctx->dd)
                return -ENODEV;
 
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
+
        return 0;
 }
 
@@ -999,7 +991,7 @@ static struct skcipher_alg tdes_algs[] = {
 {
        .base.cra_name          = "ofb(des)",
        .base.cra_driver_name   = "atmel-ofb-des",
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
+       .base.cra_blocksize     = 1,
        .base.cra_alignmask     = 0x7,
 
        .min_keysize            = DES_KEY_SIZE,
index 91808402e0bf2a13cc25ce333fe6cfb15a4a3888..2ecb0e1f65d8d5b84cab6940abde841af9dbaa50 100644 (file)
@@ -300,6 +300,9 @@ static int __sev_platform_shutdown_locked(int *error)
        struct sev_device *sev = psp_master->sev_data;
        int ret;
 
+       if (sev->state == SEV_STATE_UNINIT)
+               return 0;
+
        ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
        if (ret)
                return ret;
@@ -1019,6 +1022,20 @@ e_err:
        return ret;
 }
 
+static void sev_firmware_shutdown(struct sev_device *sev)
+{
+       sev_platform_shutdown(NULL);
+
+       if (sev_es_tmr) {
+               /* The TMR area was encrypted, flush it from the cache */
+               wbinvd_on_all_cpus();
+
+               free_pages((unsigned long)sev_es_tmr,
+                          get_order(SEV_ES_TMR_SIZE));
+               sev_es_tmr = NULL;
+       }
+}
+
 void sev_dev_destroy(struct psp_device *psp)
 {
        struct sev_device *sev = psp->sev_data;
@@ -1026,6 +1043,8 @@ void sev_dev_destroy(struct psp_device *psp)
        if (!sev)
                return;
 
+       sev_firmware_shutdown(sev);
+
        if (sev->misc)
                kref_put(&misc_dev->refcount, sev_exit);
 
@@ -1056,21 +1075,6 @@ void sev_pci_init(void)
        if (sev_get_api_version())
                goto err;
 
-       /*
-        * If platform is not in UNINIT state then firmware upgrade and/or
-        * platform INIT command will fail. These command require UNINIT state.
-        *
-        * In a normal boot we should never run into case where the firmware
-        * is not in UNINIT state on boot. But in case of kexec boot, a reboot
-        * may not go through a typical shutdown sequence and may leave the
-        * firmware in INIT or WORKING state.
-        */
-
-       if (sev->state != SEV_STATE_UNINIT) {
-               sev_platform_shutdown(NULL);
-               sev->state = SEV_STATE_UNINIT;
-       }
-
        if (sev_version_greater_or_equal(0, 15) &&
            sev_update_firmware(sev->dev) == 0)
                sev_get_api_version();
@@ -1115,17 +1119,10 @@ err:
 
 void sev_pci_exit(void)
 {
-       if (!psp_master->sev_data)
-               return;
-
-       sev_platform_shutdown(NULL);
+       struct sev_device *sev = psp_master->sev_data;
 
-       if (sev_es_tmr) {
-               /* The TMR area was encrypted, flush it from the cache */
-               wbinvd_on_all_cpus();
+       if (!sev)
+               return;
 
-               free_pages((unsigned long)sev_es_tmr,
-                          get_order(SEV_ES_TMR_SIZE));
-               sev_es_tmr = NULL;
-       }
+       sev_firmware_shutdown(sev);
 }
index 6fb6ba35f89d420e817b4ba59b8af393c1ece1fa..88c672ad27e445776184b4e88c8ef8517f3a4a6b 100644 (file)
@@ -241,6 +241,17 @@ e_err:
        return ret;
 }
 
+static void sp_pci_shutdown(struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct sp_device *sp = dev_get_drvdata(dev);
+
+       if (!sp)
+               return;
+
+       sp_destroy(sp);
+}
+
 static void sp_pci_remove(struct pci_dev *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -349,6 +360,12 @@ static const struct sp_dev_vdata dev_vdata[] = {
 #endif
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
                .psp_vdata = &pspv3,
+#endif
+       },
+       {       /* 5 */
+               .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+               .psp_vdata = &pspv2,
 #endif
        },
 };
@@ -359,6 +376,7 @@ static const struct pci_device_id sp_pci_table[] = {
        { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
        { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
        { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
+       { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
        /* Last entry must be zero */
        { 0, }
 };
@@ -371,6 +389,7 @@ static struct pci_driver sp_pci_driver = {
        .id_table = sp_pci_table,
        .probe = sp_pci_probe,
        .remove = sp_pci_remove,
+       .shutdown = sp_pci_shutdown,
        .driver.pm = &sp_pci_pm_ops,
 };
 
index 8b0640fb04be674812e77d6be491083d73501b4e..65a641396c07fe88043c707a49066f1584084842 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #include "hpre.h"
 #define HPRE_PREFETCH_DISABLE          BIT(30)
 #define HPRE_SVA_DISABLE_READY         (BIT(4) | BIT(8))
 
+/* clock gate */
+#define HPRE_CLKGATE_CTL               0x301a10
+#define HPRE_PEH_CFG_AUTO_GATE         0x301a2c
+#define HPRE_CLUSTER_DYN_CTL           0x302010
+#define HPRE_CORE_SHB_CFG              0x302088
+#define HPRE_CLKGATE_CTL_EN            BIT(0)
+#define HPRE_PEH_CFG_AUTO_GATE_EN      BIT(0)
+#define HPRE_CLUSTER_DYN_CTL_EN                BIT(0)
+#define HPRE_CORE_GATE_EN              (BIT(30) | BIT(31))
+
 #define HPRE_AM_OOO_SHUTDOWN_ENB       0x301044
 #define HPRE_AM_OOO_SHUTDOWN_ENABLE    BIT(0)
 #define HPRE_WR_MSI_PORT               BIT(2)
@@ -417,12 +428,63 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void hpre_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+       val |= HPRE_CLKGATE_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+       val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+       val |= HPRE_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+       val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+       val |= HPRE_CLUSTER_DYN_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+       val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+       val |= HPRE_CORE_GATE_EN;
+       writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
+static void hpre_disable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HPRE_CLKGATE_CTL);
+       val &= ~HPRE_CLKGATE_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLKGATE_CTL);
+
+       val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+       val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
+
+       val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
+       val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+       writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
+
+       val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
+       val &= ~HPRE_CORE_GATE_EN;
+       writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+}
+
 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 {
        struct device *dev = &qm->pdev->dev;
        u32 val;
        int ret;
 
+       /* disable dynamic clock gating before SRAM init */
+       hpre_disable_clock_gate(qm);
+
        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
        writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
        writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
@@ -473,6 +535,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
        /* Config data buffer pasid needed by Kunpeng 920 */
        hpre_config_pasid(qm);
 
+       hpre_enable_clock_gate(qm);
+
        return ret;
 }
 
@@ -595,10 +659,15 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos)
 {
        struct hpre_debugfs_file *file = filp->private_data;
+       struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
@@ -608,18 +677,25 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
                val = hpre_cluster_inqry_read(file);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        spin_unlock_irq(&file->lock);
+
+       hisi_qm_put_dfx_access(qm);
        ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
                                     size_t count, loff_t *pos)
 {
        struct hpre_debugfs_file *file = filp->private_data;
+       struct hisi_qm *qm = hpre_file_to_qm(file);
        char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
        unsigned long val;
        int len, ret;
@@ -639,6 +715,10 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->type) {
        case HPRE_CLEAR_ENABLE:
@@ -655,12 +735,12 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
                ret = -EINVAL;
                goto err_input;
        }
-       spin_unlock_irq(&file->lock);
 
-       return count;
+       ret = count;
 
 err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -700,6 +780,24 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
                         hpre_debugfs_atomic64_set, "%llu\n");
 
+static int hpre_com_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);
+
+static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);
+
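DEFINE_SHOW_ATTRIBUTE() is what generates the hpre_com_regs_fops and
hpre_cluster_regs_fops referenced later in this patch; for each name the macro
(include/linux/seq_file.h) expands to roughly:

	static int hpre_com_regs_open(struct inode *inode, struct file *file)
	{
		/* inode->i_private is the data argument of debugfs_create_file() */
		return single_open(file, hpre_com_regs_show, inode->i_private);
	}

	static const struct file_operations hpre_com_regs_fops = {
		.owner		= THIS_MODULE,
		.open		= hpre_com_regs_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

single_open() stashes its data argument in seq_file->private, which is how the
regset handed to debugfs_create_file() below reaches the show callbacks as
s->private.
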
 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
                                    enum hpre_ctrl_dbgfs_file type, int indx)
 {
@@ -737,8 +835,11 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
        regset->regs = hpre_com_dfx_regs;
        regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
        regset->base = qm->io_base;
+       regset->dev = dev;
+
+       debugfs_create_file("regs", 0444, qm->debug.debug_root,
+                           regset, &hpre_com_regs_fops);
 
-       debugfs_create_regset32("regs", 0444,  qm->debug.debug_root, regset);
        return 0;
 }
 
@@ -764,8 +865,10 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
                regset->regs = hpre_cluster_dfx_regs;
                regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
                regset->base = qm->io_base + hpre_cluster_offsets[i];
+               regset->dev = dev;
 
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset,
+                                   &hpre_cluster_regs_fops);
                ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
                                               i + HPRE_CLUSTER_CTRL);
                if (ret)
@@ -1017,6 +1120,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_with_alg_register;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_with_alg_register:
@@ -1040,6 +1145,7 @@ static void hpre_remove(struct pci_dev *pdev)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int ret;
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &hpre_devices);
        hisi_qm_alg_unregister(qm, &hpre_devices);
        if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
@@ -1062,6 +1168,10 @@ static void hpre_remove(struct pci_dev *pdev)
        hisi_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops hpre_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
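With CONFIG_PM enabled, SET_RUNTIME_PM_OPS() (include/linux/pm.h) simply names
the runtime callbacks, so the table above is equivalent to the sketch below;
with CONFIG_PM disabled it expands to nothing and the ops table stays empty:

	static const struct dev_pm_ops hpre_pm_ops = {
		.runtime_suspend = hisi_qm_suspend,	/* last user dropped */
		.runtime_resume	 = hisi_qm_resume,	/* first new user */
		.runtime_idle	 = NULL,		/* default idle handling */
	};
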
 static const struct pci_error_handlers hpre_err_handler = {
        .error_detected         = hisi_qm_dev_err_detected,
        .slot_reset             = hisi_qm_dev_slot_reset,
@@ -1078,6 +1188,7 @@ static struct pci_driver hpre_pci_driver = {
                                  hisi_qm_sriov_configure : NULL,
        .err_handler            = &hpre_err_handler,
        .shutdown               = hisi_qm_dev_shutdown,
+       .driver.pm              = &hpre_pm_ops,
 };
 
 static void hpre_register_debugfs(void)
index 1d67f94a1d568ffbc93ace15d6f7076200f7525b..369562d34d66a7ffb7ecbcd0382cd64475a1a1fa 100644 (file)
@@ -4,12 +4,12 @@
 #include <linux/acpi.h>
 #include <linux/aer.h>
 #include <linux/bitmap.h>
-#include <linux/debugfs.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
 #include <linux/io.h>
 #include <linux/irqreturn.h>
 #include <linux/log2.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/uacce.h>
 #define QM_QOS_MAX_CIR_S               11
 #define QM_QOS_VAL_MAX_LEN             32
 
+#define QM_AUTOSUSPEND_DELAY           3000
+
 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
        (((hop_num) << QM_CQ_HOP_NUM_SHIFT)     | \
        ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)      | \
@@ -734,6 +736,34 @@ static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
        return QM_IRQ_NUM_VF_V3;
 }
 
+static int qm_pm_get_sync(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+       int ret;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return 0;
+
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to get_sync(%d).\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void qm_pm_put_sync(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+}
+
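Both helpers deliberately no-op for VFs and pre-v3 hardware, so callers need no
version checks of their own. A usage sketch with a hypothetical helper, showing
the pairing that hisi_qm_create_qp()/hisi_qm_release_qp() follow later in this
patch:

	static int example_mmio_op(struct hisi_qm *qm)	/* hypothetical caller */
	{
		int ret;

		ret = qm_pm_get_sync(qm);	/* resume the PF if it autosuspended */
		if (ret)
			return ret;

		/* ... device registers can be touched safely here ... */

		qm_pm_put_sync(qm);		/* mark busy and rearm autosuspend */
		return 0;
	}
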
 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
 {
        u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
@@ -1173,16 +1203,13 @@ static struct hisi_qm *file_to_qm(struct debugfs_file *file)
        return container_of(debug, struct hisi_qm, debug);
 }
 
-static u32 current_q_read(struct debugfs_file *file)
+static u32 current_q_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
 }
 
-static int current_q_write(struct debugfs_file *file, u32 val)
+static int current_q_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val >= qm->debug.curr_qm_qp_num)
@@ -1199,18 +1226,14 @@ static int current_q_write(struct debugfs_file *file, u32 val)
        return 0;
 }
 
-static u32 clear_enable_read(struct debugfs_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
 }
 
 /* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
-static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
+static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        if (rd_clr_ctrl > 1)
                return -EINVAL;
 
@@ -1219,16 +1242,13 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
        return 0;
 }
 
-static u32 current_qm_read(struct debugfs_file *file)
+static u32 current_qm_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + QM_DFX_MB_CNT_VF);
 }
 
-static int current_qm_write(struct debugfs_file *file, u32 val)
+static int current_qm_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val > qm->vfs_num)
@@ -1259,29 +1279,39 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
 {
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[QM_DBG_TMP_BUF_LEN];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_QM:
-               val = current_qm_read(file);
+               val = current_qm_read(qm);
                break;
        case CURRENT_Q:
-               val = current_q_read(file);
+               val = current_q_read(qm);
                break;
        case CLEAR_ENABLE:
-               val = clear_enable_read(file);
+               val = clear_enable_read(qm);
                break;
        default:
-               mutex_unlock(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        mutex_unlock(&file->lock);
 
+       hisi_qm_put_dfx_access(qm);
        ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       mutex_unlock(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
@@ -1289,6 +1319,7 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
 {
        struct debugfs_file *file = filp->private_data;
        enum qm_debug_file index = file->index;
+       struct hisi_qm *qm = file_to_qm(file);
        unsigned long val;
        char tbuf[QM_DBG_TMP_BUF_LEN];
        int len, ret;
@@ -1308,22 +1339,28 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        mutex_lock(&file->lock);
        switch (index) {
        case CURRENT_QM:
-               ret = current_qm_write(file, val);
+               ret = current_qm_write(qm, val);
                break;
        case CURRENT_Q:
-               ret = current_q_write(file, val);
+               ret = current_q_write(qm, val);
                break;
        case CLEAR_ENABLE:
-               ret = clear_enable_write(file, val);
+               ret = clear_enable_write(qm, val);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file->lock);
 
+       hisi_qm_put_dfx_access(qm);
+
        if (ret)
                return ret;
 
@@ -1337,13 +1374,8 @@ static const struct file_operations qm_debug_fops = {
        .write = qm_debug_write,
 };
 
-struct qm_dfx_registers {
-       char  *reg_name;
-       u64   reg_offset;
-};
-
 #define CNT_CYC_REGS_NUM               10
-static struct qm_dfx_registers qm_dfx_regs[] = {
+static const struct debugfs_reg32 qm_dfx_regs[] = {
        /* XXX_CNT are read-clear registers */
        {"QM_ECC_1BIT_CNT               ",  0x104000ull},
        {"QM_ECC_MBIT_CNT               ",  0x104008ull},
@@ -1369,31 +1401,59 @@ static struct qm_dfx_registers qm_dfx_regs[] = {
        {"QM_DFX_FF_ST5                 ",  0x1040dcull},
        {"QM_DFX_FF_ST6                 ",  0x1040e0ull},
        {"QM_IN_IDLE_ST                 ",  0x1040e4ull},
-       { NULL, 0}
 };
 
-static struct qm_dfx_registers qm_vf_dfx_regs[] = {
+static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
        {"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
-       { NULL, 0}
 };
 
-static int qm_regs_show(struct seq_file *s, void *unused)
+/**
+ * hisi_qm_regs_dump() - Dump registers' values.
+ * @s: debugfs file handle.
+ * @regset: accelerator registers information.
+ *
+ * Dump accelerator registers.
+ */
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
 {
-       struct hisi_qm *qm = s->private;
-       struct qm_dfx_registers *regs;
+       struct pci_dev *pdev = to_pci_dev(regset->dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       const struct debugfs_reg32 *regs = regset->regs;
+       int regs_len = regset->nregs;
+       int i, ret;
        u32 val;
 
-       if (qm->fun_type == QM_HW_PF)
-               regs = qm_dfx_regs;
-       else
-               regs = qm_vf_dfx_regs;
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return;
 
-       while (regs->reg_name) {
-               val = readl(qm->io_base + regs->reg_offset);
-               seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
-               regs++;
+       for (i = 0; i < regs_len; i++) {
+               val = readl(regset->base + regs[i].offset);
+               seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
        }
 
+       hisi_qm_put_dfx_access(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
+
+static int qm_regs_show(struct seq_file *s, void *unused)
+{
+       struct hisi_qm *qm = s->private;
+       struct debugfs_regset32 regset;
+
+       if (qm->fun_type == QM_HW_PF) {
+               regset.regs = qm_dfx_regs;
+               regset.nregs = ARRAY_SIZE(qm_dfx_regs);
+       } else {
+               regset.regs = qm_vf_dfx_regs;
+               regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
+       }
+
+       regset.base = qm->io_base;
+       regset.dev = &qm->pdev->dev;
+
+       hisi_qm_regs_dump(s, &regset);
+
        return 0;
 }
 
@@ -1823,16 +1883,24 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
        if (*pos)
                return 0;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
-       /* Judge if the instance is being reset. */
-       if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
-               return 0;
+       /* Judge if the instance is being reset. */
+       if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+               ret = 0;
+               goto put_dfx_access;
+       }
 
-       if (count > QM_DBG_WRITE_LEN)
-               return -ENOSPC;
+       if (count > QM_DBG_WRITE_LEN) {
+               ret = -ENOSPC;
+               goto put_dfx_access;
+       }
 
        cmd_buf = memdup_user_nul(buffer, count);
-       if (IS_ERR(cmd_buf))
-               return PTR_ERR(cmd_buf);
+       if (IS_ERR(cmd_buf)) {
+               ret = PTR_ERR(cmd_buf);
+               goto put_dfx_access;
+       }
 
        cmd_buf_tmp = strchr(cmd_buf, '\n');
        if (cmd_buf_tmp) {
@@ -1843,12 +1911,16 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
        ret = qm_cmd_write_dump(qm, cmd_buf);
        if (ret) {
                kfree(cmd_buf);
-               return ret;
+               goto put_dfx_access;
        }
 
        kfree(cmd_buf);
 
-       return count;
+       ret = count;
+
+put_dfx_access:
+       hisi_qm_put_dfx_access(qm);
+       return ret;
 }
 
 static const struct file_operations qm_cmd_fops = {
@@ -2445,11 +2517,19 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
 {
        struct hisi_qp *qp;
+       int ret;
+
+       ret = qm_pm_get_sync(qm);
+       if (ret)
+               return ERR_PTR(ret);
 
        down_write(&qm->qps_lock);
        qp = qm_create_qp_nolock(qm, alg_type);
        up_write(&qm->qps_lock);
 
+       if (IS_ERR(qp))
+               qm_pm_put_sync(qm);
+
        return qp;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
@@ -2475,6 +2555,8 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
        idr_remove(&qm->qp_idr, qp->qp_id);
 
        up_write(&qm->qps_lock);
+
+       qm_pm_put_sync(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
 
@@ -3200,6 +3282,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
        init_rwsem(&qm->qps_lock);
        qm->qp_in_used = 0;
        qm->misc_ctl = false;
+       if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+               if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
                        dev_info(&pdev->dev, "_PS0 and _PR0 are not defined\n");
+       }
 }
 
 static void qm_cmd_uninit(struct hisi_qm *qm)
@@ -4057,10 +4143,15 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
        u32 qos_val, ir;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        /* Mailbox and reset cannot be operated at the same time */
        if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
                pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
-               return  -EAGAIN;
+               ret = -EAGAIN;
+               goto err_put_dfx_access;
        }
 
        if (qm->fun_type == QM_HW_PF) {
@@ -4079,6 +4170,8 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
 
 err_get_status:
        clear_bit(QM_RESETTING, &qm->misc_ctl);
+err_put_dfx_access:
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -4159,15 +4252,23 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
 
        fun_index = device * 8 + function;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret) {
+               ret = -EINVAL;
+               goto err_get_status;
+       }
+
        ret = qm_func_shaper_enable(qm, fun_index, val);
        if (ret) {
                pci_err(qm->pdev, "failed to enable function shaper!\n");
                ret = -EINVAL;
-               goto err_get_status;
+               goto err_put_sync;
        }
 
-       ret =  count;
+       ret = count;
 
+err_put_sync:
+       qm_pm_put_sync(qm);
 err_get_status:
        clear_bit(QM_RESETTING, &qm->misc_ctl);
        return ret;
@@ -4245,7 +4346,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
  */
 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 {
-       struct qm_dfx_registers *regs;
+       const struct debugfs_reg32 *regs;
        int i;
 
        /* clear current_qm */
@@ -4264,7 +4365,7 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 
        regs = qm_dfx_regs;
        for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
-               readl(qm->io_base + regs->reg_offset);
+               readl(qm->io_base + regs->offset);
                regs++;
        }
 
@@ -4287,19 +4388,23 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int pre_existing_vfs, num_vfs, total_vfs, ret;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret)
+               return ret;
+
        total_vfs = pci_sriov_get_totalvfs(pdev);
        pre_existing_vfs = pci_num_vf(pdev);
        if (pre_existing_vfs) {
                pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
                        pre_existing_vfs);
-               return 0;
+               goto err_put_sync;
        }
 
        num_vfs = min_t(int, max_vfs, total_vfs);
        ret = qm_vf_q_assign(qm, num_vfs);
        if (ret) {
                pci_err(pdev, "Can't assign queues for VF!\n");
-               return ret;
+               goto err_put_sync;
        }
 
        qm->vfs_num = num_vfs;
@@ -4308,12 +4413,16 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
        if (ret) {
                pci_err(pdev, "Can't enable VF!\n");
                qm_clear_vft_config(qm);
-               return ret;
+               goto err_put_sync;
        }
 
        pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
 
        return num_vfs;
+
+err_put_sync:
+       qm_pm_put_sync(qm);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
 
@@ -4328,6 +4437,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
+       int ret;
 
        if (pci_vfs_assigned(pdev)) {
                pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@@ -4343,8 +4453,13 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
        pci_disable_sriov(pdev);
        /* clear vf function shaper configure array */
        memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+       ret = qm_clear_vft_config(qm);
+       if (ret)
+               return ret;
 
-       return qm_clear_vft_config(qm);
+       qm_pm_put_sync(qm);
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
 
@@ -5164,11 +5279,18 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
        struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
        int ret;
 
+       ret = qm_pm_get_sync(qm);
+       if (ret) {
+               clear_bit(QM_RST_SCHED, &qm->misc_ctl);
+               return;
+       }
+
        /* reset pcie device controller */
        ret = qm_controller_reset(qm);
        if (ret)
                dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
 
+       qm_pm_put_sync(qm);
 }
 
 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
@@ -5680,6 +5802,194 @@ err_pci_init:
 }
 EXPORT_SYMBOL_GPL(hisi_qm_init);
 
+/**
+ * hisi_qm_get_dfx_access() - Try to get dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Try to get dfx access; on success the caller may read device state.
+ *
+ * If the device is runtime-suspended, return failure; otherwise
+ * bump up the runtime PM usage counter.
+ */
+int hisi_qm_get_dfx_access(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (pm_runtime_suspended(dev)) {
+               dev_info(dev, "cannot read/write - device is suspended\n");
+               return -EAGAIN;
+       }
+
+       return qm_pm_get_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
+
+/**
+ * hisi_qm_put_dfx_access() - Put dfx access.
+ * @qm: pointer to accelerator device.
+ *
+ * Put dfx access and drop the runtime PM usage counter.
+ */
+void hisi_qm_put_dfx_access(struct hisi_qm *qm)
+{
+       qm_pm_put_sync(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
+
+/**
+ * hisi_qm_pm_init() - Initialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Function that initializes qm runtime PM.
+ */
+void hisi_qm_pm_init(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_put_noidle(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
+
+/**
+ * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
+ * @qm: pointer to accelerator device.
+ *
+ * Function that uninitializes qm runtime PM.
+ */
+void hisi_qm_pm_uninit(struct hisi_qm *qm)
+{
+       struct device *dev = &qm->pdev->dev;
+
+       if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+               return;
+
+       pm_runtime_get_noresume(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
+
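The noidle/noresume calls balance a runtime PM reference the PCI core is
assumed to hold across the driver's bound lifetime (taken before probe, dropped
after remove): hisi_qm_pm_init() releases it so the idle device can autosuspend
after QM_AUTOSUSPEND_DELAY (3 s), and hisi_qm_pm_uninit() retakes it before
teardown. A sketch of the expected call sites, with a hypothetical driver:

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct hisi_qm *qm;

		/* ... allocate and start qm while the core's PM ref is held ... */
		hisi_qm_pm_init(qm);	/* drop the ref: autosuspend may now fire */
		return 0;
	}

	static void example_remove(struct pci_dev *pdev)
	{
		struct hisi_qm *qm = pci_get_drvdata(pdev);

		hisi_qm_pm_uninit(qm);	/* retake the ref before stopping the device */
		/* ... teardown ... */
	}
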
+static int qm_prepare_for_suspend(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+       int ret;
+       u32 val;
+
+       ret = qm->ops->set_msi(qm, false);
+       if (ret) {
+               pci_err(pdev, "failed to disable MSI before suspending!\n");
+               return ret;
+       }
+
+       /* shutdown OOO register */
+       writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
+              qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+
+       ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
+                                        val,
+                                        (val == ACC_MASTER_TRANS_RETURN_RW),
+                                        POLL_PERIOD, POLL_TIMEOUT);
+       if (ret) {
+               pci_emerg(pdev, "Bus lock! Please reset system.\n");
+               return ret;
+       }
+
+       ret = qm_set_pf_mse(qm, false);
+       if (ret)
+               pci_err(pdev, "failed to disable MSE before suspending!\n");
+
+       return ret;
+}
+
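readl_relaxed_poll_timeout() (include/linux/iopoll.h) turns the wait for
quiesced AXI traffic into a sleep-poll loop; the call above behaves roughly
like this sketch, assuming POLL_PERIOD and POLL_TIMEOUT are microsecond values:

	/* rough expansion of the readl_relaxed_poll_timeout() call above */
	ktime_t deadline = ktime_add_us(ktime_get(), POLL_TIMEOUT);
	u32 val;
	int ret = 0;

	for (;;) {
		val = readl_relaxed(qm->io_base + ACC_MASTER_TRANS_RETURN);
		if (val == ACC_MASTER_TRANS_RETURN_RW)
			break;			/* outstanding AXI traffic drained */
		if (ktime_after(ktime_get(), deadline)) {
			ret = -ETIMEDOUT;
			break;
		}
		usleep_range(POLL_PERIOD / 2, POLL_PERIOD);
	}
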
+static int qm_rebuild_for_resume(struct hisi_qm *qm)
+{
+       struct pci_dev *pdev = qm->pdev;
+       int ret;
+
+       ret = qm_set_pf_mse(qm, true);
+       if (ret) {
+               pci_err(pdev, "failed to enable MSE after resuming!\n");
+               return ret;
+       }
+
+       ret = qm->ops->set_msi(qm, true);
+       if (ret) {
+               pci_err(pdev, "failed to enable MSI after resuming!\n");
+               return ret;
+       }
+
+       ret = qm_dev_hw_init(qm);
+       if (ret) {
+               pci_err(pdev, "failed to init device after resuming\n");
+               return ret;
+       }
+
+       qm_cmd_init(qm);
+       hisi_qm_dev_err_init(qm);
+
+       return 0;
+}
+
+/**
+ * hisi_qm_suspend() - Runtime suspend of given device.
+ * @dev: device to suspend.
+ *
+ * Function that suspends the device.
+ */
+int hisi_qm_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       int ret;
+
+       pci_info(pdev, "entering suspended state\n");
+
+       ret = hisi_qm_stop(qm, QM_NORMAL);
+       if (ret) {
+               pci_err(pdev, "failed to stop qm(%d)\n", ret);
+               return ret;
+       }
+
+       ret = qm_prepare_for_suspend(qm);
+       if (ret)
+               pci_err(pdev, "failed to prepare for suspend (%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_suspend);
+
+/**
+ * hisi_qm_resume() - Runtime resume of given device.
+ * @dev: device to resume.
+ *
+ * Function that resumes the device.
+ */
+int hisi_qm_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct hisi_qm *qm = pci_get_drvdata(pdev);
+       int ret;
+
+       pci_info(pdev, "resuming from suspend state\n");
+
+       ret = qm_rebuild_for_resume(qm);
+       if (ret) {
+               pci_err(pdev, "failed to rebuild for resume (%d)\n", ret);
+               return ret;
+       }
+
+       ret = hisi_qm_start(qm);
+       if (ret)
+               pci_err(pdev, "failed to start qm(%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_resume);
+
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
index 035eaf8c442ddfb1307de6756a9fd09a2584a6ab..3068093229a506e749f1c444a441f6b351136a9c 100644 (file)
@@ -4,6 +4,7 @@
 #define HISI_ACC_QM_H
 
 #include <linux/bitfield.h>
+#include <linux/debugfs.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -430,4 +431,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev);
 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
 #endif
index 018415b9840a978c1f05a987adf8f7b6141b489d..d97cf02b1df7509ebf77d9a888569b17de57ed8e 100644 (file)
@@ -157,11 +157,6 @@ struct sec_ctx {
        struct device *dev;
 };
 
-enum sec_endian {
-       SEC_LE = 0,
-       SEC_32BE,
-       SEC_64BE
-};
-
 enum sec_debug_file_index {
        SEC_CLEAR_ENABLE,
index 490db7bccf6192204b48ed818fbc35991263bd52..90551bf38b523a3e3f67974c93a4a855e73de7d5 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #define SEC_MEM_START_INIT_REG 0x301100
 #define SEC_MEM_INIT_DONE_REG          0x301104
 
+/* clock gating */
 #define SEC_CONTROL_REG                0x301200
-#define SEC_TRNG_EN_SHIFT              8
+#define SEC_DYNAMIC_GATE_REG           0x30121c
+#define SEC_CORE_AUTO_GATE             0x30212c
+#define SEC_DYNAMIC_GATE_EN            0x7bff
+#define SEC_CORE_AUTO_GATE_EN          GENMASK(3, 0)
 #define SEC_CLK_GATE_ENABLE            BIT(3)
 #define SEC_CLK_GATE_DISABLE           (~BIT(3))
+
+#define SEC_TRNG_EN_SHIFT              8
 #define SEC_AXI_SHUTDOWN_ENABLE        BIT(12)
 #define SEC_AXI_SHUTDOWN_DISABLE       0xFFFFEFFF
 
@@ -312,31 +319,20 @@ static const struct pci_device_id sec_dev_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, sec_dev_ids);
 
-static u8 sec_get_endian(struct hisi_qm *qm)
+static void sec_set_endian(struct hisi_qm *qm)
 {
        u32 reg;
 
-       /*
-        * As for VF, it is a wrong way to get endian setting by
-        * reading a register of the engine
-        */
-       if (qm->pdev->is_virtfn) {
-               dev_err_ratelimited(&qm->pdev->dev,
-                                   "cannot access a register in VF!\n");
-               return SEC_LE;
-       }
        reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       /* BD little endian mode */
-       if (!(reg & BIT(0)))
-               return SEC_LE;
+       reg &= ~(BIT(1) | BIT(0));
+       if (!IS_ENABLED(CONFIG_64BIT))
+               reg |= BIT(1);
 
-       /* BD 32-bits big endian mode */
-       else if (!(reg & BIT(1)))
-               return SEC_32BE;
-
-       /* BD 64-bits big endian mode */
-       else
-               return SEC_64BE;
+       if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+               reg |= BIT(0);
+
+       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
 }
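
IS_ENABLED() evaluates to a compile-time constant (1 for =y or =m, 0 otherwise),
so both branches in sec_set_endian() fold away at build time; on a 64-bit
little-endian kernel the function reduces to:

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));	/* both clear: 64-bit little-endian BDs */
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);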
 
 static void sec_open_sva_prefetch(struct hisi_qm *qm)
@@ -378,15 +374,43 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void sec_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+       val |= SEC_CLK_GATE_ENABLE;
+       writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+
+       val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
+       val |= SEC_DYNAMIC_GATE_EN;
+       writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);
+
+       val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
+       val |= SEC_CORE_AUTO_GATE_EN;
+       writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
+}
+
+static void sec_disable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       /* Kunpeng 920 needs clock gating to be disabled */
+       val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
+       val &= SEC_CLK_GATE_DISABLE;
+       writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
+}
+
 static int sec_engine_init(struct hisi_qm *qm)
 {
        int ret;
        u32 reg;
 
-       /* disable clock gate control */
-       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       reg &= SEC_CLK_GATE_DISABLE;
-       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+       /* disable clock gate control before mem init */
+       sec_disable_clock_gate(qm);
 
        writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
 
@@ -429,9 +453,9 @@ static int sec_engine_init(struct hisi_qm *qm)
                       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
 
        /* config endian */
-       reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
-       reg |= sec_get_endian(qm);
-       writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
+       sec_set_endian(qm);
+
+       sec_enable_clock_gate(qm);
 
        return 0;
 }
@@ -533,17 +557,14 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
 }
 
-static u32 sec_clear_enable_read(struct sec_debug_file *file)
+static u32 sec_clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file->qm;
-
        return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
                        SEC_CTRL_CNT_CLR_CE_BIT;
 }
 
-static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file->qm;
        u32 tmp;
 
        if (val != 1 && val)
@@ -561,24 +582,34 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
 {
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+       struct hisi_qm *qm = file->qm;
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
        case SEC_CLEAR_ENABLE:
-               val = sec_clear_enable_read(file);
+               val = sec_clear_enable_read(qm);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
 
        spin_unlock_irq(&file->lock);
-       ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
 
+       hisi_qm_put_dfx_access(qm);
+       ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
@@ -586,6 +617,7 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
 {
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
+       struct hisi_qm *qm = file->qm;
        unsigned long val;
        int len, ret;
 
@@ -604,11 +636,15 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
 
        switch (file->index) {
        case SEC_CLEAR_ENABLE:
-               ret = sec_clear_enable_write(file, val);
+               ret = sec_clear_enable_write(qm, val);
                if (ret)
                        goto err_input;
                break;
@@ -617,12 +653,11 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
                goto err_input;
        }
 
-       spin_unlock_irq(&file->lock);
-
-       return count;
+       ret = count;
 
  err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -653,6 +688,15 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
 DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
                         sec_debugfs_atomic64_set, "%lld\n");
 
+static int sec_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_regs);
+
 static int sec_core_debug_init(struct hisi_qm *qm)
 {
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
@@ -671,9 +715,10 @@ static int sec_core_debug_init(struct hisi_qm *qm)
        regset->regs = sec_dfx_regs;
        regset->nregs = ARRAY_SIZE(sec_dfx_regs);
        regset->base = qm->io_base;
+       regset->dev = dev;
 
        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
 
        for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
                atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
@@ -981,10 +1026,13 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_alg_unregister;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_alg_unregister:
-       hisi_qm_alg_unregister(qm, &sec_devices);
+       if (qm->qp_num >= ctx_q_num)
+               hisi_qm_alg_unregister(qm, &sec_devices);
 err_qm_stop:
        sec_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);
@@ -999,6 +1047,7 @@ static void sec_remove(struct pci_dev *pdev)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &sec_devices);
        if (qm->qp_num >= ctx_q_num)
                hisi_qm_alg_unregister(qm, &sec_devices);
@@ -1018,6 +1067,10 @@ static void sec_remove(struct pci_dev *pdev)
        sec_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops sec_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers sec_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
@@ -1033,6 +1086,7 @@ static struct pci_driver sec_pci_driver = {
        .err_handler = &sec_err_handler,
        .sriov_configure = hisi_qm_sriov_configure,
        .shutdown = hisi_qm_dev_shutdown,
+       .driver.pm = &sec_pm_ops,
 };
 
 static void sec_register_debugfs(void)
index f8482ceebf2ab70646b1201d398f938877bd3413..7148201ce76ed31abe505ec464d000aec96af876 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/topology.h>
 #include <linux/uacce.h>
 #define HZIP_DELAY_1_US                1
 #define HZIP_POLL_TIMEOUT_US   1000
 
+/* clock gating */
+#define HZIP_PEH_CFG_AUTO_GATE         0x3011A8
+#define HZIP_PEH_CFG_AUTO_GATE_EN      BIT(0)
+#define HZIP_CORE_GATED_EN             GENMASK(15, 8)
+#define HZIP_CORE_GATED_OOO_EN         BIT(29)
+#define HZIP_CLOCK_GATED_EN            (HZIP_CORE_GATED_EN | \
+                                        HZIP_CORE_GATED_OOO_EN)
+
 static const char hisi_zip_name[] = "hisi_zip";
 static struct dentry *hzip_debugfs_root;
 
@@ -312,6 +321,22 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
                pci_err(qm->pdev, "failed to close sva prefetch\n");
 }
 
+static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
+{
+       u32 val;
+
+       if (qm->ver < QM_HW_V3)
+               return;
+
+       val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
+       val |= HZIP_CLOCK_GATED_EN;
+       writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);
+
+       val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+       val |= HZIP_PEH_CFG_AUTO_GATE_EN;
+       writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
+}
+
 static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 {
        void __iomem *base = qm->io_base;
@@ -359,6 +384,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
               CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
               FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
 
+       hisi_zip_enable_clock_gate(qm);
+
        return 0;
 }
 
@@ -423,17 +450,14 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
        return &hisi_zip->qm;
 }
 
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 clear_enable_read(struct hisi_qm *qm)
 {
-       struct hisi_qm *qm = file_to_qm(file);
-
        return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
                     HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
 }
 
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int clear_enable_write(struct hisi_qm *qm, u32 val)
 {
-       struct hisi_qm *qm = file_to_qm(file);
        u32 tmp;
 
        if (val != 1 && val != 0)
@@ -450,22 +474,33 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
                                        size_t count, loff_t *pos)
 {
        struct ctrl_debug_file *file = filp->private_data;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[HZIP_BUF_SIZE];
        u32 val;
        int ret;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->index) {
        case HZIP_CLEAR_ENABLE:
-               val = clear_enable_read(file);
+               val = clear_enable_read(qm);
                break;
        default:
-               spin_unlock_irq(&file->lock);
-               return -EINVAL;
+               goto err_input;
        }
        spin_unlock_irq(&file->lock);
+
+       hisi_qm_put_dfx_access(qm);
        ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+
+err_input:
+       spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
+       return -EINVAL;
 }
 
 static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
@@ -473,6 +508,7 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
                                         size_t count, loff_t *pos)
 {
        struct ctrl_debug_file *file = filp->private_data;
+       struct hisi_qm *qm = file_to_qm(file);
        char tbuf[HZIP_BUF_SIZE];
        unsigned long val;
        int len, ret;
@@ -491,10 +527,14 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;
 
+       ret = hisi_qm_get_dfx_access(qm);
+       if (ret)
+               return ret;
+
        spin_lock_irq(&file->lock);
        switch (file->index) {
        case HZIP_CLEAR_ENABLE:
-               ret = clear_enable_write(file, val);
+               ret = clear_enable_write(qm, val);
                if (ret)
                        goto err_input;
                break;
@@ -502,12 +542,12 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
                ret = -EINVAL;
                goto err_input;
        }
-       spin_unlock_irq(&file->lock);
 
-       return count;
+       ret = count;
 
 err_input:
        spin_unlock_irq(&file->lock);
+       hisi_qm_put_dfx_access(qm);
        return ret;
 }
 
@@ -538,6 +578,15 @@ static int zip_debugfs_atomic64_get(void *data, u64 *val)
 DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
                         zip_debugfs_atomic64_set, "%llu\n");
 
+static int hisi_zip_regs_show(struct seq_file *s, void *unused)
+{
+       hisi_qm_regs_dump(s, s->private);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
+
 static int hisi_zip_core_debug_init(struct hisi_qm *qm)
 {
        struct device *dev = &qm->pdev->dev;
@@ -560,9 +609,11 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
                regset->regs = hzip_dfx_regs;
                regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
                regset->base = qm->io_base + core_offsets[i];
+               regset->dev = dev;
 
                tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
-               debugfs_create_regset32("regs", 0444, tmp_d, regset);
+               debugfs_create_file("regs", 0444, tmp_d, regset,
+                                    &hisi_zip_regs_fops);
        }
 
        return 0;
@@ -898,6 +949,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto err_qm_alg_unregister;
        }
 
+       hisi_qm_pm_init(qm);
+
        return 0;
 
 err_qm_alg_unregister:
@@ -920,6 +973,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
 {
        struct hisi_qm *qm = pci_get_drvdata(pdev);
 
+       hisi_qm_pm_uninit(qm);
        hisi_qm_wait_task_finish(qm, &zip_devices);
        hisi_qm_alg_unregister(qm, &zip_devices);
 
@@ -932,6 +986,10 @@ static void hisi_zip_remove(struct pci_dev *pdev)
        hisi_zip_qm_uninit(qm);
 }
 
+static const struct dev_pm_ops hisi_zip_pm_ops = {
+       SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
+};
+
 static const struct pci_error_handlers hisi_zip_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,
        .slot_reset     = hisi_qm_dev_slot_reset,
@@ -948,6 +1006,7 @@ static struct pci_driver hisi_zip_pci_driver = {
                                        hisi_qm_sriov_configure : NULL,
        .err_handler            = &hisi_zip_err_handler,
        .shutdown               = hisi_qm_dev_shutdown,
+       .driver.pm              = &hisi_zip_pm_ops,
 };
 
 static void hisi_zip_register_debugfs(void)
index d6a7784d298881544fb6b25e70d25805789cc007..d19e5ffb5104b953c90e771401ca5c5a3778fd48 100644 (file)
@@ -170,15 +170,19 @@ static struct dcp *global_sdcp;
 
 static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 {
+       int dma_err;
        struct dcp *sdcp = global_sdcp;
        const int chan = actx->chan;
        uint32_t stat;
        unsigned long ret;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
-
        dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
                                              DMA_TO_DEVICE);
 
+       dma_err = dma_mapping_error(sdcp->dev, desc_phys);
+       if (dma_err)
+               return dma_err;
+
        reinit_completion(&sdcp->completion[chan]);
 
        /* Clear status register. */
@@ -216,18 +220,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
                           struct skcipher_request *req, int init)
 {
+       dma_addr_t key_phys, src_phys, dst_phys;
        struct dcp *sdcp = global_sdcp;
        struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
        struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
        int ret;
 
-       dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
-                                            2 * AES_KEYSIZE_128,
-                                            DMA_TO_DEVICE);
-       dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
-                                            DCP_BUF_SZ, DMA_TO_DEVICE);
-       dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
-                                            DCP_BUF_SZ, DMA_FROM_DEVICE);
+       key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+                                 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, key_phys);
+       if (ret)
+               return ret;
+
+       src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+                                 DCP_BUF_SZ, DMA_TO_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, src_phys);
+       if (ret)
+               goto err_src;
+
+       dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+                                 DCP_BUF_SZ, DMA_FROM_DEVICE);
+       ret = dma_mapping_error(sdcp->dev, dst_phys);
+       if (ret)
+               goto err_dst;
 
        if (actx->fill % AES_BLOCK_SIZE) {
                dev_err(sdcp->dev, "Invalid block size!\n");
@@ -265,10 +280,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
        ret = mxs_dcp_start_dma(actx);
 
 aes_done_run:
+       dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+err_dst:
+       dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+err_src:
        dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
                         DMA_TO_DEVICE);
-       dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
-       dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 
        return ret;
 }
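
The reordering of the unmap calls above is the point: mappings are now released
in reverse order of creation, and each error label unwinds exactly the mappings
that succeeded. The general shape of the pattern, as a standalone sketch with
hypothetical buffers and a hypothetical do_transfer():

	dma_addr_t a_phys, b_phys;
	int ret;

	a_phys = dma_map_single(dev, a_buf, a_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, a_phys);	/* must be checked before use */
	if (ret)
		return ret;

	b_phys = dma_map_single(dev, b_buf, b_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, b_phys);
	if (ret)
		goto err_unmap_a;

	ret = do_transfer(dev);			/* hypothetical DMA operation */

	dma_unmap_single(dev, b_phys, b_len, DMA_FROM_DEVICE);
err_unmap_a:
	dma_unmap_single(dev, a_phys, a_len, DMA_TO_DEVICE);
	return ret;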
@@ -283,21 +300,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 
        struct scatterlist *dst = req->dst;
        struct scatterlist *src = req->src;
-       const int nents = sg_nents(req->src);
+       int dst_nents = sg_nents(dst);
 
        const int out_off = DCP_BUF_SZ;
        uint8_t *in_buf = sdcp->coh->aes_in_buf;
        uint8_t *out_buf = sdcp->coh->aes_out_buf;
 
-       uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
        uint32_t dst_off = 0;
+       uint8_t *src_buf = NULL;
        uint32_t last_out_len = 0;
 
        uint8_t *key = sdcp->coh->aes_key;
 
        int ret = 0;
-       int split = 0;
-       unsigned int i, len, clen, rem = 0, tlen = 0;
+       unsigned int i, len, clen, tlen = 0;
        int init = 0;
        bool limit_hit = false;
 
@@ -315,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
                memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
        }
 
-       for_each_sg(req->src, src, nents, i) {
+       for_each_sg(req->src, src, sg_nents(src), i) {
                src_buf = sg_virt(src);
                len = sg_dma_len(src);
                tlen += len;
@@ -340,34 +356,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
                         * submit the buffer.
                         */
                        if (actx->fill == out_off || sg_is_last(src) ||
-                               limit_hit) {
+                           limit_hit) {
                                ret = mxs_dcp_run_aes(actx, req, init);
                                if (ret)
                                        return ret;
                                init = 0;
 
-                               out_tmp = out_buf;
+                               sg_pcopy_from_buffer(dst, dst_nents, out_buf,
+                                                    actx->fill, dst_off);
+                               dst_off += actx->fill;
                                last_out_len = actx->fill;
-                               while (dst && actx->fill) {
-                                       if (!split) {
-                                               dst_buf = sg_virt(dst);
-                                               dst_off = 0;
-                                       }
-                                       rem = min(sg_dma_len(dst) - dst_off,
-                                                 actx->fill);
-
-                                       memcpy(dst_buf + dst_off, out_tmp, rem);
-                                       out_tmp += rem;
-                                       dst_off += rem;
-                                       actx->fill -= rem;
-
-                                       if (dst_off == sg_dma_len(dst)) {
-                                               dst = sg_next(dst);
-                                               split = 0;
-                                       } else {
-                                               split = 1;
-                                       }
-                               }
+                               actx->fill = 0;
                        }
                } while (len);
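
sg_pcopy_from_buffer() (lib/scatterlist.c) replaces the open-coded destination
walk with a library helper that handles entry boundaries itself, which is why
dst_off can simply accumulate across submissions above. Its signature, plus a
hypothetical defensive check the patch itself does not add:

	size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
				    const void *buf, size_t buflen, off_t skip);

	/* the return value is the number of bytes actually copied, so a
	 * destination list that is too small can be detected:
	 */
	if (sg_pcopy_from_buffer(dst, dst_nents, out_buf, actx->fill,
				 dst_off) != actx->fill)
		return -EINVAL;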
 
@@ -557,6 +556,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
        dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
                                             DCP_BUF_SZ, DMA_TO_DEVICE);
 
+       ret = dma_mapping_error(sdcp->dev, buf_phys);
+       if (ret)
+               return ret;
+
        /* Fill in the DMA descriptor. */
        desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
                    MXS_DCP_CONTROL0_INTERRUPT |
@@ -589,6 +592,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
        if (rctx->fini) {
                digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
                                             DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
+               ret = dma_mapping_error(sdcp->dev, digest_phys);
+               if (ret)
+                       goto done_run;
+
                desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
                desc->payload = digest_phys;
        }
index 0dd4c6b157de903972cd13179d79e10f87a2a419..9b968ac4ee7b617302ae4748b4b130a10ae580e3 100644 (file)
@@ -1175,9 +1175,9 @@ static int omap_aes_probe(struct platform_device *pdev)
        spin_lock_init(&dd->lock);
 
        INIT_LIST_HEAD(&dd->list);
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_add_tail(&dd->list, &dev_list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        /* Initialize crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1264,9 +1264,9 @@ static int omap_aes_remove(struct platform_device *pdev)
        if (!dd)
                return -ENODEV;
 
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_del(&dd->list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
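
The spin_lock() to spin_lock_bh() conversions across the OMAP crypto drivers
share one rationale (inferred from the pattern, not stated in this excerpt):
the dd->list device lists are also taken from the request path, which can run
in softirq context, so process-context users must block BHs to avoid a
same-CPU deadlock:

	/* Without _bh, a softirq on the same CPU can re-enter the lock:
	 *
	 *   omap_aes_remove():  spin_lock(&list_lock);
	 *     <softirq fires; request path tries spin_lock(&list_lock)>
	 *       -> self-deadlock
	 */
	spin_lock_bh(&list_lock);	/* softirqs off on this CPU while held */
	list_del(&dd->list);
	spin_unlock_bh(&list_lock);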
index 31bdb1d76d1108b2aae6abec2524938080aec19f..a4cc6bf146ec09524c4d43412b7050f042649aff 100644 (file)
@@ -210,7 +210,7 @@ void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
        buf = sg_virt(sg);
        pages = get_order(len);
 
-       if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
+       if (orig && (flags & OMAP_CRYPTO_DATA_COPIED))
                omap_crypto_copy_data(sg, orig, offset, len);
 
        if (flags & OMAP_CRYPTO_DATA_COPIED)
index bc8631363d72516d174f0b618c9038b17a90aaed..be77656864e3f3a85d643856042ed1b026a1cba3 100644 (file)
@@ -1033,9 +1033,9 @@ static int omap_des_probe(struct platform_device *pdev)
 
 
        INIT_LIST_HEAD(&dd->list);
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_add_tail(&dd->list, &dev_list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        /* Initialize des crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1094,9 +1094,9 @@ static int omap_des_remove(struct platform_device *pdev)
        if (!dd)
                return -ENODEV;
 
-       spin_lock(&list_lock);
+       spin_lock_bh(&list_lock);
        list_del(&dd->list);
-       spin_unlock(&list_lock);
+       spin_unlock_bh(&list_lock);
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
index dd53ad9987b0dabd14f0da8a24facdfa7d6a8179..f6bf53c00b6143948d85b2f7d77ce3fd5e69ece2 100644 (file)
 #define FLAGS_FINAL            1
 #define FLAGS_DMA_ACTIVE       2
 #define FLAGS_OUTPUT_READY     3
-#define FLAGS_INIT             4
 #define FLAGS_CPU              5
 #define FLAGS_DMA_READY                6
 #define FLAGS_AUTO_XOR         7
@@ -368,24 +367,6 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
                        hash[i] = le32_to_cpup((__le32 *)in + i);
 }
 
-static int omap_sham_hw_init(struct omap_sham_dev *dd)
-{
-       int err;
-
-       err = pm_runtime_resume_and_get(dd->dev);
-       if (err < 0) {
-               dev_err(dd->dev, "failed to get sync: %d\n", err);
-               return err;
-       }
-
-       if (!test_bit(FLAGS_INIT, &dd->flags)) {
-               set_bit(FLAGS_INIT, &dd->flags);
-               dd->err = 0;
-       }
-
-       return 0;
-}
-
 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
                                 int final, int dma)
 {
@@ -1093,11 +1074,14 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
        dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
                ctx->op, ctx->total, ctx->digcnt, final);
 
-       dd->req = req;
-
-       err = omap_sham_hw_init(dd);
-       if (err)
+       err = pm_runtime_resume_and_get(dd->dev);
+       if (err < 0) {
+               dev_err(dd->dev, "failed to get sync: %d\n", err);
                return err;
+       }
+
+       dd->err = 0;
+       dd->req = req;
 
        if (ctx->digcnt)
                dd->pdata->copy_hash(req, 0);
@@ -1736,7 +1720,7 @@ static void omap_sham_done_task(unsigned long data)
                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
                        goto finish;
        } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
-               if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+               if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
                        omap_sham_update_dma_stop(dd);
                        if (dd->err) {
                                err = dd->err;
@@ -2129,7 +2113,6 @@ static int omap_sham_probe(struct platform_device *pdev)
        dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
 
        pm_runtime_enable(dev);
-       pm_runtime_irq_safe(dev);
 
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
@@ -2144,9 +2127,9 @@ static int omap_sham_probe(struct platform_device *pdev)
                (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
                (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
 
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_add_tail(&dd->list, &sham.dev_list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
 
        dd->engine = crypto_engine_alloc_init(dev, 1);
        if (!dd->engine) {
@@ -2194,10 +2177,11 @@ err_algs:
 err_engine_start:
        crypto_engine_exit(dd->engine);
 err_engine:
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_del(&dd->list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
 err_pm:
+       pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
        if (!dd->polling_mode)
                dma_release_channel(dd->dma_lch);
@@ -2215,9 +2199,9 @@ static int omap_sham_remove(struct platform_device *pdev)
        dd = platform_get_drvdata(pdev);
        if (!dd)
                return -ENODEV;
-       spin_lock(&sham.lock);
+       spin_lock_bh(&sham.lock);
        list_del(&dd->list);
-       spin_unlock(&sham.lock);
+       spin_unlock_bh(&sham.lock);
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
                        crypto_unregister_ahash(
@@ -2225,6 +2209,7 @@ static int omap_sham_remove(struct platform_device *pdev)
                        dd->pdata->algs_info[i].registered--;
                }
        tasklet_kill(&dd->done_task);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
        if (!dd->polling_mode)
@@ -2235,32 +2220,11 @@ static int omap_sham_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int omap_sham_suspend(struct device *dev)
-{
-       pm_runtime_put_sync(dev);
-       return 0;
-}
-
-static int omap_sham_resume(struct device *dev)
-{
-       int err = pm_runtime_resume_and_get(dev);
-       if (err < 0) {
-               dev_err(dev, "failed to get sync: %d\n", err);
-               return err;
-       }
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
-
 static struct platform_driver omap_sham_driver = {
        .probe  = omap_sham_probe,
        .remove = omap_sham_remove,
        .driver = {
                .name   = "omap-sham",
-               .pm     = &omap_sham_pm_ops,
                .of_match_table = omap_sham_of_match,
        },
 };
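
Note: the omap-sham hunks above drop pm_runtime_irq_safe() and the SIMPLE_DEV_PM_OPS suspend/resume pair in favour of taking a runtime-PM reference directly in the hash request handler, and they switch the sham.lock accesses to the _bh variants now that the device list is also touched from softirq context. A minimal sketch of the resume-and-get pattern, assuming a hypothetical example_hash_one_req() handler (only the pm_runtime_* API comes from the hunks; everything else is illustrative):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int example_hash_one_req(struct device *dev)
    {
            int err;

            /* Bumps the usage count and resumes the device; on failure
             * the count is dropped again, so no pm_runtime_put_noidle()
             * is needed in the error path.
             */
            err = pm_runtime_resume_and_get(dev);
            if (err < 0) {
                    dev_err(dev, "failed to get sync: %d\n", err);
                    return err;
            }

            /* ... program the hash engine, wait for completion ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return 0;
    }

Because the reference is now taken in process context, the driver no longer needs pm_runtime_irq_safe(), which would otherwise pin the parent device's runtime-PM usage count for the lifetime of the driver.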
index 3524ddd4893009394f79e0a217204b2ba4498855..33d8e50dcbdacf91414270fe96ceb997a96e7b26 100644 (file)
@@ -161,7 +161,7 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
        ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
        return 0;
 }
@@ -210,21 +210,21 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
        hw_data->fw_mmp_name = ADF_4XXX_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
        hw_data->uof_get_num_objs = uof_get_num_objs;
        hw_data->uof_get_name = uof_get_name;
        hw_data->uof_get_ae_mask = uof_get_ae_mask;
        hw_data->set_msix_rttable = set_msix_default_rttable;
        hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
 
        adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
 }
index a8805c815d1610f645467ed02aa929858b59c0a9..359fb7989dfbfd4fd25e47892a695e5442abf302 100644 (file)
@@ -221,16 +221,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Set DMA identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration.\n");
-                       ret = -EFAULT;
-                       goto out_err;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration.\n");
+               goto out_err;
        }
 
        /* Get accelerator capabilities mask */
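
Note: the probe hunk above collapses the pci_set_dma_mask()/pci_set_consistent_dma_mask() fallback ladder into a single dma_set_mask_and_coherent() call, which sets the streaming and coherent masks together and propagates the core's error code instead of a hand-rolled -EFAULT. A condensed sketch of the idiom (example_probe_dma() is a hypothetical wrapper):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int example_probe_dma(struct pci_dev *pdev)
    {
            int ret;

            /* One call covers both masks; an explicit 32-bit retry is
             * unnecessary because the DMA core only rejects masks the
             * platform genuinely cannot satisfy.
             */
            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
            if (ret)
                    dev_err(&pdev->dev, "No usable DMA configuration.\n");
            return ret;
    }

The mask width differs per device in the hunks that follow: the 4xxx PF requests 64 bits, while the older c3xxx/c62x/dh895xcc parts request DMA_BIT_MASK(48), presumably matching what that generation of hardware can address.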
index 1dd64af22bea92fa3c7b89f23ab78a116e74f14c..3027c01bc89e2717d033c4a09168e80cdb3ee141 100644 (file)
@@ -111,11 +111,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C3XXX_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C3XXX_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -159,8 +154,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_C3XXX_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -193,8 +190,6 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sku = get_sku;
@@ -203,16 +198,18 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index fece8e38025ace1b9ce8756375580a6c823f7a8c..86ee02a8678960e4d4d673bc00a41128644608c1 100644 (file)
@@ -29,7 +29,6 @@
 #define ADF_C3XXX_ERRSSMSH_EN BIT(3)
 
 #define ADF_C3XXX_PF2VF_OFFSET(i)      (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C3XXX_VINTMSK_OFFSET(i)    (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
index 7fb3343ae8b0c0824d47be8eb895de8310fb06a0..cc6e75dc60de94a033aa6a3a42bd1e5e30f38823 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index 15f6b9bdfb2213f881140df0b4c57823427e9d20..3e69b520e82fa20bf972f187c4e6a596e4dd1045 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C3XXXIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C3XXXIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 7945a9cd1c60c50e2714ed8ba7eeae9a5dff916d..f5de4ce66014871ab52cffb145dc2ef3d76bdcaf 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_C3XXXIOV_ETR_BAR 0
 #define ADF_C3XXXIOV_ETR_MAX_BANKS 1
 #define ADF_C3XXXIOV_PF2VF_OFFSET      0x200
-#define ADF_C3XXXIOV_VINTMSK_OFFSET    0x208
 
 void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
index 067ca5e17d387cb87c7053aab8ec16a54bd49bb7..1df1b868978d94a92c00398a6de9390c2de09514 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
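
Note: the VF remove path now calls adf_flush_vf_wq() before stopping the device. Per its kernel-doc later in this series, the helper masks the PF2VF interrupt and then flushes adf_vf_stop_wq, so no stop-work item can race with the teardown that follows. A minimal sketch of the resulting ordering, mirroring adf_remove() above (error handling elided; example_vf_remove() is illustrative):

    /* Quiesce the interrupt source first so nothing new is queued,
     * then drain what was already submitted, then tear down.
     */
    static void example_vf_remove(struct adf_accel_dev *accel_dev)
    {
            adf_flush_vf_wq(accel_dev);    /* mask PF2VF irq + flush wq */
            adf_dev_stop(accel_dev);
            adf_dev_shutdown(accel_dev);
    }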
index 30337390513c6fdc309c15e66c2e13127b883231..b023c80873bb5336c63e5fd46b1740d91880e207 100644 (file)
@@ -113,11 +113,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C62X_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C62X_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -161,8 +156,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_C62X_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -195,8 +192,6 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->get_sram_bar_id = get_sram_bar_id;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sku = get_sku;
@@ -205,16 +200,18 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_flr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 53d3cb577f5bbf1098c8213f472c5fd108766032..e6664bd20c915b9f6c91f58150fdf95bd7948af0 100644 (file)
@@ -30,7 +30,6 @@
 #define ADF_C62X_ERRSSMSH_EN BIT(3)
 
 #define ADF_C62X_PF2VF_OFFSET(i)       (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_C62X_VINTMSK_OFFSET(i)     (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
index 1f5de442e1e6de8b6aaf30b53f9d8abc8010337e..bf251dfe74b36603e6802a84f330184d86e6b94a 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index d231583428c91f5ae4c59f694c64b4d8acc93549..3bee3e4673636f1911e88307f817ebb1d73e141a 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_C62XIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_C62XIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index a6c04cf7a43cb5765a814f5990087d541fa2024d..794778c486782f2084e985ee0880262360c0c2ee 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_C62XIOV_ETR_BAR 0
 #define ADF_C62XIOV_ETR_MAX_BANKS 1
 #define ADF_C62XIOV_PF2VF_OFFSET       0x200
-#define ADF_C62XIOV_VINTMSK_OFFSET     0x208
 
 void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
index 51ea88c0b17d7751515bb63e316ce85853e3f290..8103bd81d617ae9fbe1906b6e8537b89aa42d5e7 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index ac435b44f1d20d12cf0c3a00824302ec83452d6e..38c0af6d4e43e75e72e559fe7cf57bed8485c9e9 100644 (file)
@@ -18,8 +18,6 @@
 #define ADF_4XXX_DEVICE_NAME "4xxx"
 #define ADF_4XXX_PCI_DEVICE_ID 0x4940
 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
 #define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -156,7 +154,6 @@ struct adf_hw_device_data {
        u32 (*get_num_aes)(struct adf_hw_device_data *self);
        u32 (*get_num_accels)(struct adf_hw_device_data *self);
        u32 (*get_pf2vf_offset)(u32 i);
-       u32 (*get_vintmsk_offset)(u32 i);
        void (*get_arb_info)(struct arb_info *arb_csrs_info);
        void (*get_admin_info)(struct admin_info *admin_csrs_info);
        enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
@@ -174,7 +171,7 @@ struct adf_hw_device_data {
                                      bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
        void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
-       int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+       int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);
        void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
        char *(*uof_get_name)(u32 obj_num);
@@ -227,7 +224,6 @@ struct adf_fw_loader_data {
 
 struct adf_accel_vf_info {
        struct adf_accel_dev *accel_dev;
-       struct tasklet_struct vf2pf_bh_tasklet;
        struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
        struct ratelimit_state vf2pf_ratelimit;
        u32 vf_nr;
@@ -249,6 +245,8 @@ struct adf_accel_dev {
        struct adf_accel_pci accel_pci_dev;
        union {
                struct {
+                       /* protects VF2PF interrupts access */
+                       spinlock_t vf2pf_ints_lock;
                        /* vf_info is non-zero when SR-IOV is init'ed */
                        struct adf_accel_vf_info *vf_info;
                } pf;
index d2ae293d0df6a82a0f7a9a74c196b52f3197dbb6..ed3e40bc56eb2b4f9db0d6df77a641ad511b702a 100644 (file)
@@ -194,7 +194,7 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev)
 EXPORT_SYMBOL_GPL(adf_enable_aer);
 
 /**
- * adf_disable_aer() - Enable Advanced Error Reporting for acceleration device
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
  * @accel_dev:  Pointer to acceleration device.
  *
  * Function disables PCI Advanced Error Reporting for the
index c61476553728dc5ee9b1c9cd2ea5e84032b1f00f..4261749fae8d45442f5afe5c0017d4ffdeecc788 100644 (file)
@@ -193,22 +193,23 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
 void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                  u32 vf_mask);
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+                                     u32 vf_mask);
 void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                 u32 vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
 
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
 int adf_init_pf_wq(void);
 void adf_exit_pf_wq(void);
 int adf_init_vf_wq(void);
 void adf_exit_vf_wq(void);
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
 #else
-static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
-       return 0;
-}
+#define adf_sriov_configure NULL
 
 static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 {
@@ -222,12 +223,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
 
-static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
 {
        return 0;
 }
 
-static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
 {
 }
 
@@ -249,5 +250,9 @@ static inline void adf_exit_vf_wq(void)
 {
 }
 
+static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+}
+
 #endif
 #endif
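
Note: replacing the inline adf_sriov_configure() stub with "#define adf_sriov_configure NULL" is a behavioural fix, not just a cleanup. With the stub, a write to the sriov_numvfs sysfs attribute appeared to succeed while doing nothing; with a NULL .sriov_configure hook, the PCI core refuses the write outright. A sketch of how the define is consumed (field values are illustrative; only .sriov_configure comes from the hunk):

    static struct pci_driver adf_driver = {
            .id_table        = adf_pci_tbl,
            .name            = ADF_C62X_DEVICE_NAME,
            .probe           = adf_probe,
            .remove          = adf_remove,
            /* NULL when SR-IOV support is compiled out */
            .sriov_configure = adf_sriov_configure,
    };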
index 744c40351428da6ceeda9f01bcdcbd0798cff732..60bc7b991d3519986942930edb66dc6229b1a7d0 100644 (file)
@@ -61,6 +61,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
        struct service_hndl *service;
        struct list_head *list_itr;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int ret;
 
        if (!hw_data) {
                dev_err(&GET_DEV(accel_dev),
@@ -88,8 +89,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
                return -EFAULT;
        }
 
-       hw_data->enable_ints(accel_dev);
-
        if (adf_ae_init(accel_dev)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to initialise Acceleration Engine\n");
@@ -110,6 +109,13 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
        }
        set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
 
+       hw_data->enable_ints(accel_dev);
+       hw_data->enable_error_correction(accel_dev);
+
+       ret = hw_data->enable_pfvf_comms(accel_dev);
+       if (ret)
+               return ret;
+
        /*
         * Subservice initialisation is divided into two stages: init and start.
         * This is to facilitate any ordering dependencies between services
@@ -126,9 +132,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
                set_bit(accel_dev->accel_id, service->init_status);
        }
 
-       hw_data->enable_error_correction(accel_dev);
-       hw_data->enable_vf2pf_comms(accel_dev);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(adf_dev_init);
index e3ad5587be49e0fc12897fa5b4d74ab5a89f1237..c678d5c531aa9d11af7fe2df4720bcb4c3fcf5bb 100644 (file)
 #include "adf_transport_access_macros.h"
 #include "adf_transport_internal.h"
 
+#define ADF_MAX_NUM_VFS        32
+#define ADF_ERRSOU3    (0x3A000 + 0x0C)
+#define ADF_ERRSOU5    (0x3A000 + 0xD8)
+#define ADF_ERRMSK3    (0x3A000 + 0x1C)
+#define ADF_ERRMSK5    (0x3A000 + 0xDC)
+#define ADF_ERR_REG_VF2PF_L(vf_src)    (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_ERR_REG_VF2PF_U(vf_src)    (((vf_src) & 0x0000FFFF) << 16)
+
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -71,14 +79,23 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-               void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-               u32 vf_mask;
+               void __iomem *pmisc_addr = pmisc->virt_addr;
+               u32 errsou3, errsou5, errmsk3, errmsk5;
+               unsigned long vf_mask;
 
                /* Get the interrupt sources triggered by VFs */
-               vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
-                           0x0000FFFF) << 16) |
-                         ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
-                           0x01FFFE00) >> 9);
+               errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
+               errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
+               vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
+               vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
+
+               /* To avoid adding duplicate entries to the work queue, clear
+                * the vf_mask bits that are already masked in the ERRMSK registers.
+                */
+               errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
+               errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
+               vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
+               vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
 
                if (vf_mask) {
                        struct adf_accel_vf_info *vf_info;
@@ -86,15 +103,13 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                        int i;
 
                        /* Disable VF2PF interrupts for VFs with pending ints */
-                       adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+                       adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
 
                        /*
-                        * Schedule tasklets to handle VF2PF interrupt BHs
-                        * unless the VF is malicious and is attempting to
-                        * flood the host OS with VF2PF interrupts.
+                        * Handle VF2PF interrupt unless the VF is malicious and
+                        * is attempting to flood the host OS with VF2PF interrupts.
                         */
-                       for_each_set_bit(i, (const unsigned long *)&vf_mask,
-                                        (sizeof(vf_mask) * BITS_PER_BYTE)) {
+                       for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
                                vf_info = accel_dev->pf.vf_info + i;
 
                                if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
@@ -104,8 +119,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
                                        continue;
                                }
 
-                               /* Tasklet will re-enable ints from this VF */
-                               tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+                               adf_schedule_vf2pf_handler(vf_info);
                                irq_handled = true;
                        }
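
Note: the ISR hunk above computes the pending-VF set from ERRSOU3/ERRSOU5 and then strips any bits already masked in ERRMSK3/ERRMSK5, so a VF whose interrupt was disabled by an earlier pass cannot be queued a second time. A condensed sketch of the computation, using the macros defined at the top of this file section (example_pending_vf_mask() is a hypothetical helper):

    /* ERRSOU3 bits 9..24 carry VFs 0..15, the low 16 bits of ERRSOU5
     * carry VFs 16..31; the ERRMSK registers share the same layout.
     */
    static unsigned long example_pending_vf_mask(void __iomem *pmisc_addr)
    {
            u32 errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
            u32 errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
            u32 errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
            u32 errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
            unsigned long vf_mask;

            vf_mask  = ADF_ERR_REG_VF2PF_L(errsou3);
            vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);

            /* Drop sources that are already masked */
            vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
            vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
            return vf_mask;
    }

Widening vf_mask to unsigned long is what allows the simpler for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) iteration without the pointer cast the old code needed.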
 
index a1b77bd7a89443cc22b4801f49b3ea8868639ce2..976b9ab7617cd81018f9cf5474ea204ab1d47fe8 100644 (file)
 #define ADF_DH895XCC_ERRMSK5   (ADF_DH895XCC_EP_OFFSET + 0xDC)
 #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
 
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_bar_addr =
-               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
-       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
-}
-
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_bar_addr =
-               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
-
-       ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
-}
-
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
-                                u32 vf_mask)
+static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                         u32 vf_mask)
 {
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
@@ -55,7 +35,17 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
        }
 }
 
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+                                          u32 vf_mask)
 {
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
@@ -78,6 +68,22 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
        }
 }
 
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+       __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+}
+
 static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 {
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
@@ -186,7 +192,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(adf_iov_putmsg);
 
 void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 {
@@ -216,7 +221,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
-                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
 
                dev_dbg(&GET_DEV(accel_dev),
@@ -226,19 +231,19 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                if (vf_compat_ver < hw_data->min_iov_compat_ver) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) incompatible with PF (vers %d)\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
-               } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+               } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) compat with PF (vers %d) unkn.\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                } else {
                        dev_dbg(&GET_DEV(accel_dev),
                                "VF (vers %d) compatible with PF (vers %d)\n",
-                               vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+                               vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                }
@@ -251,7 +256,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                         (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                          ADF_PF2VF_MSGTYPE_SHIFT) |
-                        (ADF_PFVF_COMPATIBILITY_VERSION <<
+                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                          ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
                resp |= ADF_PF2VF_VF_COMPATIBLE <<
                        ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
@@ -284,6 +289,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
 
        /* re-enable interrupt on PF from this VF */
        adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+
        return;
 err:
        dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
@@ -313,8 +319,10 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
 
        msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
        msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
-       msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
-       BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+       msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+       BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+       reinit_completion(&accel_dev->vf.iov_msg_completion);
 
        /* Send request from VF to PF */
        ret = adf_iov_putmsg(accel_dev, msg, 0);
@@ -338,14 +346,16 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
                break;
        case ADF_PF2VF_VF_COMPAT_UNKNOWN:
                /* VF is newer than PF and decides whether it is compatible */
-               if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+               if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
+                       accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
                        break;
+               }
                fallthrough;
        case ADF_PF2VF_VF_INCOMPATIBLE:
                dev_err(&GET_DEV(accel_dev),
                        "PF (vers %d) and VF (vers %d) are not compatible\n",
                        accel_dev->vf.pf_version,
-                       ADF_PFVF_COMPATIBILITY_VERSION);
+                       ADF_PFVF_COMPAT_THIS_VERSION);
                return -EINVAL;
        default:
                dev_err(&GET_DEV(accel_dev),
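
Note: two patterns in the adf_pf2vf_msg.c hunks above are worth calling out. First, the interrupt enable/disable helpers are split into unlocked __adf_*() workers plus locked wrappers: the wrappers callable from any context take vf2pf_ints_lock with spin_lock_irqsave(), while adf_disable_vf2pf_interrupts_irq() uses plain spin_lock() because it is only invoked from the ISR, where interrupts are already disabled. Second, reinit_completion() now runs before each version request, so a completion left signalled by an earlier timed-out exchange cannot satisfy the new wait. A compact sketch of the locking split (the worker body is elided; example_* names are illustrative):

    static void __example_disable_ints(struct adf_accel_dev *accel_dev,
                                       u32 vf_mask)
    {
            /* ... update ERRMSK3/ERRMSK5 from vf_mask, unlocked ... */
    }

    /* Safe from any context */
    void example_disable_ints(struct adf_accel_dev *accel_dev, u32 vf_mask)
    {
            unsigned long flags;

            spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
            __example_disable_ints(accel_dev, vf_mask);
            spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
    }

    /* ISR-only variant: interrupts are already off, skip the irqsave */
    void example_disable_ints_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
    {
            spin_lock(&accel_dev->pf.vf2pf_ints_lock);
            __example_disable_ints(accel_dev, vf_mask);
            spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
    }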
index 0690c031bfce75bdece8b45d128e4477d83993c1..ffd43aa50b57e6b854f9aa26fd939f9b90bd2e96 100644 (file)
@@ -52,7 +52,7 @@
  * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
  */
 
-#define ADF_PFVF_COMPATIBILITY_VERSION         0x1     /* PF<->VF compat */
+#define ADF_PFVF_COMPAT_THIS_VERSION           0x1     /* PF<->VF compat */
 
 /* PF->VF messages */
 #define ADF_PF2VF_INT                          BIT(0)
index 8c822c2861c2bae1cd79760cb152e5919d00141d..90ec057f9183d776580f56f10f531e610bad597b 100644 (file)
@@ -24,9 +24,8 @@ static void adf_iov_send_resp(struct work_struct *work)
        kfree(pf2vf_resp);
 }
 
-static void adf_vf2pf_bh_handler(void *data)
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
 {
-       struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
        struct adf_pf2vf_resp *pf2vf_resp;
 
        pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
@@ -52,9 +51,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
                vf_info->accel_dev = accel_dev;
                vf_info->vf_nr = i;
 
-               tasklet_init(&vf_info->vf2pf_bh_tasklet,
-                            (void *)adf_vf2pf_bh_handler,
-                            (unsigned long)vf_info);
                mutex_init(&vf_info->pf2vf_lock);
                ratelimit_state_init(&vf_info->vf2pf_ratelimit,
                                     DEFAULT_RATELIMIT_INTERVAL,
@@ -110,8 +106,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
                hw_data->configure_iov_threads(accel_dev, false);
 
        for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
-               tasklet_disable(&vf->vf2pf_bh_tasklet);
-               tasklet_kill(&vf->vf2pf_bh_tasklet);
                mutex_destroy(&vf->pf2vf_lock);
        }
 
index e85bd62d134a4224dfb8e5f717e8a0ef74f41393..3e25fac051b25f005a29cf6275fc9aa4a539f4bf 100644 (file)
@@ -5,14 +5,14 @@
 #include "adf_pf2vf_msg.h"
 
 /**
- * adf_vf2pf_init() - send init msg to PF
+ * adf_vf2pf_notify_init() - send init msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
  * Function sends an init message from the VF to a PF
  *
  * Return: 0 on success, error code otherwise.
  */
-int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
 {
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
                (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -25,17 +25,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
        set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
        return 0;
 }
-EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
 
 /**
- * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
  * @accel_dev:  Pointer to acceleration VF device.
  *
  * Function sends a shutdown message from the VF to a PF
  *
  * Return: void
  */
-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
 {
        u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
            (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -45,4 +45,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to send Shutdown event to PF\n");
 }
-EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
index 888388acb6bd3e7e3b8019f2d1d6a7d6d038071a..7828a6573f3e2ebe922a9bd3cf80f820fd495e27 100644 (file)
@@ -18,6 +18,7 @@
 #include "adf_pf2vf_msg.h"
 
 #define ADF_VINTSOU_OFFSET     0x204
+#define ADF_VINTMSK_OFFSET     0x208
 #define ADF_VINTSOU_BUN                BIT(0)
 #define ADF_VINTSOU_PF2VF      BIT(1)
 
@@ -28,6 +29,27 @@ struct adf_vf_stop_data {
        struct work_struct work;
 };
 
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_bar_addr =
+               pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+       ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
 static int adf_enable_msi(struct adf_accel_dev *accel_dev)
 {
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -160,11 +182,21 @@ static irqreturn_t adf_isr(int irq, void *privdata)
        struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-       u32 v_int;
+       bool handled = false;
+       u32 v_int, v_mask;
 
        /* Read VF INT source CSR to determine the source of VF interrupt */
        v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
 
+       /* Read VF INT mask CSR to determine which sources are masked */
+       v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
+
+       /*
+        * Recompute v_int ignoring sources that are masked. This is to
+        * avoid rescheduling the tasklet for interrupts already handled
+        */
+       v_int &= ~v_mask;
+
        /* Check for PF2VF interrupt */
        if (v_int & ADF_VINTSOU_PF2VF) {
                /* Disable PF to VF interrupt */
@@ -172,7 +204,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
 
                /* Schedule tasklet to handle interrupt BH */
                tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
-               return IRQ_HANDLED;
+               handled = true;
        }
 
        /* Check bundle interrupt */
@@ -184,10 +216,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
                csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
                                                    bank->bank_number, 0);
                tasklet_hi_schedule(&bank->resp_handler);
-               return IRQ_HANDLED;
+               handled = true;
        }
 
-       return IRQ_NONE;
+       return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
 static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
@@ -285,6 +317,30 @@ err_out:
 }
 EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
 
+/**
+ * adf_flush_vf_wq() - Flush workqueue for VF
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables the PF/VF interrupts on the VF so that no new messages
+ * are received and flushes the workqueue 'adf_vf_stop_wq'.
+ *
+ * Return: void.
+ */
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+       adf_disable_pf2vf_interrupts(accel_dev);
+
+       flush_workqueue(adf_vf_stop_wq);
+}
+EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
+
+/**
+ * adf_init_vf_wq() - Init workqueue for VF
+ *
+ * Function initialises the workqueue 'adf_vf_stop_wq' for the VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
 int __init adf_init_vf_wq(void)
 {
        adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
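
Note: the VF ISR above now reads VINTMSK alongside VINTSOU, drops sources that are already masked, and services every remaining source in a single pass, accumulating a handled flag instead of returning after the first hit. A sketch of the shape (constants from this file section; the bottom-half scheduling is elided and example_vf_isr() is illustrative):

    static irqreturn_t example_vf_isr(void __iomem *pmisc_bar_addr)
    {
            bool handled = false;
            u32 v_int, v_mask;

            v_int  = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
            v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
            v_int &= ~v_mask;       /* ignore already-masked sources */

            if (v_int & ADF_VINTSOU_PF2VF) {
                    /* ... mask PF2VF and schedule its tasklet ... */
                    handled = true;
            }
            if (v_int & ADF_VINTSOU_BUN) {
                    /* ... schedule the bundle response handler ... */
                    handled = true;
            }
            return handled ? IRQ_HANDLED : IRQ_NONE;
    }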
index 7dd7cd6c3ef84bd5887e9f8687acc8fe787b151d..0a9ce365a544e4f3cf5d365a09611bd3a3f45895 100644 (file)
@@ -131,11 +131,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_DH895XCC_PF2VF_OFFSET(i);
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_DH895XCC_VINTMSK_OFFSET(i);
-}
-
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -180,8 +175,10 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
                   ADF_DH895XCC_SMIA1_MASK);
 }
 
-static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
 {
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
        return 0;
 }
 
@@ -213,8 +210,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->get_num_aes = get_num_aes;
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_admin_info = adf_gen2_get_admin_info;
        hw_data->get_arb_info = adf_gen2_get_arb_info;
        hw_data->get_sram_bar_id = get_sram_bar_id;
@@ -224,15 +219,17 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;
        hw_data->exit_arb = adf_exit_arb;
        hw_data->get_arb_mapping = adf_get_arbiter_mapping;
        hw_data->enable_ints = adf_enable_ints;
-       hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
        hw_data->reset_device = adf_reset_sbr;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->get_pf2vf_offset = get_pf2vf_offset;
+       hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
 }
 
index 4d613923d1559174f15106e58b6970ee13591a2d..f99319cd45432ae8e24549523d18fcdd3be4d231 100644 (file)
@@ -35,7 +35,6 @@
 #define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
 
 #define ADF_DH895XCC_PF2VF_OFFSET(i)   (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
 
 /* AE to function mapping */
 #define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
index a9ec4357144c343bc741c79e896f2b127d6822b5..3976a81bd99b8b34fd2df8dc0a211063cdeaba1b 100644 (file)
@@ -159,17 +159,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
@@ -208,12 +201,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
        }
 
        ret = qat_crypto_dev_config(accel_dev);
        if (ret)
-               goto out_err_free_reg;
+               goto out_err_disable_aer;
 
        ret = adf_dev_init(accel_dev);
        if (ret)
@@ -229,6 +222,8 @@ out_err_dev_stop:
        adf_dev_stop(accel_dev);
 out_err_dev_shutdown:
        adf_dev_shutdown(accel_dev);
+out_err_disable_aer:
+       adf_disable_aer(accel_dev);
 out_err_free_reg:
        pci_release_regions(accel_pci_dev->pci_dev);
 out_err_disable:
index f14fb82ed6dfc936885e9eeb4dee8a5fca0137d8..7c6ed6bc8abf2fbbdffa83fe387f1db033b04621 100644 (file)
@@ -52,11 +52,6 @@ static u32 get_pf2vf_offset(u32 i)
        return ADF_DH895XCCIOV_PF2VF_OFFSET;
 }
 
-static u32 get_vintmsk_offset(u32 i)
-{
-       return ADF_DH895XCCIOV_VINTMSK_OFFSET;
-}
-
 static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
 {
        return 0;
@@ -81,10 +76,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
        hw_data->enable_error_correction = adf_vf_void_noop;
        hw_data->init_admin_comms = adf_vf_int_noop;
        hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_init;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
        hw_data->init_arb = adf_vf_int_noop;
        hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_shutdown;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
        hw_data->get_accel_mask = get_accel_mask;
        hw_data->get_ae_mask = get_ae_mask;
        hw_data->get_num_accels = get_num_accels;
@@ -92,11 +87,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
        hw_data->get_etr_bar_id = get_etr_bar_id;
        hw_data->get_misc_bar_id = get_misc_bar_id;
        hw_data->get_pf2vf_offset = get_pf2vf_offset;
-       hw_data->get_vintmsk_offset = get_vintmsk_offset;
        hw_data->get_sku = get_sku;
        hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
-       hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+       hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
+       hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
        hw_data->dev_class->instances++;
        adf_devmgr_update_class_index(hw_data);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 2bfcc67f8f3928b295acce37e200f956b8bfd6f9..306ebb71a408bd9330185acdba9d6facf54136fa 100644 (file)
@@ -13,7 +13,6 @@
 #define ADF_DH895XCCIOV_ETR_BAR 0
 #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
 #define ADF_DH895XCCIOV_PF2VF_OFFSET   0x200
-#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
 
 void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
 void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
index 29999da716cc949ed0333d539b5c86b7c57edeb6..99d90f3ea2b7945dd064c99afd80725b42339ece 100644 (file)
@@ -141,17 +141,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set dma identifier */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
-                       dev_err(&pdev->dev, "No usable DMA configuration\n");
-                       ret = -EFAULT;
-                       goto out_err_disable;
-               } else {
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               }
-
-       } else {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
        }
 
        if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
@@ -218,6 +211,7 @@ static void adf_remove(struct pci_dev *pdev)
                pr_err("QAT: Driver removal failed\n");
                return;
        }
+       adf_flush_vf_wq(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
        adf_cleanup_accel(accel_dev);
index 080955a1dd9c0a5471d0bbcc0490e6af09edfc7a..e2375d992308701f32574b2d5abf13ce496cbfe8 100644 (file)
@@ -187,9 +187,9 @@ static int virtcrypto_init_vqs(struct virtio_crypto *vi)
        if (ret)
                goto err_free;
 
-       get_online_cpus();
+       cpus_read_lock();
        virtcrypto_set_affinity(vi);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return 0;
 
index 5fa6ae9dbc8b02b137496231b5f04175f0e5376c..44736cbd446ef747151aa1c807be368a97fbe318 100644 (file)
@@ -313,7 +313,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                return -ENXIO;
 
        if (nr_pages < 0)
-               return nr_pages;
+               return -EINVAL;
 
        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
                        kaddr, pfn);
index 61c21bd880a43dd4a6d185d4f960ee5e1b551d00..2949edb934549f1ee712f4e13dcb2d361ccd027f 100644 (file)
@@ -539,10 +539,18 @@ module_platform_driver(altr_edac_driver);
  * trigger testing are different for each memory.
  */
 
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
 static const struct edac_device_prv_data ocramecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_L2C
 static const struct edac_device_prv_data l2ecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
 static const struct edac_device_prv_data a10_ocramecc_data;
+#endif
+#ifdef CONFIG_EDAC_ALTERA_L2C
 static const struct edac_device_prv_data a10_l2ecc_data;
+#endif
 
 static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
 {
@@ -569,9 +577,9 @@ static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
        return ret_value;
 }
 
-static ssize_t altr_edac_device_trig(struct file *file,
-                                    const char __user *user_buf,
-                                    size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_device_trig(struct file *file, const char __user *user_buf,
+                     size_t count, loff_t *ppos)
 
 {
        u32 *ptemp, i, error_mask;
@@ -640,27 +648,27 @@ static ssize_t altr_edac_device_trig(struct file *file,
        return count;
 }
 
-static const struct file_operations altr_edac_device_inject_fops = {
+static const struct file_operations altr_edac_device_inject_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_device_trig,
        .llseek = generic_file_llseek,
 };
 
-static ssize_t altr_edac_a10_device_trig(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos);
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
+                         size_t count, loff_t *ppos);
 
-static const struct file_operations altr_edac_a10_device_inject_fops = {
+static const struct file_operations altr_edac_a10_device_inject_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_a10_device_trig,
        .llseek = generic_file_llseek,
 };
 
-static ssize_t altr_edac_a10_device_trig2(struct file *file,
-                                         const char __user *user_buf,
-                                         size_t count, loff_t *ppos);
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos);
 
-static const struct file_operations altr_edac_a10_device_inject2_fops = {
+static const struct file_operations altr_edac_a10_device_inject2_fops __maybe_unused = {
        .open = simple_open,
        .write = altr_edac_a10_device_trig2,
        .llseek = generic_file_llseek,
@@ -1697,9 +1705,9 @@ MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match);
  * Based on xgene_edac.c peripheral code.
  */
 
-static ssize_t altr_edac_a10_device_trig(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
+                         size_t count, loff_t *ppos)
 {
        struct edac_device_ctl_info *edac_dci = file->private_data;
        struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
@@ -1729,9 +1737,9 @@ static ssize_t altr_edac_a10_device_trig(struct file *file,
  * slightly. A few Arria10 peripherals can use this injection function.
  * Inject the error into the memory and then readback to trigger the IRQ.
  */
-static ssize_t altr_edac_a10_device_trig2(struct file *file,
-                                         const char __user *user_buf,
-                                         size_t count, loff_t *ppos)
+static ssize_t __maybe_unused
+altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos)
 {
        struct edac_device_ctl_info *edac_dci = file->private_data;
        struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
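
Note: __maybe_unused suppresses the compiler's unused-symbol warnings when a
definition is compiled out of use, which happens here when a configuration
enables neither CONFIG_EDAC_ALTERA_OCRAM nor CONFIG_EDAC_ALTERA_L2C and
nothing references the trigger functions or their fops. A minimal sketch of
the attribute, with a hypothetical helper that is not part of this patch:

    #include <linux/compiler.h>

    /* No -Wunused-function warning even if no config path calls this. */
    static int __maybe_unused demo_double(int x)
    {
            return x * 2;
    }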
index f0d8f60acee10c36004ae3c241bb71d89705ae8c..99b06a3e8fb1289c4d7bb2ebc30d72d11b069e4b 100644 (file)
@@ -571,8 +571,8 @@ EDAC_DCT_ATTR_SHOW(dbam0);
 EDAC_DCT_ATTR_SHOW(top_mem);
 EDAC_DCT_ATTR_SHOW(top_mem2);
 
-static ssize_t hole_show(struct device *dev, struct device_attribute *mattr,
-                        char *data)
+static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
+                             char *data)
 {
        struct mem_ctl_info *mci = to_mci(dev);
 
@@ -593,7 +593,7 @@ static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
-static DEVICE_ATTR(dram_hole, S_IRUGO, hole_show, NULL);
+static DEVICE_ATTR_RO(dram_hole);
 
 static struct attribute *dbg_attrs[] = {
        &dev_attr_dhar.attr,
@@ -802,16 +802,11 @@ static ssize_t inject_write_store(struct device *dev,
  * update NUM_INJ_ATTRS in case you add new members
  */
 
-static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
-                  inject_section_show, inject_section_store);
-static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
-                  inject_word_show, inject_word_store);
-static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
-                  inject_ecc_vector_show, inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IWUSR,
-                  NULL, inject_write_store);
-static DEVICE_ATTR(inject_read,  S_IWUSR,
-                  NULL, inject_read_store);
+static DEVICE_ATTR_RW(inject_section);
+static DEVICE_ATTR_RW(inject_word);
+static DEVICE_ATTR_RW(inject_ecc_vector);
+static DEVICE_ATTR_WO(inject_write);
+static DEVICE_ATTR_WO(inject_read);
 
 static struct attribute *inj_attrs[] = {
        &dev_attr_inject_section.attr,
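
Note: DEVICE_ATTR_RO/RW/WO(name) expand to a struct device_attribute named
dev_attr_<name> and derive the handlers by convention (name_show/name_store),
which is why hole_show had to become dram_hole_show above. A minimal sketch of
the convention, with a hypothetical attribute that is not part of this patch:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/sysfs.h>

    static int demo_value;

    /* DEVICE_ATTR_RW(demo) requires demo_show()/demo_store(), mode 0644. */
    static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
    {
            return sysfs_emit(buf, "%d\n", demo_value);
    }

    static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
    {
            int ret = kstrtoint(buf, 0, &demo_value);

            return ret ? ret : count;
    }
    static DEVICE_ATTR_RW(demo);    /* creates dev_attr_demo */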
index f6d462d0be2d552de828f3ee2b85980aaf32ef2d..2c5975674723aedc70cbe76c53e56a1e6e010023 100644 (file)
@@ -166,6 +166,7 @@ const char * const edac_mem_types[] = {
        [MEM_DDR5]      = "Unbuffered-DDR5",
        [MEM_NVDIMM]    = "Non-volatile-RAM",
        [MEM_WIO2]      = "Wide-IO-2",
+       [MEM_HBM2]      = "High-bandwidth-memory-Gen2",
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
index 6ce0ed2ffaaf12ffb2e19911282e86c5f5365630..83345bfac246f13f25a9b04489af479e60efb3e5 100644 (file)
 #define I10NM_GET_DIMMMTR(m, i, j)     \
        readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
        (i) * (m)->chan_mmio_sz + (j) * 4)
-#define I10NM_GET_MCDDRTCFG(m, i, j)   \
+#define I10NM_GET_MCDDRTCFG(m, i)      \
        readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
-       (i) * (m)->chan_mmio_sz + (j) * 4)
+       (i) * (m)->chan_mmio_sz)
 #define I10NM_GET_MCMTR(m, i)          \
        readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
        (i) * (m)->chan_mmio_sz)
 #define I10NM_GET_AMAP(m, i)           \
        readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
        (i) * (m)->chan_mmio_sz)
+#define I10NM_GET_REG32(m, i, offset)  \
+       readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
+#define I10NM_GET_REG64(m, i, offset)  \
+       readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
+#define I10NM_SET_REG32(m, i, offset, v)       \
+       writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
 
 #define I10NM_GET_SCK_MMIO_BASE(reg)   (GET_BITFIELD(reg, 0, 28) << 23)
 #define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12)
 #define I10NM_SAD_ENABLE(reg)          GET_BITFIELD(reg, 0, 0)
 #define I10NM_SAD_NM_CACHEABLE(reg)    GET_BITFIELD(reg, 5, 5)
 
+#define RETRY_RD_ERR_LOG_UC            BIT(1)
+#define RETRY_RD_ERR_LOG_NOOVER                BIT(14)
+#define RETRY_RD_ERR_LOG_EN            BIT(15)
+#define RETRY_RD_ERR_LOG_NOOVER_UC     (BIT(14) | BIT(1))
+#define RETRY_RD_ERR_LOG_OVER_UC_V     (BIT(2) | BIT(1) | BIT(0))
+
 static struct list_head *i10nm_edac_list;
 
+static struct res_config *res_cfg;
+static int retry_rd_err_log;
+
+static u32 offsets_scrub_icx[]  = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_scrub_spr[]  = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
+static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
+
+static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable)
+{
+       u32 s, d;
+
+       if (!imc->mbase)
+               return;
+
+       s = I10NM_GET_REG32(imc, chan, res_cfg->offsets_scrub[0]);
+       d = I10NM_GET_REG32(imc, chan, res_cfg->offsets_demand[0]);
+
+       if (enable) {
+               /* Save default configurations */
+               imc->chan[chan].retry_rd_err_log_s = s;
+               imc->chan[chan].retry_rd_err_log_d = d;
+
+               s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
+               s |=  RETRY_RD_ERR_LOG_EN;
+               d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
+               d |=  RETRY_RD_ERR_LOG_EN;
+       } else {
+               /* Restore default configurations */
+               if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
+                       s |=  RETRY_RD_ERR_LOG_UC;
+               if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
+                       s |=  RETRY_RD_ERR_LOG_NOOVER;
+               if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
+                       s &= ~RETRY_RD_ERR_LOG_EN;
+               if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
+                       d |=  RETRY_RD_ERR_LOG_UC;
+               if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
+                       d |=  RETRY_RD_ERR_LOG_NOOVER;
+               if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
+                       d &= ~RETRY_RD_ERR_LOG_EN;
+       }
+
+       I10NM_SET_REG32(imc, chan, res_cfg->offsets_scrub[0], s);
+       I10NM_SET_REG32(imc, chan, res_cfg->offsets_demand[0], d);
+}
+
+static void enable_retry_rd_err_log(bool enable)
+{
+       struct skx_dev *d;
+       int i, j;
+
+       edac_dbg(2, "\n");
+
+       list_for_each_entry(d, i10nm_edac_list, list)
+               for (i = 0; i < I10NM_NUM_IMC; i++)
+                       for (j = 0; j < I10NM_NUM_CHANNELS; j++)
+                               __enable_retry_rd_err_log(&d->imc[i], j, enable);
+}
+
+static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
+                                 int len, bool scrub_err)
+{
+       struct skx_imc *imc = &res->dev->imc[res->imc];
+       u32 log0, log1, log2, log3, log4;
+       u32 corr0, corr1, corr2, corr3;
+       u64 log2a, log5;
+       u32 *offsets;
+       int n;
+
+       if (!imc->mbase)
+               return;
+
+       offsets = scrub_err ? res_cfg->offsets_scrub : res_cfg->offsets_demand;
+
+       log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
+       log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
+       log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
+       log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
+       log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
+
+       if (res_cfg->type == SPR) {
+               log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
+               n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx]",
+                            log0, log1, log2a, log3, log4, log5);
+       } else {
+               log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
+               n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
+                            log0, log1, log2, log3, log4, log5);
+       }
+
+       corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
+       corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
+       corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
+       corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+
+       if (len - n > 0)
+               snprintf(msg + n, len - n,
+                        " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
+                        corr0 & 0xffff, corr0 >> 16,
+                        corr1 & 0xffff, corr1 >> 16,
+                        corr2 & 0xffff, corr2 >> 16,
+                        corr3 & 0xffff, corr3 >> 16);
+
+       /* Clear status bits */
+       if (retry_rd_err_log == 2 && (log0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
+               log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+               I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+       }
+}
+
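Note on the message assembly above: snprintf() returns the length it wanted to
write, so the correrrcnt part is only appended at msg + n while len - n is
still positive, keeping the second write inside the caller's buffer. The same
length-guarded append pattern in isolation (hypothetical buffer, not from this
patch):

    char buf[64];
    int len = sizeof(buf), n;

    n = snprintf(buf, len, "log[%08x]", 0x1234u);
    if (len - n > 0)
            snprintf(buf + n, len - n, " cnt[%04x]", 7u);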
 static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
                                           unsigned int dev, unsigned int fun)
 {
@@ -263,6 +386,8 @@ static struct res_config i10nm_cfg0 = {
        .ddr_chan_mmio_sz       = 0x4000,
        .sad_all_devfn          = PCI_DEVFN(29, 0),
        .sad_all_offset         = 0x108,
+       .offsets_scrub          = offsets_scrub_icx,
+       .offsets_demand         = offsets_demand_icx,
 };
 
 static struct res_config i10nm_cfg1 = {
@@ -272,6 +397,8 @@ static struct res_config i10nm_cfg1 = {
        .ddr_chan_mmio_sz       = 0x4000,
        .sad_all_devfn          = PCI_DEVFN(29, 0),
        .sad_all_offset         = 0x108,
+       .offsets_scrub          = offsets_scrub_icx,
+       .offsets_demand         = offsets_demand_icx,
 };
 
 static struct res_config spr_cfg = {
@@ -283,6 +410,8 @@ static struct res_config spr_cfg = {
        .support_ddr5           = true,
        .sad_all_devfn          = PCI_DEVFN(10, 0),
        .sad_all_offset         = 0x300,
+       .offsets_scrub          = offsets_scrub_spr,
+       .offsets_demand         = offsets_demand_spr,
 };
 
 static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -321,10 +450,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
 
                ndimms = 0;
                amap = I10NM_GET_AMAP(imc, i);
+               mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
                for (j = 0; j < imc->num_dimms; j++) {
                        dimm = edac_get_dimm(mci, i, j, 0);
                        mtr = I10NM_GET_DIMMMTR(imc, i, j);
-                       mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
                        edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
                                 mtr, mcddrtcfg, imc->mc, i, j);
 
@@ -422,6 +551,7 @@ static int __init i10nm_init(void)
                return -ENODEV;
 
        cfg = (struct res_config *)id->driver_data;
+       res_cfg = cfg;
 
        rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
        if (rc)
@@ -486,6 +616,12 @@ static int __init i10nm_init(void)
        mce_register_decode_chain(&i10nm_mce_dec);
        setup_i10nm_debug();
 
+       if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+               skx_set_decode(NULL, show_retry_rd_err_log);
+               if (retry_rd_err_log == 2)
+                       enable_retry_rd_err_log(true);
+       }
+
        i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
 
        return 0;
@@ -497,6 +633,13 @@ fail:
 static void __exit i10nm_exit(void)
 {
        edac_dbg(2, "\n");
+
+       if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+               skx_set_decode(NULL, NULL);
+               if (retry_rd_err_log == 2)
+                       enable_retry_rd_err_log(false);
+       }
+
        teardown_i10nm_debug();
        mce_unregister_decode_chain(&i10nm_mce_dec);
        skx_adxl_put();
@@ -506,5 +649,8 @@ static void __exit i10nm_exit(void)
 module_init(i10nm_init);
 module_exit(i10nm_exit);
 
+module_param(retry_rd_err_log, int, 0444);
+MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values), 2=linux(Linux tries to take control, resets mode bits, and clears valid/UC bits after reading)");
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");
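
Usage note (assuming the driver keeps its usual module name, i10nm_edac): the
0444 permission makes the parameter load-time only, e.g.

    modprobe i10nm_edac retry_rd_err_log=2

where 2 lets the driver take over the log registers via
enable_retry_rd_err_log() above, and 1 merely reports whatever configuration
the BIOS left in place.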
index 27d56920b4690ff578675bec58193bc7e3e25a84..67dbf4c3127163f96a3c0086e43696e3077b69fd 100644 (file)
@@ -1246,6 +1246,9 @@ static int __init mce_amd_init(void)
            c->x86_vendor != X86_VENDOR_HYGON)
                return -ENODEV;
 
+       if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+               return -ENODEV;
+
        if (boot_cpu_has(X86_FEATURE_SMCA)) {
                xec_mask = 0x3f;
                goto out;
index 4dbd46575bfb47e6e2ff07aae2481307e415a852..1abc020d49ab64a6654ab7b9b80f5140e17532d1 100644 (file)
@@ -230,7 +230,8 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
 #define SKX_ILV_TARGET(tgt)    ((tgt) & 7)
 
 static void skx_show_retry_rd_err_log(struct decoded_addr *res,
-                                     char *msg, int len)
+                                     char *msg, int len,
+                                     bool scrub_err)
 {
        u32 log0, log1, log2, log3, log4;
        u32 corr0, corr1, corr2, corr3;
index 5e83f59bef8aaf6d7ad122793dbf1a981d79a52a..19c17c5198c5f9d8bf9f97feae2d4c3b5669115a 100644 (file)
@@ -345,7 +345,10 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
        rows = numrow(mtr);
        cols = imc->hbm_mc ? 6 : numcol(mtr);
 
-       if (cfg->support_ddr5 && ((amap & 0x8) || imc->hbm_mc)) {
+       if (imc->hbm_mc) {
+               banks = 32;
+               mtype = MEM_HBM2;
+       } else if (cfg->support_ddr5 && (amap & 0x8)) {
                banks = 32;
                mtype = MEM_DDR5;
        } else {
@@ -529,6 +532,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
        bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
        bool overflow = GET_BITFIELD(m->status, 62, 62);
        bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+       bool scrub_err = false;
        bool recoverable;
        int len;
        u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
@@ -580,6 +584,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
                        break;
                case 4:
                        optype = "memory scrubbing error";
+                       scrub_err = true;
                        break;
                default:
                        optype = "reserved";
@@ -602,7 +607,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
        }
 
        if (skx_show_retry_rd_err_log)
-               skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len);
+               skx_show_retry_rd_err_log(res, skx_msg + len, MSG_SIZE - len, scrub_err);
 
        edac_dbg(0, "%s\n", skx_msg);
 
index 01f67e731766978583f3189215a1c44d0392322f..03ac067a80b9f779fe2ef726672f222ba4403be7 100644 (file)
@@ -80,6 +80,8 @@ struct skx_dev {
                struct skx_channel {
                        struct pci_dev  *cdev;
                        struct pci_dev  *edev;
+                       u32 retry_rd_err_log_s;
+                       u32 retry_rd_err_log_d;
                        struct skx_dimm {
                                u8 close_pg;
                                u8 bank_xor_enable;
@@ -150,12 +152,15 @@ struct res_config {
        /* SAD device number and function number */
        unsigned int sad_all_devfn;
        int sad_all_offset;
+       /* Offsets of retry_rd_err_log registers */
+       u32 *offsets_scrub;
+       u32 *offsets_demand;
 };
 
 typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
                                 struct res_config *cfg);
 typedef bool (*skx_decode_f)(struct decoded_addr *res);
-typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len);
+typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err);
 
 int __init skx_adxl_get(void);
 void __exit skx_adxl_put(void);
index 7bf0a7acae5e689f8f3470ead6b67dc54ac08f2c..2363fee9211c98d597a6863cec5771bcd8058261 100644 (file)
@@ -35,15 +35,48 @@ efi_status_t check_platform_features(void)
 }
 
 /*
- * Although relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
- * sync with those recorded in the vmlinux when kaslr is disabled but the
- * image required relocation anyway. Therefore retain 2M alignment unless
- * KASLR is in use.
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by
+ * double-checking that the first and the last byte of the image are covered
+ * by the same EFI memory map entry.
  */
-static u64 min_kimg_align(void)
+static bool check_image_region(u64 base, u64 size)
 {
-       return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+       unsigned long map_size, desc_size, buff_size;
+       efi_memory_desc_t *memory_map;
+       struct efi_boot_memmap map;
+       efi_status_t status;
+       bool ret = false;
+       int map_offset;
+
+       map.map =       &memory_map;
+       map.map_size =  &map_size;
+       map.desc_size = &desc_size;
+       map.desc_ver =  NULL;
+       map.key_ptr =   NULL;
+       map.buff_size = &buff_size;
+
+       status = efi_get_memory_map(&map);
+       if (status != EFI_SUCCESS)
+               return false;
+
+       for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+               efi_memory_desc_t *md = (void *)memory_map + map_offset;
+               u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+               /*
+                * Find the region that covers base, and return whether
+                * it covers base+size bytes.
+                */
+               if (base >= md->phys_addr && base < end) {
+                       ret = (base + size) <= end;
+                       break;
+               }
+       }
+
+       efi_bs_call(free_pool, memory_map);
+
+       return ret;
 }
 
 efi_status_t handle_kernel_image(unsigned long *image_addr,
@@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        unsigned long kernel_size, kernel_memsize = 0;
        u32 phys_seed = 0;
 
+       /*
+        * Although relocatable kernels can fix up the misalignment with
+        * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+        * subtly out of sync with those recorded in the vmlinux when kaslr is
+        * disabled but the image required relocation anyway. Therefore retain
+        * 2M alignment if KASLR was explicitly disabled, even if it was not
+        * going to be activated to begin with.
+        */
+       u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                if (!efi_nokaslr) {
                        status = efi_get_random_bytes(sizeof(phys_seed),
@@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        if (image->image_base != _text)
                efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
 
+       if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+               efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+                       EFI_KIMG_ALIGN >> 10);
+
        kernel_size = _edata - _text;
        kernel_memsize = kernel_size + (_end - _edata);
        *reserve_size = kernel_memsize;
@@ -85,14 +132,18 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                 * If KASLR is enabled, and we have some randomness available,
                 * locate the kernel at a randomized offset in physical memory.
                 */
-               status = efi_random_alloc(*reserve_size, min_kimg_align(),
+               status = efi_random_alloc(*reserve_size, min_kimg_align,
                                          reserve_addr, phys_seed);
+               if (status != EFI_SUCCESS)
+                       efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
        } else {
                status = EFI_OUT_OF_RESOURCES;
        }
 
        if (status != EFI_SUCCESS) {
-               if (IS_ALIGNED((u64)_text, min_kimg_align())) {
+               if (!check_image_region((u64)_text, kernel_memsize)) {
+                       efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+               } else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
                        /*
                         * Just execute from wherever we were loaded by the
                         * UEFI PE/COFF loader if the alignment is suitable.
@@ -103,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                }
 
                status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
-                                                   ULONG_MAX, min_kimg_align());
+                                                   ULONG_MAX, min_kimg_align);
 
                if (status != EFI_SUCCESS) {
                        efi_err("Failed to relocate kernel\n");
index a408df474d83763526168234a9ca1577b287d1c6..724155b9e10dcf84a44a836efed036574daaa728 100644 (file)
@@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
 
        region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
                         (u64)ULONG_MAX);
+       if (region_end < size)
+               return 0;
 
        first_slot = round_up(md->phys_addr, align);
        last_slot = round_down(region_end - size + 1, align);
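
Note: the slot math is unsigned, so without the new guard a region smaller
than the allocation (region_end < size) would make region_end - size + 1 wrap
around; e.g. region_end = 0xfff with size = 0x2000 yields 2^64 - 0x1000, and
round_down() would then fabricate a bogus last_slot. Returning 0 slots simply
excludes such regions from the random placement.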
index 9f937b125ab076ac0bcc2cd95b3250d918e7ae0e..60ccf3e90d7de541b87fda5c157624751dbc0980 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/arm-smccc.h>
 #include <linux/kernel.h>
+#include <linux/platform_device.h>
 #include <asm/archrandom.h>
 
 static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
@@ -42,3 +43,19 @@ u32 arm_smccc_get_version(void)
        return smccc_version;
 }
 EXPORT_SYMBOL_GPL(arm_smccc_get_version);
+
+static int __init smccc_devices_init(void)
+{
+       struct platform_device *pdev;
+
+       if (smccc_trng_available) {
+               pdev = platform_device_register_simple("smccc_trng", -1,
+                                                      NULL, 0);
+               if (IS_ERR(pdev))
+                       pr_err("smccc_trng: could not register device: %ld\n",
+                              PTR_ERR(pdev));
+       }
+
+       return 0;
+}
+device_initcall(smccc_devices_init);
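
Note: platform_device_register_simple("smccc_trng", -1, NULL, 0) creates a
bare platform device (id -1 meaning a single, unnumbered instance) whose only
purpose is to let a driver matching the "smccc_trng" name probe against it;
device_initcall() defers the registration to normal device-model bring-up.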
index 4137e848f6a24f31141940b69144b1a1da303da7..a9ce3b20d371377d47962cf5a0fa64290d313158 100644 (file)
@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
                        return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
index 3b5d13189073419fec176ec58fb004bdb92fcc8d..8f53837d4d3ee8f60acc5147e5a8e45ec02776de 100644 (file)
@@ -468,6 +468,46 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
        return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
 }
 
+/**
+ * amdgpu_atomfirmware_ras_rom_addr - query the RAS EEPROM I2C address from the VBIOS
+ *
+ * @adev: amdgpu_device pointer
+ * @i2c_address: where to store the reported I2C slave address
+ *
+ * Return: true if the VBIOS supports RAS ROM address reporting (in which
+ * case @i2c_address is filled in), false otherwise.
+ */
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t *i2c_address)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       int index;
+       u16 data_offset, size;
+       union firmware_info *firmware_info;
+       u8 frev, crev;
+
+       if (i2c_address == NULL)
+               return false;
+
+       *i2c_address = 0;
+
+       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                       firmwareinfo);
+
+       if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+                               index, &size, &frev, &crev, &data_offset)) {
+               /* support firmware_info 3.4 + */
+               if ((frev == 3 && crev >= 4) || (frev > 3)) {
+                       firmware_info = (union firmware_info *)
+                               (mode_info->atom_context->bios + data_offset);
+                       *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+               }
+       }
+
+       return *i2c_address != 0;
+}
+
 union smu_info {
        struct atom_smu_info_v3_1 v31;
 };
index 1bbbb195015d4babcd72577cbdade027971b5ee3..751248b253de8b10b22ebaa99f245e21f876e828 100644 (file)
@@ -36,6 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t *i2c_address);
 bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
index f3fd5ec710b63cd9d5d9fc80d0a8af0f646a4581..f944ed858f3e7f02c003c476eff136da705d89e1 100644 (file)
@@ -2777,12 +2777,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
 
-       mutex_lock(&adev->gfx.gfx_off_mutex);
-       if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-                       adev->gfx.gfx_off_state = true;
-       }
-       mutex_unlock(&adev->gfx.gfx_off_mutex);
+       WARN_ON_ONCE(adev->gfx.gfx_off_state);
+       WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+       if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+               adev->gfx.gfx_off_state = true;
 }
 
 /**
index 43e7b61d1c5c304af78d65873453d4776bd4a21a..ada7bc19118acde04fc5cfdca463472b6cef702b 100644 (file)
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                  ip->major, ip->minor,
                                  ip->revision);
 
+                       if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+                               adev->vcn.num_vcn_inst++;
+
                        for (k = 0; k < num_base_address; k++) {
                                /*
                                 * convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
        struct harvest_table *harvest_info;
-       int i;
+       int i, vcn_harvest_count = 0;
 
        bhdr = (struct binary_header *)adev->mman.discovery_bin;
        harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 
                switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
                case VCN_HWID:
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+                       vcn_harvest_count++;
                        break;
                case DMU_HWID:
                        adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
                        break;
                }
        }
+       if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+       }
 }
 
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
index 5ed8381ae0f5405d689700f73e2e2ef9e3d58dee..971c5b8e75dc52dbdcb0fb9a896cbf754e50ba8f 100644 (file)
@@ -1571,6 +1571,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
                pci_ignore_hotplug(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+       } else if (amdgpu_device_supports_boco(drm_dev)) {
+               /* nothing to do */
        } else if (amdgpu_device_supports_baco(drm_dev)) {
                amdgpu_device_baco_enter(drm_dev);
        }
index a0be0772c8b3c3bff7e95aa144152ad2a03c97c1..b4ced45301becd73b99ca17085491609e9a85153 100644 (file)
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
        mutex_lock(&adev->gfx.gfx_off_mutex);
 
-       if (!enable)
-               adev->gfx.gfx_off_req_count++;
-       else if (adev->gfx.gfx_off_req_count > 0)
+       if (enable) {
+               /* If the count is already 0, it means there's an imbalance bug somewhere.
+                * Note that the bug may be in a different caller than the one which triggers the
+                * WARN_ON_ONCE.
+                */
+               if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+                       goto unlock;
+
                adev->gfx.gfx_off_req_count--;
 
-       if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
-       } else if (!enable && adev->gfx.gfx_off_state) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
-                       adev->gfx.gfx_off_state = false;
+               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+       } else {
+               if (adev->gfx.gfx_off_req_count == 0) {
+                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+                       if (adev->gfx.gfx_off_state &&
+                           !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+                               adev->gfx.gfx_off_state = false;
 
-                       if (adev->gfx.funcs->init_spm_golden) {
-                               dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
-                               amdgpu_gfx_init_spm_golden(adev);
+                               if (adev->gfx.funcs->init_spm_golden) {
+                                       dev_dbg(adev->dev,
+                                               "GFXOFF is disabled, re-init SPM golden settings\n");
+                                       amdgpu_gfx_init_spm_golden(adev);
+                               }
                        }
                }
+
+               adev->gfx.gfx_off_req_count++;
        }
 
+unlock:
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
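
Note on the reworked logic: gfx_off_req_count counts outstanding "keep GFXOFF
disabled" requests, the enable path schedules the delayed worker only when the
count drops to zero, and the disable path cancels that worker synchronously
before touching the hardware, which is what lets the worker itself drop the
mutex and merely WARN on imbalance. The general shape of the pattern, reduced
to its essentials (hypothetical names, not the driver's API):

    #include <linux/jiffies.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct demo {
            struct mutex lock;
            int req_count;                   /* outstanding "disable" requests */
            struct delayed_work enable_work; /* must NOT take d->lock itself */
    };

    static void demo_feature_ctrl(struct demo *d, bool enable)
    {
            mutex_lock(&d->lock);
            if (enable) {
                    /* Unbalanced enable: nothing to undo. */
                    if (WARN_ON_ONCE(d->req_count == 0))
                            goto unlock;
                    if (--d->req_count == 0)
                            schedule_delayed_work(&d->enable_work,
                                                  msecs_to_jiffies(100));
            } else {
                    /* First disabler: make sure the worker cannot run. */
                    if (d->req_count++ == 0)
                            cancel_delayed_work_sync(&d->enable_work);
            }
    unlock:
            mutex_unlock(&d->lock);
    }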
 
index 795fa7445abec91503c2c3995d3e9e28e54822df..92c8e6e7f346bc9061bf1baa7db80ffd819fc733 100644 (file)
@@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                        return -EINVAL;
        }
 
-       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
-        * See function amdgpu_display_supported_domains()
-        */
-       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
        if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.resource->mem_type;
                uint32_t mem_flags = bo->tbo.resource->placement;
@@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                return 0;
        }
 
+       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+        * See function amdgpu_display_supported_domains()
+        */
+       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+
        if (bo->tbo.base.import_attach)
                dma_buf_pin(bo->tbo.base.import_attach);
 
index f40c871da0c623d584953d23b292b5772f91432f..38222de921d159d19dea6b0d9e09b5624ee9e8f7 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu_ras.h"
 #include <linux/bits.h>
 #include "atom.h"
+#include "amdgpu_atomfirmware.h"
 
 #define EEPROM_I2C_TARGET_ADDR_VEGA20          0xA0
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS                0xA8
@@ -96,6 +97,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
        if (!i2c_addr)
                return false;
 
+       if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t *)i2c_addr))
+               return true;
+
        switch (adev->asic_type) {
        case CHIP_VEGA20:
                *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
index c7b364e4a287fdc396d3dd0c7b1a41792eb7abf4..e883731c3f8ffb9927431b554d37dcb885c5e424 100644 (file)
@@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
                 start + size - 1, nattr);
 
+       /* Flush pending deferred work to avoid racing with deferred actions from
+        * previous memory map changes (e.g. munmap). Concurrent memory map changes
+        * can still race with get_attr because we don't hold the mmap lock. But that
+        * would be a race condition in the application anyway, and undefined
+        * behaviour is acceptable in that case.
+        */
+       flush_work(&p->svms.deferred_list_work);
+
        mmap_read_lock(mm);
        if (!svm_range_is_valid(mm, start, size)) {
                pr_debug("invalid range\n");
index c0ae73b0691c03d196fff1584cb475cb5302d306..afa96c8f721b79b7760d7e695ceea9818d2a0d6e 100644 (file)
@@ -9605,7 +9605,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
-                       set_freesync_fixed_config(dm_new_crtc_state);
+                       struct drm_display_mode *high_mode;
+
+                       high_mode = get_highest_refresh_rate_mode(aconnector, false);
+                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+                               set_freesync_fixed_config(dm_new_crtc_state);
+                       }
                }
 
                ret = dm_atomic_get_state(state, &dm_state);
index 40f617bbb86f4be2d5cea49ee96ead6dd796c5f5..4aba0e8c84f814e19996874db91f8c2238b7476a 100644 (file)
@@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
                handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
 
                /*allocate a new amdgpu_dm_irq_handler_data*/
-               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+               handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
                if (!handler_data_add) {
                        DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
                        return;
index 605e297b7a591933268c7771a9fd9b7a2f7bf019..a30283fa5173ce3479e825e2383c5fc4c1ad80ba 100644 (file)
@@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc)
        if (dc->hwss.z10_restore)
                dc->hwss.z10_restore(dc);
 }
+
+void dc_z10_save_init(struct dc *dc)
+{
+       if (dc->hwss.z10_save_init)
+               dc->hwss.z10_save_init(dc);
+}
 #endif
 /*
  * Applies given context to HW and copy it into current context.
index f2b39ec35c89867f542bf89931a6b9387cfd10fa..cde8ed2560b357ae33af143f3c787ed62e784486 100644 (file)
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
                 */
                memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
                dc->vm_pa_config.valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               dc_z10_save_init(dc);
+#endif
        }
 
        return num_vmids;
index af7b60108e9d3d44ec9f46b8305ed8bda7dbb6cc..21d78289b0488dafea7b9f6f65a808d69d97371f 100644 (file)
@@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc);
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 void dc_z10_restore(struct dc *dc);
+void dc_z10_save_init(struct dc *dc);
 #endif
 
 bool dc_enable_dmub_notifications(struct dc *dc);
index 253654d605c2302f3d3b4c2ed15eb96efaa37ebd..28e15ebf2f43128367cdcdd79c6559dfa5fa4759 100644 (file)
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
                }
                pri_pipe->next_odm_pipe = sec_pipe;
                sec_pipe->prev_odm_pipe = pri_pipe;
-               ASSERT(sec_pipe->top_pipe == NULL);
 
                if (!sec_pipe->top_pipe)
                        sec_pipe->stream_res.opp = pool->opps[pipe_idx];
index 9776d17378184f838571880d145bd076f07cfd30..912285fdce18e807cde212a91125f15f7998a8ab 100644 (file)
@@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
        dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
-static void calculate_wm_set_for_vlevel(
-               int vlevel,
-               struct wm_range_table_entry *table_entry,
-               struct dcn_watermarks *wm_set,
-               struct display_mode_lib *dml,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt)
-{
-       double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
-       ASSERT(vlevel < dml->soc.num_states);
-       /* only pipe 0 is read for voltage and dcf/soc clocks */
-       pipes[0].clks_cfg.voltage = vlevel;
-       pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
-       pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
-       dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
-       dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
-       dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
-       wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
-       wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
-       wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
-       wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
-       wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-       dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
-               struct dc *dc, struct dc_state *context,
-               display_e2e_pipe_params_st *pipes,
-               int pipe_cnt,
-               int vlevel_req)
-{
-       int i, pipe_idx;
-       int vlevel, vlevel_max;
-       struct wm_range_table_entry *table_entry;
-       struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
-       ASSERT(bw_params);
-
-       vlevel_max = bw_params->clk_table.num_entries - 1;
-
-       /* WM Set D */
-       table_entry = &bw_params->wm_table.entries[WM_D];
-       if (table_entry->wm_type == WM_TYPE_RETRAINING)
-               vlevel = 0;
-       else
-               vlevel = vlevel_max;
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set C */
-       table_entry = &bw_params->wm_table.entries[WM_C];
-       vlevel = min(max(vlevel_req, 2), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-       /* WM Set B */
-       table_entry = &bw_params->wm_table.entries[WM_B];
-       vlevel = min(max(vlevel_req, 1), vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       /* WM Set A */
-       table_entry = &bw_params->wm_table.entries[WM_A];
-       vlevel = min(vlevel_req, vlevel_max);
-       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
-                                               &context->bw_ctx.dml, pipes, pipe_cnt);
-
-       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-               if (!context->res_ctx.pipe_ctx[i].stream)
-                       continue;
-
-               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
-               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
-               if (dc->config.forced_clocks) {
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
-               }
-               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
-               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
-                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
-               pipe_idx++;
-       }
-
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
 static struct resource_funcs dcn301_res_pool_funcs = {
        .destroy = dcn301_destroy_resource_pool,
        .link_enc_create = dcn301_link_encoder_create,
        .panel_cntl_create = dcn301_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
-       .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
        .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
index 6ac6faf0c533babff7fc13421f059815affe2d75..8a2119d8ca0de8dd79f1d4d2b7520579a78b3f7f 100644 (file)
@@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
                        &pipe_ctx->stream_res.encoder_info_frame);
        }
 }
+void dcn31_z10_save_init(struct dc *dc)
+{
+       union dmub_rb_cmd cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
+       cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
+
+       dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
+       dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+       dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+}
 
 void dcn31_z10_restore(struct dc *dc)
 {
index 40dfebe78fdd1284c33784aecd666aaba94360e9..140435e4f7fffa942c78bbd89fa916d7633a3e0f 100644 (file)
@@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane(
 void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
 
 void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_save_init(struct dc *dc);
 
 void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
 int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config);
index aaf2dbd095fe1db82d00486fbd4c6af1401423b5..b30d923471cb75dce36883547002759e3e945c82 100644 (file)
@@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
        .z10_restore = dcn31_z10_restore,
+       .z10_save_init = dcn31_z10_save_init,
        .is_abm_supported = dcn31_is_abm_supported,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
        .update_visual_confirm_color = dcn20_update_visual_confirm_color,
index 5ab008e62b8251475975b3b7d553d28ba6221ec4..ad5f2adcc40d5d1f00ad76513c27610414c726a4 100644 (file)
@@ -237,6 +237,7 @@ struct hw_sequencer_funcs {
                        int width, int height, int offset);
 
        void (*z10_restore)(struct dc *dc);
+       void (*z10_save_init)(struct dc *dc);
 
        void (*update_visual_confirm_color)(struct dc *dc,
                        struct pipe_ctx *pipe_ctx,
index 7c4734f905d921ba105b9dd657a33b76d818bfc4..7fafb8d6c1da1984e0b87fb2c1a332b4e658ca5e 100644 (file)
@@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type {
         * DCN hardware restore.
         */
        DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0,
+
+       /**
+        * DCN hardware save.
+        */
+       DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1
 };
 
 /**
index 3811e58dd8578908a91a9ae01609292b1464e319..44955458fe38193db56852f25ff6acd723ff245d 100644 (file)
@@ -590,7 +590,7 @@ struct atom_firmware_info_v3_4 {
        uint8_t  board_i2c_feature_id;            // enum of atom_board_i2c_feature_id_def
        uint8_t  board_i2c_feature_gpio_id;       // i2c id find in gpio_lut data table gpio_id
        uint8_t  board_i2c_feature_slave_addr;
-       uint8_t  reserved3;
+       uint8_t  ras_rom_i2c_slave_addr;
        uint16_t bootup_mvddq_mv;
        uint16_t bootup_mvpp_mv;
        uint32_t zfbstartaddrin16mb;
index 5627de7342468ee79d935bea0d6eb25848a4f934..c5e26d619bf0aab9038dab164d8ddb579eef25ac 100644 (file)
@@ -111,7 +111,9 @@ typedef struct {
   uint32_t InWhisperMode        : 1;
   uint32_t spare0               : 1;
   uint32_t ZstateStatus         : 4;
-  uint32_t spare1               :12;
+  uint32_t spare1               : 4;
+  uint32_t DstateFun            : 4;
+  uint32_t DstateDev            : 4;
   // MP1_EXT_SCRATCH2
   uint32_t P2JobHandler         :24;
   uint32_t RsmuPmiP2FinishedCnt : 8;
index 25979106fd255e1e6b26d35c128358a3b7545a3a..02e8c6e5448de69a94463049455663bdb71bf65d 100644 (file)
@@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        return size;
 }
 
+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       return (adev->pdev->device == 0x6860);
+}
+
 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
        struct vega10_hwmgr *data = hwmgr->backend;
@@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
        }
 
 out:
-       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+       if (vega10_get_power_profile_mode_quirks(hwmgr))
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+                                               1 << power_profile_mode,
+                                               NULL);
+       else
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
                                                (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
                                                NULL);
+
        hwmgr->power_profile_mode = power_profile_mode;
 
        return 0;
index c751f717a0daff49620bcd850906bb362df1f665..d92dd2c7448e3e73d04509e1ae8c15c47236a569 100644 (file)
@@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t val;
 
-       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
-           powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) {
                val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
                smu_baco->platform_support =
                        (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
index 18681dc458da2b9fe96335e098dcb7b6681a4154..bcaaa086fc2fbfef34eed9d8794bb350eb463790 100644 (file)
@@ -256,7 +256,7 @@ static int vangogh_tables_init(struct smu_context *smu)
        return 0;
 
 err3_out:
-       kfree(smu_table->clocks_table);
+       kfree(smu_table->watermarks_table);
 err2_out:
        kfree(smu_table->gpu_metrics_table);
 err1_out:
index d29907955ff79de5cb4fb885a6d1180bfe81daf1..5d82891c32223e4da909ab7efa0b5a0c527cf74a 100644 (file)
@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        req.request.sequence = req32.request.sequence;
        req.request.signal = req32.request.signal;
        err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
-       if (err)
-               return err;
 
        req32.reply.type = req.reply.type;
        req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        if (copy_to_user(argp, &req32, sizeof(req32)))
                return -EFAULT;
 
-       return 0;
+       return err;
 }
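
Note: the reply is now copied back to userspace unconditionally and err is
propagated afterwards, so compat callers see whatever the handler wrote into
the reply even on failure, as native callers do; a copy_to_user() failure
still takes precedence as -EFAULT.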
 
 #if defined(CONFIG_X86)
index be716b56e8e080f11b830688d74c81542939a567..00dade49665b8e358c9f3d09e7706a7b6f1187b9 100644 (file)
@@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
        }
 }
 
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+       if (IS_ALDERLAKE_P(i915))
+               return BIT(PIPE_A) | BIT(PIPE_B);
+       else
+               return BIT(PIPE_A);
+}
+
 static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
@@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
        if (!pipe_config->splitter.enable)
                return;
 
-       /* Splitter enable is supported for pipe A only. */
-       if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
+       if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
                pipe_config->splitter.enable = false;
                return;
        }
@@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
                return;
 
        if (crtc_state->splitter.enable) {
-               /* Splitter enable is supported for pipe A only. */
-               if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
-                       return;
-
                dss1 |= SPLITTER_ENABLE;
                dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
                if (crtc_state->splitter.link_count == 2)
@@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 
                dig_port->hpd_pulse = intel_dp_hpd_pulse;
 
-               /* Splitter enable for eDP MSO is limited to certain pipes. */
-               if (dig_port->dp.mso_link_count) {
-                       encoder->pipe_mask = BIT(PIPE_A);
-                       if (IS_ALDERLAKE_P(dev_priv))
-                               encoder->pipe_mask |= BIT(PIPE_B);
-               }
+               if (dig_port->dp.mso_link_count)
+                       encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
        }
 
        /* In theory we don't need the encoder->type check, but leave it just in
index 2d5d21740c25b6bcd85e9fa3589ff936ad351183..0a8a2395c8acacdc34d069c1d189b0b65dd9b482 100644 (file)
@@ -5746,16 +5746,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
 
        switch (crtc_state->pipe_bpp) {
        case 18:
-               val |= PIPEMISC_DITHER_6_BPC;
+               val |= PIPEMISC_6_BPC;
                break;
        case 24:
-               val |= PIPEMISC_DITHER_8_BPC;
+               val |= PIPEMISC_8_BPC;
                break;
        case 30:
-               val |= PIPEMISC_DITHER_10_BPC;
+               val |= PIPEMISC_10_BPC;
                break;
        case 36:
-               val |= PIPEMISC_DITHER_12_BPC;
+               /* Port output 12 BPC defined for ADLP+ */
+               if (DISPLAY_VER(dev_priv) > 12)
+                       val |= PIPEMISC_12_BPC_ADLP;
                break;
        default:
                MISSING_CASE(crtc_state->pipe_bpp);
@@ -5808,15 +5810,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
 
        tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
-       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-       case PIPEMISC_DITHER_6_BPC:
+       switch (tmp & PIPEMISC_BPC_MASK) {
+       case PIPEMISC_6_BPC:
                return 18;
-       case PIPEMISC_DITHER_8_BPC:
+       case PIPEMISC_8_BPC:
                return 24;
-       case PIPEMISC_DITHER_10_BPC:
+       case PIPEMISC_10_BPC:
                return 30;
-       case PIPEMISC_DITHER_12_BPC:
-               return 36;
+       /*
+        * PORT OUTPUT 12 BPC defined for ADLP+.
+        *
+        * TODO:
+        * For previous platforms with DSI interface, bits 5:7
+        * are used for storing pipe_bpp irrespective of dithering.
+        * Since the value of 12 BPC is not defined for these bits
+        * on older platforms, we need to find a workaround for 12 BPC
+        * MIPI DSI HW readout.
+        */
+       case PIPEMISC_12_BPC_ADLP:
+               if (DISPLAY_VER(dev_priv) > 12)
+                       return 36;
+               fallthrough;
        default:
                MISSING_CASE(tmp);
                return 0;
index 4298ae684d7d9b36c580ce1241eccd0abc011249..86b7ac7b65ecda4cd2bca020abc3fed22e434a25 100644 (file)
@@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
        if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
            IS_BROXTON(i915)) {
                bxt_enable_dc9(i915);
-               /* Tweaked Wa_14010685332:icp,jsp,mcc */
-               if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-                       intel_de_rmw(i915, SOUTH_CHICKEN1,
-                                    SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_enable_pc8(i915);
        }
+
+       /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+       if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+               intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
 }
 
 void intel_display_power_resume_early(struct drm_i915_private *i915)
@@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
            IS_BROXTON(i915)) {
                gen9_sanitize_dc_state(i915);
                bxt_disable_dc9(i915);
-               /* Tweaked Wa_14010685332:icp,jsp,mcc */
-               if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
-                       intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_disable_pc8(i915);
        }
+
+       /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+       if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+               intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
 }
 
 void intel_display_power_suspend(struct drm_i915_private *i915)
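The relocated workaround applies the same read-modify-write twice: set SBCLK_RUN_REFCLK_DIS on suspend and clear it on resume, for PCH types from CNP up to (but not including) DG1. A hedged sketch of the rmw(value, mask, bits) pattern that intel_de_rmw() implements over a register (hypothetical helper):

	static u32 rmw(u32 old, u32 mask, u32 bits)
	{
		return (old & ~mask) | bits; /* clear the mask, then set bits */
	}

	/* suspend: rmw(val, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS) sets the bit */
	/* resume:  rmw(val, SBCLK_RUN_REFCLK_DIS, 0) clears it again */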
index 6cc03b9e4321aa5376d8fa4de41e86ed507542b1..862c1df69cc2a7e76f68f12b54d0739115cf9f28 100644 (file)
@@ -3850,23 +3850,18 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
 
 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;
 
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
-                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
-               drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
                return;
-       }
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
-                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
-               drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
                return;
-       }
 
        if (val & HDMI_LINK_STATUS_CHANGED)
                intel_dp_handle_hdmi_link_status_change(intel_dp);
index 08bceae40aa8ddd9e5ae588cf1ebd15c76f92ad0..053a3c2f726776b5e50aaaf72c40b31630fe5674 100644 (file)
@@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 
        return lttpr_count;
 }
-EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
 {
index c4a126c8caef871ab14b2c78ad19d044636c66cc..1257f4f11e66fc5e62a52a838201abf69b325e1d 100644 (file)
@@ -127,6 +127,15 @@ static void intel_timeline_fini(struct rcu_head *rcu)
 
        i915_vma_put(timeline->hwsp_ggtt);
        i915_active_fini(&timeline->active);
+
+       /*
+        * A small race exists between intel_gt_retire_requests_timeout and
+        * intel_timeline_exit which could result in the syncmap not getting
+        * freed. Rather than work too hard to seal this race, simply clean up
+        * the syncmap on fini.
+        */
+       i915_syncmap_free(&timeline->sync);
+
        kfree(timeline);
 }
 
index 06024d321a1a5c3fe660754071ffd543865bf8dc..cde0a477fb497ef02c6d8a31f24be496fb00c722 100644 (file)
@@ -3149,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
        MMIO_D(_MMIO(0xb110), D_BDW);
+       MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
 
        MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
                D_BDW_PLUS, NULL, force_nonpriv_write);
index b8ac8076546173b2a428ff68c393e77291d721f2..f776c470914d28f7f6adfc1a15ec306ec6444c95 100644 (file)
@@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
        {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
        {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+       {RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
+       {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
        {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
        {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
        {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
index 35c97c39f125b4c6cf16a8c9f74322d61937dbd3..966664610c8cdc473e0c33c2c4855a73236752ac 100644 (file)
@@ -727,9 +727,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
        if (GRAPHICS_VER(m->i915) >= 12) {
                int i;
 
-               for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+               for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+                       /*
+                        * SFC_DONE resides in the VD forcewake domain, so it
+                        * only exists if the corresponding VCS engine is
+                        * present.
+                        */
+                       if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                               continue;
+
                        err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
                                   gt->sfc_done[i]);
+               }
 
                err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
        }
@@ -1581,6 +1590,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
 
        if (GRAPHICS_VER(i915) >= 12) {
                for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+                       /*
+                        * SFC_DONE resides in the VD forcewake domain, so it
+                        * only exists if the corresponding VCS engine is
+                        * present.
+                        */
+                       if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                               continue;
+
                        gt->sfc_done[i] =
                                intel_uncore_read(uncore, GEN12_SFC_DONE(i));
                }
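Both hunks gate SFC_DONE instance i on the presence of VCS engine 2*i: each SFC unit serves a pair of video engines and sits in that pair's VD forcewake domain, so the register only exists when the engine does. A condensed sketch of the skip logic (hypothetical helpers standing in for HAS_ENGINE() and intel_uncore_read()):

	for (i = 0; i < sfc_done_max; i++) {
		/* SFC i pairs with VCS engines 2*i and 2*i + 1 */
		if (!has_engine(gt, vcs_id(2 * i)))
			continue; /* no engine, no SFC_DONE register */
		sfc_done[i] = read_reg(sfc_done_reg(i));
	}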
index c03943198089d547a2a8ee2d87b7fa6a1981832b..c3816f5c690011f862c630e4e5abc6783f0d3ee1 100644 (file)
@@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
-{
-       struct intel_uncore *uncore = &dev_priv->uncore;
-
-       /*
-        * Wa_14010685332:cnp/cmp,tgp,adp
-        * TODO: Clarify which platforms this applies to
-        * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
-        * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
-        */
-       if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
-           (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
-               intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
-                                SBCLK_RUN_REFCLK_DIS);
-               intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
-       }
-}
-
 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
        if (HAS_PCH_SPLIT(dev_priv))
                ibx_irq_reset(dev_priv);
 
-       cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
 
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                GEN3_IRQ_RESET(uncore, SDE);
-
-       cnp_display_clock_wa(dev_priv);
 }
 
 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
index 41186c1f771e5d2e96af54c070393a410034e2b8..476bb3b9ad11afb14e2b4de0ea522718d03af65e 100644 (file)
@@ -6163,11 +6163,17 @@ enum {
 #define   PIPEMISC_HDR_MODE_PRECISION  (1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
 #define   PIPEMISC_PIXEL_ROUNDING_TRUNC        REG_BIT(8) /* tgl+ */
-#define   PIPEMISC_DITHER_BPC_MASK     (7 << 5)
-#define   PIPEMISC_DITHER_8_BPC                (0 << 5)
-#define   PIPEMISC_DITHER_10_BPC       (1 << 5)
-#define   PIPEMISC_DITHER_6_BPC                (2 << 5)
-#define   PIPEMISC_DITHER_12_BPC       (3 << 5)
+/*
+ * For Display < 13, bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of 6, 8 and 10 BPC.
+ * On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of
+ * 6, 8, 10 and 12 BPC.
+ */
+#define   PIPEMISC_BPC_MASK            (7 << 5)
+#define   PIPEMISC_8_BPC               (0 << 5)
+#define   PIPEMISC_10_BPC              (1 << 5)
+#define   PIPEMISC_6_BPC               (2 << 5)
+#define   PIPEMISC_12_BPC_ADLP         (4 << 5) /* adlp+ */
 #define   PIPEMISC_DITHER_ENABLE       (1 << 4)
 #define   PIPEMISC_DITHER_TYPE_MASK    (3 << 2)
 #define   PIPEMISC_DITHER_TYPE_SP      (0 << 2)
index 8710f55d25798fc8a6a54153122807a42c5a21c6..bd1f9f0366d375345fc5da26f2b770178de42022 100644 (file)
@@ -683,7 +683,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                break;
        }
 
-       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
+       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
 
        width = ipu_src_rect_width(new_state);
        height = drm_rect_height(&new_state->src) >> 16;
index 6f4c80bbc0eb6730cc50453e4f672fb3a7a83bf1..473f5bb5cbadce716fb3590dcaaa24c47ebca0db 100644 (file)
@@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
 
 static int mtk_disp_color_remove(struct platform_device *pdev)
 {
+       component_del(&pdev->dev, &mtk_disp_color_component_ops);
+
        return 0;
 }
 
index fa9d79963cd342560acb35d4bda07f507ae80b93..5326989d520615775c0612378ccfaa86a1ef1ac2 100644 (file)
@@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
 
 static int mtk_disp_ovl_remove(struct platform_device *pdev)
 {
+       component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
        return 0;
 }
 
index bced555648b01385566733f5ee68313d2bd7f9bd..e94738fe4db897c497812a0b347cba82adae7bc3 100644 (file)
@@ -605,11 +605,15 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
                                       struct drm_crtc_state *crtc_state,
                                       struct drm_connector_state *conn_state)
 {
-       struct mtk_dpi *dpi = bridge->driver_private;
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
        unsigned int out_bus_format;
 
        out_bus_format = bridge_state->output_bus_cfg.format;
 
+       if (out_bus_format == MEDIA_BUS_FMT_FIXED)
+               if (dpi->conf->num_output_fmts)
+                       out_bus_format = dpi->conf->output_fmts[0];
+
        dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
                bridge_state->input_bus_cfg.format,
                bridge_state->output_bus_cfg.format);
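When the bridge state reports MEDIA_BUS_FMT_FIXED, the driver now substitutes its first supported output format instead of programming an undefined one. The fallback reduces to a small selection helper (a sketch under the assumption, implied by the patch, that output_fmts[0] is the preferred format):

	static unsigned int pick_output_fmt(unsigned int requested,
					    const unsigned int *fmts, int n)
	{
		/* MEDIA_BUS_FMT_FIXED means "no preference"; use our default */
		if (requested == MEDIA_BUS_FMT_FIXED && n > 0)
			return fmts[0];
		return requested;
	}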
index 474efb84424933d894812f6c783b51a1b5cef38a..735efe79f07599cb02e9817db6281fd73898e4ab 100644 (file)
@@ -532,13 +532,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
                               struct drm_atomic_state *state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       const struct drm_plane_helper_funcs *plane_helper_funcs =
-                       plane->helper_private;
 
        if (!mtk_crtc->enabled)
                return;
 
-       plane_helper_funcs->atomic_update(plane, state);
        mtk_drm_crtc_update_config(mtk_crtc, false);
 }
 
index 75bc00e17fc49302e8097084d66b1ceece3a93d7..50d20562e612d22548cd091feef900f95b545ecf 100644 (file)
@@ -34,6 +34,7 @@
 
 #define DISP_AAL_EN                            0x0000
 #define DISP_AAL_SIZE                          0x0030
+#define DISP_AAL_OUTPUT_SIZE                   0x04d8
 
 #define DISP_DITHER_EN                         0x0000
 #define DITHER_EN                              BIT(0)
@@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w,
        struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
        mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
+       mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE);
 }
 
 static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
index b5582dcf564cebca0db196196737f234d6a21b29..e6dcb34d30522444511a5c1fd9e85bbcdb534dfe 100644 (file)
@@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
                                                   true, true);
 }
 
+static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+                                      struct mtk_plane_state *mtk_plane_state)
+{
+       struct drm_framebuffer *fb = new_state->fb;
+       struct drm_gem_object *gem;
+       struct mtk_drm_gem_obj *mtk_gem;
+       unsigned int pitch, format;
+       dma_addr_t addr;
+
+       gem = fb->obj[0];
+       mtk_gem = to_mtk_gem_obj(gem);
+       addr = mtk_gem->dma_addr;
+       pitch = fb->pitches[0];
+       format = fb->format->format;
+
+       addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+       addr += (new_state->src.y1 >> 16) * pitch;
+
+       mtk_plane_state->pending.enable = true;
+       mtk_plane_state->pending.pitch = pitch;
+       mtk_plane_state->pending.format = format;
+       mtk_plane_state->pending.addr = addr;
+       mtk_plane_state->pending.x = new_state->dst.x1;
+       mtk_plane_state->pending.y = new_state->dst.y1;
+       mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+       mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+       mtk_plane_state->pending.rotation = new_state->rotation;
+}
+
 static void mtk_plane_atomic_async_update(struct drm_plane *plane,
                                          struct drm_atomic_state *state)
 {
@@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_h = new_state->src_h;
        plane->state->src_w = new_state->src_w;
        swap(plane->state->fb, new_state->fb);
-       new_plane_state->pending.async_dirty = true;
 
+       mtk_plane_update_new_state(new_state, new_plane_state);
+       wmb(); /* Make sure the above parameters are set before update */
+       new_plane_state->pending.async_dirty = true;
        mtk_drm_crtc_async_update(new_state->crtc, plane, state);
 }
 
@@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
-       struct drm_crtc *crtc = new_state->crtc;
-       struct drm_framebuffer *fb = new_state->fb;
-       struct drm_gem_object *gem;
-       struct mtk_drm_gem_obj *mtk_gem;
-       unsigned int pitch, format;
-       dma_addr_t addr;
 
-       if (!crtc || WARN_ON(!fb))
+       if (!new_state->crtc || WARN_ON(!new_state->fb))
                return;
 
        if (!new_state->visible) {
@@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
                return;
        }
 
-       gem = fb->obj[0];
-       mtk_gem = to_mtk_gem_obj(gem);
-       addr = mtk_gem->dma_addr;
-       pitch = fb->pitches[0];
-       format = fb->format->format;
-
-       addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
-       addr += (new_state->src.y1 >> 16) * pitch;
-
-       mtk_plane_state->pending.enable = true;
-       mtk_plane_state->pending.pitch = pitch;
-       mtk_plane_state->pending.format = format;
-       mtk_plane_state->pending.addr = addr;
-       mtk_plane_state->pending.x = new_state->dst.x1;
-       mtk_plane_state->pending.y = new_state->dst.y1;
-       mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
-       mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
-       mtk_plane_state->pending.rotation = new_state->rotation;
+       mtk_plane_update_new_state(new_state, mtk_plane_state);
        wmb(); /* Make sure the above parameters are set before update */
        mtk_plane_state->pending.dirty = true;
 }
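Both the sync and async paths now fill the pending state through mtk_plane_update_new_state() and only then raise the dirty/async_dirty flag behind a wmb(): a publish pattern where the consumer may observe the flag only after every payload store is visible. A minimal sketch of that ordering (hypothetical producer/consumer; the read side would need the matching barrier shown):

	/* producer */
	pending->addr  = addr;
	pending->pitch = pitch;
	wmb();                          /* publish payload before the flag */
	WRITE_ONCE(pending->dirty, true);

	/* consumer */
	if (READ_ONCE(pending->dirty)) {
		rmb();                  /* pairs with the producer's wmb() */
		program_hw(pending->addr, pending->pitch);
	}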
index 446e7961da48632071b48291eaae852f4e1b6968..0f3cafab8860023bc033af551e414098fb32690d 100644 (file)
 #define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
 #define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
 
+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN       BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT            BIT(16)
+
 /* osd2 scaler */
 #define OSD2_VSC_PHASE_STEP 0x3d00
 #define OSD2_VSC_INI_PHASE 0x3d01
index aede0c67a57f09c8effef544582e8659b0ec087e..259f3e6bec90ad694e6afeaeb5f1e894c2bf5dac 100644 (file)
@@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv)
        if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
            meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
                meson_viu_load_matrix(priv);
-       else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+       else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
                meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
                                               true);
+               /* fix green/pink color distortion from vendor u-boot */
+               writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+                               OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+                               priv->io_base + _REG(OSD1_HDR2_CTRL));
+       }
 
        /* Initialize OSD1 fifo control register */
        reg = VIU_OSD_DDR_PRIORITY_URGENT |
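writel_bits_relaxed(mask, value, addr) updates only the masked bits, so passing 0 as the value clears OSD1_HDR2_CTRL_REG_ONLY_MAT and OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN that the vendor u-boot may have left set. Assuming those helper semantics, the clear reduces to a plain read-modify-write:

	static void clear_bits(void __iomem *reg, u32 mask)
	{
		writel(readl(reg) & ~mask, reg); /* masked bits forced to 0 */
	}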
index f949767698fc8ee9a3d6ede4873f04088fd88d0d..bcb0310a41b63e507aa7239e30a17261200e7c69 100644 (file)
@@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                interlock[NV50_DISP_INTERLOCK_CORE] = 0;
        }
 
+       /* Finish updating head(s)...
+        *
+        * NVD is rather picky about both where window assignments can change,
+        * *and* about certain core and window channel states matching.
+        *
+        * The EFI GOP driver on newer GPUs configures window channels with a
+        * different output format from what we do, and the core channel update
+        * in the assign_windows case above would result in a state mismatch.
+        *
+        * Delay part of the head update until after that point to work
+        * around the issue.  This only affects the initial modeset.
+        *
+        * TODO: handle this better when adding flexible window mapping
+        */
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+               struct nv50_head *head = nv50_head(crtc);
+
+               NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+                         asyh->set.mask, asyh->clr.mask);
+
+               if (asyh->set.mask) {
+                       nv50_head_flush_set_wndw(head, asyh);
+                       interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+               }
+       }
+
        /* Update plane(s). */
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
index ec361d17e900bec9df47f826e034f17561344cf3..d66f97280282a3a2f2a85299280f01a5a174c06f 100644 (file)
@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
 }
 
 void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-       if (asyh->set.view   ) head->func->view    (head, asyh);
-       if (asyh->set.mode   ) head->func->mode    (head, asyh);
-       if (asyh->set.core   ) head->func->core_set(head, asyh);
        if (asyh->set.olut   ) {
                asyh->olut.offset = nv50_lut_load(&head->olut,
                                                  asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
                                                  asyh->olut.load);
                head->func->olut_set(head, asyh);
        }
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       if (asyh->set.view   ) head->func->view    (head, asyh);
+       if (asyh->set.mode   ) head->func->mode    (head, asyh);
+       if (asyh->set.core   ) head->func->core_set(head, asyh);
        if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.base   ) head->func->base    (head, asyh);
        if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
index dae841dc05fdf7d303f70c6483b7d7651f6e628d..0bac6be9ba34ddf2d1be4415ee8a970006556f9f 100644 (file)
@@ -21,6 +21,7 @@ struct nv50_head {
 
 struct nv50_head *nv50_head_create(struct drm_device *, int index);
 void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
 void nv50_head_flush_clr(struct nv50_head *head,
                         struct nv50_head_atom *asyh, bool flush);
 
index 0b86c44878e0c85c32bf11b20ab518642775e5c6..59759c4fb62e2bfbaca724453eb0350db910f1ee 100644 (file)
@@ -4,7 +4,8 @@
 
 struct nv_device_v0 {
        __u8  version;
-       __u8  pad01[7];
+       __u8  priv;
+       __u8  pad02[6];
        __u64 device;   /* device identifier, ~0 for client default */
 };
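The new priv byte is carved out of the old pad01[7] padding, so the structure's size and the offset of the device field are unchanged and existing userspace keeps working. A hedged compile-time check of that ABI invariant (standalone sketch, with u8/u64 spelled as stdint types):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct nv_device_v0 {
		uint8_t  version;
		uint8_t  priv;      /* was the first byte of pad01[7] */
		uint8_t  pad02[6];
		uint64_t device;
	};

	static_assert(sizeof(struct nv_device_v0) == 16, "ABI size unchanged");
	static_assert(offsetof(struct nv_device_v0, device) == 8, "offset unchanged");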
 
index ba2c28ea43d20f46b6aca9a2e4e2798a50de45d0..c68cc957248e2b268f7fcdc2b0f77b2cf4d4a948 100644 (file)
@@ -61,8 +61,6 @@
 #define NV10_CHANNEL_DMA                              /* cl506b.h */ 0x0000006e
 #define NV17_CHANNEL_DMA                              /* cl506b.h */ 0x0000176e
 #define NV40_CHANNEL_DMA                              /* cl506b.h */ 0x0000406e
-#define NV50_CHANNEL_DMA                              /* cl506e.h */ 0x0000506e
-#define G82_CHANNEL_DMA                               /* cl826e.h */ 0x0000826e
 
 #define NV50_CHANNEL_GPFIFO                           /* cl506f.h */ 0x0000506f
 #define G82_CHANNEL_GPFIFO                            /* cl826f.h */ 0x0000826f
index 347d2c020bd194cbcacae4d8eaf7d1694a6d9c79..5d9395e651b6bc1070218c138d4e218634a66f21 100644 (file)
@@ -9,7 +9,6 @@ struct nvif_client {
        const struct nvif_driver *driver;
        u64 version;
        u8 route;
-       bool super;
 };
 
 int  nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
index 8e85b936eaa041da8369903d08ca6c13feab54f1..7a3af05f7f98ddcf622a9a40a598fb9095199e8b 100644 (file)
@@ -11,7 +11,7 @@ struct nvif_driver {
        void (*fini)(void *priv);
        int (*suspend)(void *priv);
        int (*resume)(void *priv);
-       int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+       int (*ioctl)(void *priv, void *data, u32 size, void **hack);
        void __iomem *(*map)(void *priv, u64 handle, u32 size);
        void (*unmap)(void *priv, void __iomem *ptr, u32 size);
        bool keep;
index 5d7017fe5039b8e4c35e9b0ed7166e38f8a5a368..2f86606e708c9ffe1ff074b1dc9777f4c0f8fb33 100644 (file)
@@ -13,7 +13,6 @@ struct nvkm_client {
        struct nvkm_client_notify *notify[32];
        struct rb_root objroot;
 
-       bool super;
        void *data;
        int (*ntfy)(const void *, u32, const void *, u32);
 
index 71ed147ad077395c7038f797dfc9a3dc4917a1d2..f52918a43246ac46d34c77c00a2d3d66c00c1aa3 100644 (file)
@@ -4,5 +4,5 @@
 #include <core/os.h>
 struct nvkm_client;
 
-int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **);
+int nvkm_ioctl(struct nvkm_client *, void *, u32, void **);
 #endif
index 0911e73f742464a4d23e1c13c87339699b75ec1b..70e7887ef4b4b5e6515034728c4ece061a7957f0 100644 (file)
@@ -15,7 +15,6 @@ struct nvkm_vma {
        u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
        bool used:1; /* Region allocated. */
        bool part:1; /* Region was split from an allocated region by map(). */
-       bool user:1; /* Region user-allocated. */
        bool busy:1; /* Region busy (for temporarily preventing user access). */
        bool mapped:1; /* Region contains valid pages. */
        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
index b45ec3086285a4f42754655afe4eb141393a8be5..4107b70065398ff6803e2ef83315b8dcef595ec8 100644 (file)
@@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        }
 
        client->route = NVDRM_OBJECT_ABI16;
-       client->super = true;
        ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
                               NV_DMA_IN_MEMORY, &args, sizeof(args),
                               &ntfy->object);
-       client->super = false;
        client->route = NVDRM_OBJECT_NVIF;
        if (ret)
                goto done;
index 40362600eed26f3d72180a461f4c9cb489650e2a..80099ef7570226b65ce3fa98bc4b2e19999dedcb 100644 (file)
@@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
        struct nouveau_channel *chan = *pchan;
        if (chan) {
                struct nouveau_cli *cli = (void *)chan->user.client;
-               bool super;
-
-               if (cli) {
-                       super = cli->base.super;
-                       cli->base.super = true;
-               }
 
                if (chan->fence)
                        nouveau_fence(chan->drm)->context_del(chan);
@@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                        nouveau_bo_unpin(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
-
-               if (cli)
-                       cli->base.super = super;
        }
        *pchan = NULL;
 }
@@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
                    struct nouveau_channel **pchan)
 {
        struct nouveau_cli *cli = (void *)device->object.client;
-       bool super;
        int ret;
 
        /* hack until fencenv50 is fixed, and agp access relaxed */
-       super = cli->base.super;
-       cli->base.super = true;
-
        ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
        if (ret) {
                NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
                ret = nouveau_channel_dma(drm, device, pchan);
                if (ret) {
                        NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
-                       goto done;
+                       return ret;
                }
        }
 
@@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
        if (ret) {
                NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
                nouveau_channel_del(pchan);
-               goto done;
+               return ret;
        }
 
        ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
        if (ret)
                nouveau_channel_del(pchan);
 
-done:
-       cli->base.super = super;
        return ret;
 }
 
index a616cf4573b8dcc4714a6a823cf4ac8b750e7f99..ba4cd5f83725988b9d5daf95c20baa6f01cd0f34 100644 (file)
@@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
        ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
                               &(struct nv_device_v0) {
                                        .device = ~0,
+                                       .priv = true,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
@@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
        if (ret)
                goto done;
 
-       cli->base.super = false;
-
        fpriv->driver_priv = cli;
 
        mutex_lock(&drm->client.mutex);
index 0de6549fb875c18dd06f16096d708585308f1f06..2ca3207c13fcddd5e0447d440b5b5546c92d18d0 100644 (file)
@@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem,
                struct gf100_vmm_map_v0 gf100;
        } args;
        u32 argc = 0;
-       bool super;
-       int ret;
 
        switch (vmm->object.oclass) {
        case NVIF_CLASS_VMM_NV04:
@@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem,
                return -ENOSYS;
        }
 
-       super = vmm->object.client->super;
-       vmm->object.client->super = true;
-       ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
-                          &mem->mem, 0);
-       vmm->object.client->super = super;
-       return ret;
+       return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
 }
 
 void
@@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
        struct nvif_mem_ram_v0 args = {};
-       bool super = cli->base.super;
        u8 type;
        int ret;
 
@@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
                args.dma = tt->dma_address;
 
        mutex_lock(&drm->master.lock);
-       cli->base.super = true;
        ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
                                 reg->num_pages << PAGE_SHIFT,
                                 &args, sizeof(args), &mem->mem);
-       cli->base.super = super;
        mutex_unlock(&drm->master.lock);
        return ret;
 }
@@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
        struct nouveau_cli *cli = mem->cli;
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
-       bool super = cli->base.super;
        u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
        int ret;
 
        mutex_lock(&drm->master.lock);
-       cli->base.super = true;
        switch (cli->mem->oclass) {
        case NVIF_CLASS_MEM_GF100:
                ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
@@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
                WARN_ON(1);
                break;
        }
-       cli->base.super = super;
        mutex_unlock(&drm->master.lock);
 
        reg->start = mem->mem.addr >> PAGE_SHIFT;
index b3f29b1ce9eab3bd414097d2f325e25e87f47809..52f5793b7274fb7d4d84aa082d89ad6d5fc95adc 100644 (file)
@@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size)
 }
 
 static int
-nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
 {
-       return nvkm_ioctl(priv, super, data, size, hack);
+       return nvkm_ioctl(priv, data, size, hack);
 }
 
 static int
index 82b583f5fca8afcf256521ae1dd5c002fe856b5f..b0c3422cb01fa5b745e594c3bf33f9a40e1a98dd 100644 (file)
@@ -237,14 +237,11 @@ void
 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 {
        if (limit > start) {
-               bool super = svmm->vmm->vmm.object.client->super;
-               svmm->vmm->vmm.object.client->super = true;
                nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                                 &(struct nvif_vmm_pfnclr_v0) {
                                        .addr = start,
                                        .size = limit - start,
                                 }, sizeof(struct nvif_vmm_pfnclr_v0));
-               svmm->vmm->vmm.object.client->super = super;
        }
 }
 
@@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
                NVIF_VMM_PFNMAP_V0_A |
                NVIF_VMM_PFNMAP_V0_HOST;
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-       svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);
 
        unlock_page(page);
@@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
 
        nouveau_hmm_convert_pfn(drm, &range, args);
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
-       svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);
 
 out:
@@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
 
        mutex_lock(&svmm->mutex);
 
-       svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
                                npages * sizeof(args->p.phys[0]), NULL);
-       svmm->vmm->vmm.object.client->super = false;
 
        mutex_unlock(&svmm->mutex);
 }
index 9dc10b17ad34f24f204c556cace3c36eec4ae1ba..5da1f4d223d76083f1697f0cb9ad0dc4745a7dac 100644 (file)
@@ -32,6 +32,9 @@
 #include <nvif/event.h>
 #include <nvif/ioctl.h>
 
+#include <nvif/class.h>
+#include <nvif/cl0080.h>
+
 struct usif_notify_p {
        struct drm_pending_event base;
        struct {
@@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object)
 }
 
 static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
 {
        struct nouveau_cli *cli = nouveau_cli(f);
        struct nvif_client *client = &cli->base;
@@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
        struct usif_object *object;
        int ret = -ENOSYS;
 
+       if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
+               return ret;
+
+       switch (args->v0.oclass) {
+       case NV_DMA_FROM_MEMORY:
+       case NV_DMA_TO_MEMORY:
+       case NV_DMA_IN_MEMORY:
+               return -EINVAL;
+       case NV_DEVICE: {
+               union {
+                       struct nv_device_v0 v0;
+               } *args = data;
+
+               if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
+                       return ret;
+
+               args->v0.priv = false;
+               break;
+       }
+       default:
+               if (!parent_abi16)
+                       return -EINVAL;
+               break;
+       }
+
        if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
                return -ENOMEM;
        list_add(&object->head, &cli->objects);
 
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
-               object->route = args->v0.route;
-               object->token = args->v0.token;
-               args->v0.route = NVDRM_OBJECT_USIF;
-               args->v0.token = (unsigned long)(void *)object;
-               ret = nvif_client_ioctl(client, argv, argc);
-               args->v0.token = object->token;
-               args->v0.route = object->route;
+       object->route = args->v0.route;
+       object->token = args->v0.token;
+       args->v0.route = NVDRM_OBJECT_USIF;
+       args->v0.token = (unsigned long)(void *)object;
+       ret = nvif_client_ioctl(client, argv, argc);
+       if (ret) {
+               usif_object_dtor(object);
+               return ret;
        }
 
-       if (ret)
-               usif_object_dtor(object);
-       return ret;
+       args->v0.token = object->token;
+       args->v0.route = object->route;
+       return 0;
 }
 
 int
@@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
                struct nvif_ioctl_v0 v0;
        } *argv = data;
        struct usif_object *object;
+       bool abi16 = false;
        u8 owner;
        int ret;
 
@@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
                        mutex_unlock(&cli->mutex);
                        goto done;
                }
+
+               abi16 = true;
        }
 
        switch (argv->v0.type) {
        case NVIF_IOCTL_V0_NEW:
-               ret = usif_object_new(filp, data, size, argv, argc);
+               ret = usif_object_new(filp, data, size, argv, argc, abi16);
                break;
        case NVIF_IOCTL_V0_NTFY_NEW:
                ret = usif_notify_new(filp, data, size, argv, argc);
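usif_object_new() now validates the class before forwarding to the kernel: the DMA object classes are rejected outright, NV_DEVICE has its priv flag forcibly cleared, and any other class is only permitted beneath an ABI16-routed parent. The gatekeeping reduces to one switch (condensed sketch with a hypothetical helper name, mirroring the hunk above):

	static int usif_check_oclass(u32 oclass, bool parent_abi16,
				     struct nv_device_v0 *dev_args)
	{
		switch (oclass) {
		case NV_DMA_FROM_MEMORY:
		case NV_DMA_TO_MEMORY:
		case NV_DMA_IN_MEMORY:
			return -EINVAL;         /* never creatable from userspace */
		case NV_DEVICE:
			dev_args->priv = false; /* strip any privilege request */
			return 0;
		default:
			return parent_abi16 ? 0 : -EINVAL;
		}
	}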
index 12644f811b3ed4108b8d8bcc47db0c8e75222ee1..a3264a0e933a5995614d4c04ade98f8d5900b7e0 100644 (file)
@@ -32,7 +32,7 @@
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
-       return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
+       return client->driver->ioctl(client->object.priv, data, size, NULL);
 }
 
 int
@@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
        client->object.client = client;
        client->object.handle = ~0;
        client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
-       client->super = true;
        client->driver = parent->driver;
 
        if (ret == 0) {
index 671a5c0199e0da31f967675a998cc5dcddf00431..dce1ecee2af5a5cca1978264cc3cd5a5d11f3957 100644 (file)
@@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
        } else
                return -ENOSYS;
 
-       return client->driver->ioctl(client->object.priv, client->super,
-                                    data, size, hack);
+       return client->driver->ioctl(client->object.priv, data, size, hack);
 }
 
 void
index d777df5a64e6c08efa4481a7f772290b978953d2..735cb6816f108cc7603d784eb866d65134a117d7 100644 (file)
@@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 }
 
 int
-nvkm_ioctl(struct nvkm_client *client, bool supervisor,
-          void *data, u32 size, void **hack)
+nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
 {
        struct nvkm_object *object = &client->object;
        union {
@@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
        } *args = data;
        int ret = -ENOSYS;
 
-       client->super = supervisor;
        nvif_ioctl(object, "size %d\n", size);
 
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
index b930f539feec727c98640c72f0249a94e3eead60..93ddf63d111408ecc354b54eb3e682278899b84e 100644 (file)
@@ -2624,6 +2624,26 @@ nv174_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
 };
 
+static const struct nvkm_device_chip
+nv177_chipset = {
+       .name = "GA107",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gpio     = { 0x00000001, ga102_gpio_new },
+       .i2c      = { 0x00000001, gm200_i2c_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mc       = { 0x00000001, ga100_mc_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .privring = { 0x00000001, gm200_privring_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .top      = { 0x00000001, ga100_top_new },
+       .disp     = { 0x00000001, ga102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x168: device->chip = &nv168_chipset; break;
                case 0x172: device->chip = &nv172_chipset; break;
                case 0x174: device->chip = &nv174_chipset; break;
+               case 0x177: device->chip = &nv177_chipset; break;
                default:
                        if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
                                switch (device->chipset) {
index fea9d8f2b10cbcfe83a390d197dd83e1a1fbb1ce..f28894fdede94fa1b78174630cb47987104023d1 100644 (file)
@@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
                return ret;
 
        /* give privileged clients register access */
-       if (client->super)
+       if (args->v0.priv)
                func = &nvkm_udevice_super;
        else
                func = &nvkm_udevice;
index 55fbfe28c6dc1a50da06bc117a95cc91b303b4ac..9669472a2749ddc986fe9c6c1a0f9ba1a1bb2df3 100644 (file)
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
        return ret;
 }
 
-static void
+void
 nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
        struct nvkm_dp *dp = nvkm_dp(outp);
index 428b3f488f033af88d7627d2289cf388c524f72b..e484d0c3b0d42a9c8eb1d32607d852fcd5dddf9d 100644 (file)
@@ -32,6 +32,7 @@ struct nvkm_dp {
 
 int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
                struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
 
 /* DPCD Receiver Capabilities */
 #define DPCD_RC00_DPCD_REV                                              0x00000
index dffcac249211c3c02667d5e239ffade0916162b7..129982fef7ef66392e0622edf8c4df28d010ec33 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
+#include "dp.h"
 #include "ior.h"
 
 #include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
        if (!ior->arm.head || ior->arm.proto != proto) {
                OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
                         ior->arm.proto, proto);
+
+               /* The EFI GOP driver on Ampere can leave unused DP links routed,
+                * which we don't expect.  The DisableLT IED script *should* get
+                * us back to where we need to be.
+                */
+               if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+                       nvkm_dp_disable(outp, ior);
+
                return;
        }
 
index d20cc0681a8844f1e150df1b72f045a5b14976cd..797131ed7d674f4b4fcc5b24019534f58a3ed76d 100644 (file)
@@ -26,7 +26,6 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
-#include <subdev/instmem.h>
 
 #include <nvif/cl0002.h>
 #include <nvif/unpack.h>
@@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
        union {
                struct nv_dma_v0 v0;
        } *args = *pdata;
-       struct nvkm_device *device = dma->engine.subdev.device;
-       struct nvkm_client *client = oclass->client;
        struct nvkm_object *parent = oclass->parent;
-       struct nvkm_instmem *instmem = device->imem;
-       struct nvkm_fb *fb = device->fb;
        void *data = *pdata;
        u32 size = *psize;
        int ret = -ENOSYS;
@@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
                dmaobj->target = NV_MEM_TARGET_VM;
                break;
        case NV_DMA_V0_TARGET_VRAM:
-               if (!client->super) {
-                       if (dmaobj->limit >= fb->ram->size - instmem->reserved)
-                               return -EACCES;
-                       if (device->card_type >= NV_50)
-                               return -EACCES;
-               }
                dmaobj->target = NV_MEM_TARGET_VRAM;
                break;
        case NV_DMA_V0_TARGET_PCI:
-               if (!client->super)
-                       return -EACCES;
                dmaobj->target = NV_MEM_TARGET_PCI;
                break;
        case NV_DMA_V0_TARGET_PCI_US:
        case NV_DMA_V0_TARGET_AGP:
-               if (!client->super)
-                       return -EACCES;
                dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
                break;
        default:
index 90e9a0972a44097d3b65d99bcdf7d989984290e2..3209eb7af65fb35e4b4dd70b33f77cd592c726e4 100644 (file)
@@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o
 nvkm-y += nvkm/engine/fifo/dmanv10.o
 nvkm-y += nvkm/engine/fifo/dmanv17.o
 nvkm-y += nvkm/engine/fifo/dmanv40.o
-nvkm-y += nvkm/engine/fifo/dmanv50.o
-nvkm-y += nvkm/engine/fifo/dmag84.o
 
 nvkm-y += nvkm/engine/fifo/gpfifonv50.o
 nvkm-y += nvkm/engine/fifo/gpfifog84.o
index af8bdf27555234e91a02a0914d71b2a7c2e5f0fd..3a95730d7ff506390ed3746312a9cb958520a922 100644 (file)
@@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
 int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
                       const struct nvkm_oclass *, struct nv50_fifo_chan *);
 
-extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
 extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
deleted file mode 100644 (file)
index fc34cdd..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl826e.h>
-#include <nvif/unpack.h>
-
-static int
-g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-                void *data, u32 size, struct nvkm_object **pobject)
-{
-       struct nvkm_object *parent = oclass->parent;
-       union {
-               struct g82_channel_dma_v0 v0;
-       } *args = data;
-       struct nv50_fifo *fifo = nv50_fifo(base);
-       struct nv50_fifo_chan *chan;
-       int ret = -ENOSYS;
-
-       nvif_ioctl(parent, "create channel dma size %d\n", size);
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-               nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-                                  "pushbuf %llx offset %016llx\n",
-                          args->v0.version, args->v0.vmm, args->v0.pushbuf,
-                          args->v0.offset);
-               if (!args->v0.pushbuf)
-                       return -EINVAL;
-       } else
-               return ret;
-
-       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-               return -ENOMEM;
-       *pobject = &chan->base.object;
-
-       ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-                                oclass, chan);
-       if (ret)
-               return ret;
-
-       args->v0.chid = chan->base.chid;
-
-       nvkm_kmap(chan->ramfc);
-       nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-       nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-       nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-       nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-       nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-       nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-       nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-       nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                    (4 << 24) /* SEARCH_FULL */ |
-                                    (chan->ramht->gpuobj->node->offset >> 4));
-       nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
-       nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
-       nvkm_done(chan->ramfc);
-       return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-g84_fifo_dma_oclass = {
-       .base.oclass = G82_CHANNEL_DMA,
-       .base.minver = 0,
-       .base.maxver = 0,
-       .ctor = g84_fifo_dma_new,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
deleted file mode 100644 (file)
index 8043718..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "channv50.h"
-
-#include <core/client.h>
-#include <core/ramht.h>
-
-#include <nvif/class.h>
-#include <nvif/cl506e.h>
-#include <nvif/unpack.h>
-
-static int
-nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
-                 void *data, u32 size, struct nvkm_object **pobject)
-{
-       struct nvkm_object *parent = oclass->parent;
-       union {
-               struct nv50_channel_dma_v0 v0;
-       } *args = data;
-       struct nv50_fifo *fifo = nv50_fifo(base);
-       struct nv50_fifo_chan *chan;
-       int ret = -ENOSYS;
-
-       nvif_ioctl(parent, "create channel dma size %d\n", size);
-       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
-               nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
-                                  "pushbuf %llx offset %016llx\n",
-                          args->v0.version, args->v0.vmm, args->v0.pushbuf,
-                          args->v0.offset);
-               if (!args->v0.pushbuf)
-                       return -EINVAL;
-       } else
-               return ret;
-
-       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
-               return -ENOMEM;
-       *pobject = &chan->base.object;
-
-       ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
-                                 oclass, chan);
-       if (ret)
-               return ret;
-
-       args->v0.chid = chan->base.chid;
-
-       nvkm_kmap(chan->ramfc);
-       nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
-       nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
-       nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
-       nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
-       nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
-       nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
-       nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
-       nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
-       nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                    (4 << 24) /* SEARCH_FULL */ |
-                                    (chan->ramht->gpuobj->node->offset >> 4));
-       nvkm_done(chan->ramfc);
-       return 0;
-}
-
-const struct nvkm_fifo_chan_oclass
-nv50_fifo_dma_oclass = {
-       .base.oclass = NV50_CHANNEL_DMA,
-       .base.minver = 0,
-       .base.maxver = 0,
-       .ctor = nv50_fifo_dma_new,
-};
index c0a7d0f21dacdecbbc57275e2115d0634df84ba6..3885c3830b949a6d2e29582dd819072cbf46d75d 100644 (file)
@@ -119,7 +119,6 @@ g84_fifo = {
        .uevent_init = g84_fifo_uevent_init,
        .uevent_fini = g84_fifo_uevent_fini,
        .chan = {
-               &g84_fifo_dma_oclass,
                &g84_fifo_gpfifo_oclass,
                NULL
        },
index b6900a52bcce5ae714b8029acd322a42ad2654ab..ae6c4d846eb59949f7f69484fa8a533b57e92aaf 100644 (file)
@@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gk104_fifo_gpfifo_new_(fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index ee4967b706a7d1991ef583b743bc7e8774d74a5e..743791c514fef4a7f2ce6d548ac342560c34d7f3 100644 (file)
@@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index abef7fb6e2d3adfdbb1f2decb25b6717e47b94fe..99aafa103a317cd802d0a137b5c4ac0becfb8c32 100644 (file)
@@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
                                   "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
                           args->v0.ilength, args->v0.runlist, args->v0.priv);
-               if (args->v0.priv && !oclass->client->super)
-                       return -EINVAL;
                return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
index be94156ea2488551c4e6cfd4b77ddd93a211e53a..a08742cf425aac5dff1ac8e79bac6fa34a2786c8 100644 (file)
@@ -136,7 +136,6 @@ nv50_fifo = {
        .pause = nv04_fifo_pause,
        .start = nv04_fifo_start,
        .chan = {
-               &nv50_fifo_dma_oclass,
                &nv50_fifo_gpfifo_oclass,
                NULL
        },
index fac2f9a45ea693b5e7c24bf155c61090491d414c..e530bb8b3b170944f293ced035ea8bd840eeeabc 100644 (file)
@@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
 
        object = nvkm_object_search(client, handle, &nvkm_umem);
        if (IS_ERR(object)) {
-               if (client->super && client != master) {
+               if (client != master) {
                        spin_lock(&master->lock);
                        list_for_each_entry(umem, &master->umem, head) {
                                if (umem->object.object == handle) {
@@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle)
                }
        } else {
                umem = nvkm_umem(object);
-               if (!umem->priv || client->super)
-                       memory = nvkm_memory_ref(umem->memory);
+               memory = nvkm_memory_ref(umem->memory);
        }
 
        return memory ? memory : ERR_PTR(-ENOENT);
@@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
        nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
        umem->mmu = mmu;
        umem->type = mmu->type[type].type;
-       umem->priv = oclass->client->super;
        INIT_LIST_HEAD(&umem->head);
        *pobject = &umem->object;
 
index 85cf692d620a7edd57a414f71bf4e5c2e304f368..d56a594016cc279c60b27d18423532927bf7575f 100644 (file)
@@ -8,7 +8,6 @@ struct nvkm_umem {
        struct nvkm_object object;
        struct nvkm_mmu *mmu;
        u8 type:8;
-       bool priv:1;
        bool mappable:1;
        bool io:1;
 
index 0e4b8941da372ce5d55c386546f988283b0b7621..6870fda4b18895ce8df674a5e2182513862d4921 100644 (file)
@@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
 {
        struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
 
-       if (mmu->func->mem.user.oclass && oclass->client->super) {
+       if (mmu->func->mem.user.oclass) {
                if (index-- == 0) {
                        oclass->base = mmu->func->mem.user;
                        oclass->ctor = nvkm_umem_new;
index c43b8248c682fd60621e94371322a8af4b429758..d6a1f8d04c09c59d335b0d62774dfff6a57f1b77 100644 (file)
@@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
 static int
 nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnclr_v0 v0;
        } *args = argv;
@@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       if (!client->super)
-               return -ENOENT;
-
        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
@@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 static int
 nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnmap_v0 v0;
        } *args = argv;
@@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        } else
                return ret;
 
-       if (!client->super)
-               return -ENOENT;
-
        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
@@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 static int
 nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_unmap_v0 v0;
        } *args = argv;
@@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto done;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }
 
@@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto fail;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto fail;
        }
 
@@ -230,7 +219,6 @@ fail:
 static int
 nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_put_v0 v0;
        } *args = argv;
@@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                goto done;
        }
 
-       if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
-               VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
-                         vma->user, !client->super, vma->busy);
+       if (ret = -ENOENT, vma->busy) {
+               VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                goto done;
        }
 
@@ -268,7 +255,6 @@ done:
 static int
 nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 {
-       struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_get_v0 v0;
        } *args = argv;
@@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                return ret;
 
        args->v0.addr = vma->addr;
-       vma->user = !client->super;
        return ret;
 }
 
index 710f3f8dc7c9434b243964888776fb2427deae3b..8bf00b396ec160321626a3bfba18d7e69afe5408 100644 (file)
@@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
        new->refd = vma->refd;
        new->used = vma->used;
        new->part = vma->part;
-       new->user = vma->user;
        new->busy = vma->busy;
        new->mapped = vma->mapped;
        list_add(&new->head, &vma->head);
@@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
 static void
 nvkm_vma_dump(struct nvkm_vma *vma)
 {
-       printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+       printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
               vma->addr, (u64)vma->size,
               vma->used ? '-' : 'F',
               vma->mapref ? 'R' : '-',
@@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma)
               vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
               vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
               vma->part ? 'P' : '-',
-              vma->user ? 'U' : '-',
               vma->busy ? 'B' : '-',
               vma->mapped ? 'M' : '-',
               vma->memory);
@@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
        vma->mapref = true;
        vma->sparse = false;
        vma->used = true;
-       vma->user = true;
        nvkm_vmm_node_insert(vmm, vma);
        list_add_tail(&vma->head, &vmm->list);
        return 0;
@@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        vma->page = NVKM_VMA_PAGE_NONE;
        vma->refd = NVKM_VMA_PAGE_NONE;
        vma->used = false;
-       vma->user = false;
        nvkm_vmm_put_region(vmm, vma);
 }
 
index f02abd9cb4ddfc6a7a492e1e3d7ea2ee99965f6e..b5e733783b5b39c91d8e0a88d3ad8ad5f90cdf0b 100644 (file)
@@ -534,15 +534,13 @@ int
 gp100_vmm_mthd(struct nvkm_vmm *vmm,
               struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
 {
-       if (client->super) {
-               switch (mthd) {
-               case GP100_VMM_VN_FAULT_REPLAY:
-                       return gp100_vmm_fault_replay(vmm, argv, argc);
-               case GP100_VMM_VN_FAULT_CANCEL:
-                       return gp100_vmm_fault_cancel(vmm, argv, argc);
-               default:
-                       break;
-               }
+       switch (mthd) {
+       case GP100_VMM_VN_FAULT_REPLAY:
+               return gp100_vmm_fault_replay(vmm, argv, argc);
+       case GP100_VMM_VN_FAULT_CANCEL:
+               return gp100_vmm_fault_cancel(vmm, argv, argc);
+       default:
+               break;
        }
        return -EINVAL;
 }
index 74e3b460132b3d858ecd4e70cde2cefadc1d8fa1..2df59b3c2ea1674cbc9349772a4c149d09ce8ca7 100644 (file)
@@ -78,9 +78,7 @@ static int ttm_global_init(void)
 
        ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
        if (IS_ERR(ttm_debugfs_root)) {
-               ret = PTR_ERR(ttm_debugfs_root);
                ttm_debugfs_root = NULL;
-               goto out;
        }
 
        /* Limit the number of pages in the pool to about 50% of the total
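
The error path dropped above reflects the usual debugfs convention: debugfs is best-effort, so initialization should record the failure (here by resetting the root dentry to NULL) and continue, rather than unwinding the whole init. A minimal sketch of that convention, under hypothetical names:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_debugfs_root;

static void example_debugfs_init(void)
{
	/* A debugfs failure must not fail driver initialization. */
	example_debugfs_root = debugfs_create_dir("example", NULL);
	if (IS_ERR(example_debugfs_root))
		example_debugfs_root = NULL;
}
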
index a1c85d1521f5c8fc9ebca3e83fb9f499f14aee66..82b244cb313e638a9549a569e3a79427cf3bf756 100644 (file)
@@ -585,21 +585,21 @@ static const struct ipu_rgb def_bgra_16 = {
        .bits_per_pixel = 16,
 };
 
-#define Y_OFFSET(pix, x, y)    ((x) + pix->width * (y))
-#define U_OFFSET(pix, x, y)    ((pix->width * pix->height) +           \
-                                (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define V_OFFSET(pix, x, y)    ((pix->width * pix->height) +           \
-                                (pix->width * pix->height / 4) +       \
-                                (pix->width * ((y) / 2) / 2) + (x) / 2)
-#define U2_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
-                                (pix->width * (y) / 2) + (x) / 2)
-#define V2_OFFSET(pix, x, y)   ((pix->width * pix->height) +           \
-                                (pix->width * pix->height / 2) +       \
-                                (pix->width * (y) / 2) + (x) / 2)
-#define UV_OFFSET(pix, x, y)   ((pix->width * pix->height) +   \
-                                (pix->width * ((y) / 2)) + (x))
-#define UV2_OFFSET(pix, x, y)  ((pix->width * pix->height) +   \
-                                (pix->width * y) + (x))
+#define Y_OFFSET(pix, x, y)    ((x) + pix->bytesperline * (y))
+#define U_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define V_OFFSET(pix, x, y)    ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * pix->height / 4) + \
+                                (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
+#define U2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * (y) / 2) + (x) / 2)
+#define V2_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * pix->height / 2) + \
+                                (pix->bytesperline * (y) / 2) + (x) / 2)
+#define UV_OFFSET(pix, x, y)   ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * ((y) / 2)) + (x))
+#define UV2_OFFSET(pix, x, y)  ((pix->bytesperline * pix->height) +     \
+                                (pix->bytesperline * y) + (x))
 
 #define NUM_ALPHA_CHANNELS     7
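
The switch from pix->width to pix->bytesperline matters whenever the allocator pads each line of the buffer. A worked example with illustrative numbers, for the NV12 case handled by UV_OFFSET() above (width = 1920, height = 1080, bytesperline = 2048):

  chroma plane start  = bytesperline * height = 2048 * 1080 = 2211840
  old width-based value =    width * height   = 1920 * 1080 = 2073600

With an unpadded buffer (bytesperline == width) the two agree, which is why the width-based macros only mis-address the chroma planes for padded strides.
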
 
index cceaf69279a94e6dd3b9e08eaee59942abbb0663..6304d1dd2dd6fc15b83b14e5acda2e178716c378 100644 (file)
@@ -1224,14 +1224,14 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
 
        disable_irq(iproc_i2c->irq);
 
+       tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
        /* disable all slave interrupts */
        tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
        tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
                        IE_S_ALL_INTERRUPT_SHIFT);
        iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
 
-       tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
        /* Erase the slave address programmed */
        tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
        tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
index cb64fe649390e932257e14db50fe7615c08dd1ea..77f576e5165229819d71ebbaa1c5bf87361b892b 100644 (file)
@@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
        if (count > 8192)
                count = 8192;
 
-       tmp = kmalloc(count, GFP_KERNEL);
+       tmp = kzalloc(count, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
 
@@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
 
        ret = i2c_master_recv(client, tmp, count);
        if (ret >= 0)
-               ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
+               if (copy_to_user(buf, tmp, ret))
+                       ret = -EFAULT;
        kfree(tmp);
        return ret;
 }
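
Taken together, the two hunks above implement a common hardening pattern for character-device read(): allocate the bounce buffer zeroed and copy out only the bytes actually received, so a short hardware transfer can never leak stale heap contents to user space. A minimal sketch of the pattern, with a hypothetical device_recv() standing in for i2c_master_recv():

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	char *tmp;
	int ret;

	tmp = kzalloc(count, GFP_KERNEL);	/* zeroed, unlike kmalloc() */
	if (!tmp)
		return -ENOMEM;

	ret = device_recv(file->private_data, tmp, count); /* hypothetical */
	if (ret >= 0)
		/* Copy only the 'ret' bytes that were actually filled in. */
		if (copy_to_user(buf, tmp, ret))
			ret = -EFAULT;

	kfree(tmp);
	return ret;
}
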
index 0e56ace61103379bb5e6f2d01e173e76ee9b25e9..8d8b1ba42ff80e31cccfc04991ee516e512c065c 100644 (file)
@@ -231,6 +231,7 @@ config DMARD10
 
 config FXLS8962AF
        tristate
+       depends on I2C || !I2C # cannot be built-in for modular I2C
 
 config FXLS8962AF_I2C
        tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver"
@@ -247,6 +248,7 @@ config FXLS8962AF_I2C
 config FXLS8962AF_SPI
        tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer SPI Driver"
        depends on SPI
+       depends on I2C || !I2C
        select FXLS8962AF
        select REGMAP_SPI
        help
index 078d87865fdeec758d2eb3e5397ac5a35ab4cbb7..0019f1ea7df27248766bc0ee699a34a60a038af4 100644 (file)
@@ -637,7 +637,7 @@ static int fxls8962af_i2c_raw_read_errata3(struct fxls8962af_data *data,
                        return ret;
        }
 
-       return ret;
+       return 0;
 }
 
 static int fxls8962af_fifo_transfer(struct fxls8962af_data *data,
index 6ef09609be9fe442d75359438877de09c942e309..f9c8385c72d3d924792f55def5a470f8f9751494 100644 (file)
@@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)
 
        adc_period = adc->auto_conversion_period;
        for (i = 0; i < 16; ++i) {
-               if (((1000 * (1 << i)) / 32) < adc_period)
-                       continue;
+               if (((1000 * (1 << i)) / 32) >= adc_period)
+                       break;
        }
        if (i > 0)
                i--;
index 7010c4276947ea583b14d3566f731bf2ce0b3e23..c56fccb2c8e17a36f863f6f88b3ea577d9b6430d 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/completion.h>
 #include <linux/regmap.h>
 #include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
 #include <linux/slab.h>
 
 #define RN5T618_ADC_CONVERSION_TIMEOUT   (msecs_to_jiffies(500))
@@ -189,6 +191,19 @@ static const struct iio_chan_spec rn5t618_adc_iio_channels[] = {
        RN5T618_ADC_CHANNEL(AIN0, IIO_VOLTAGE, "AIN0")
 };
 
+static struct iio_map rn5t618_maps[] = {
+       IIO_MAP("VADP", "rn5t618-power", "vadp"),
+       IIO_MAP("VUSB", "rn5t618-power", "vusb"),
+       { /* sentinel */ }
+};
+
+static void unregister_map(void *data)
+{
+       struct iio_dev *iio_dev = (struct iio_dev *) data;
+
+       iio_map_array_unregister(iio_dev);
+}
+
 static int rn5t618_adc_probe(struct platform_device *pdev)
 {
        int ret;
@@ -239,6 +254,14 @@ static int rn5t618_adc_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = iio_map_array_register(iio_dev, rn5t618_maps);
+       if (ret < 0)
+               return ret;
+
+       ret = devm_add_action_or_reset(adc->dev, unregister_map, iio_dev);
+       if (ret < 0)
+               return ret;
+
        return devm_iio_device_register(adc->dev, iio_dev);
 }
 
index 2383eacada87ddf43609b299fb29e3a0de2ce11b..a2b83f0bd5260633f62d39999516b0eff069d76c 100644 (file)
@@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
        st->ring_xfer.tx_buf = &st->tx_buf[0];
        st->ring_xfer.rx_buf = &st->rx_buf[0];
        /* len will be set later */
-       st->ring_xfer.cs_change = true;
 
        spi_message_add_tail(&st->ring_xfer, &st->ring_msg);
 
index 2a957f19048ee8e78c5c2feb8f6c25a8b947ed85..9e0fce917ce4cfdecd34b6e3ea492530fc75ab72 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 
+#include <linux/time.h>
+
 #define HDC100X_REG_TEMP                       0x00
 #define HDC100X_REG_HUMIDITY                   0x01
 
@@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
                                   struct iio_chan_spec const *chan)
 {
        struct i2c_client *client = data->client;
-       int delay = data->adc_int_us[chan->address];
+       int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
        int ret;
        __be16 val;
 
@@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
        struct iio_dev *indio_dev = pf->indio_dev;
        struct hdc100x_data *data = iio_priv(indio_dev);
        struct i2c_client *client = data->client;
-       int delay = data->adc_int_us[0] + data->adc_int_us[1];
+       int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
        int ret;
 
        /* dual read starts at temp register */
index a5b421f4228711317dc5b520a6afab6658f87587..b9a06ca29beecd4c2b271334286840d5034ff46e 100644 (file)
@@ -411,12 +411,11 @@ int __adis_initial_startup(struct adis *adis)
        int ret;
 
        /* check if the device has rst pin low */
-       gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS);
+       gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);
 
        if (gpio) {
-               gpiod_set_value_cansleep(gpio, 1);
                msleep(10);
                /* bring device out of reset */
                gpiod_set_value_cansleep(gpio, 0);
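
Requesting the line with GPIOD_OUT_HIGH asserts reset at request time, so the explicit gpiod_set_value_cansleep(gpio, 1) above becomes redundant and there is no window in which the line sits in an unknown state. A minimal sketch of the same sequence (hold and start-up delays are illustrative, not from a datasheet):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_hw_reset(struct device *dev)
{
	struct gpio_desc *gpio;

	/* Request the reset line already driven to its asserted level. */
	gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	if (gpio) {
		msleep(10);				/* hold reset */
		gpiod_set_value_cansleep(gpio, 0);	/* release reset */
		msleep(10);				/* start-up time */
	}
	return 0;
}
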
index f782d5e1aa25527a597fce63ef417f2fcaccee08..03e1db5d1e8c3903f26432c58b00d2e5d64b3681 100644 (file)
@@ -249,6 +249,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
 
+       rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+       rdma_restrack_set_name(&mr->res, NULL);
+       rdma_restrack_add(&mr->res);
        uobj->object = mr;
 
        uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
index 283b6b81563cc438da1364766b7f891adaa900e4..ea0054c60fbc68305a4f83023a325fc32079b18f 100644 (file)
@@ -1681,6 +1681,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
        if (nq)
                nq->budget++;
        atomic_inc(&rdev->srq_count);
+       spin_lock_init(&srq->lock);
 
        return 0;
 
index a8688a92c760281419eec67a9dd05de43354f39a..4678bd6ec7d63465a2ef4aebd3ffb9aefc53336c 100644 (file)
@@ -1397,7 +1397,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
        memset(&rattr, 0, sizeof(rattr));
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
-               rtnl_unlock();
                ibdev_err(&rdev->ibdev,
                          "Failed to register with netedev: %#x\n", rc);
                return -EINVAL;
index 203e6ddcacbc9c87ca5222bb85503f188e287549..be4a07bd268a312499670b6aafff2af3759694bd 100644 (file)
@@ -357,6 +357,7 @@ static int efa_enable_msix(struct efa_dev *dev)
        }
 
        if (irq_num != msix_vecs) {
+               efa_disable_msix(dev);
                dev_err(&dev->pdev->dev,
                        "Allocated %d MSI-X (out of %d requested)\n",
                        irq_num, msix_vecs);
index eb15c310d63d8d7cebfd17492d67e49d2b869e4c..e83dc562629ed36dffa68fa47c0ee5942933c403 100644 (file)
@@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 {
        int i;
+       struct sdma_desc *descp;
 
        /* Handle last descriptor */
        if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
@@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
        if (unlikely(tx->num_desc == MAX_DESC))
                goto enomem;
 
-       tx->descp = kmalloc_array(
-                       MAX_DESC,
-                       sizeof(struct sdma_desc),
-                       GFP_ATOMIC);
-       if (!tx->descp)
+       descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
+       if (!descp)
                goto enomem;
+       tx->descp = descp;
 
        /* reserve last descriptor for coalescing */
        tx->desc_limit = MAX_DESC - 1;
index dab88286d549e2094f3f747528367427afdf44d9..b6f9c41bca51db1dcc83162fa0db8a269a1d8bf9 100644 (file)
@@ -6,7 +6,7 @@ config INFINIBAND_IRDMA
        depends on PCI
        depends on ICE && I40E
        select GENERIC_ALLOCATOR
-       select CONFIG_AUXILIARY_BUS
+       select AUXILIARY_BUS
        help
          This is an Intel(R) Ethernet Protocol Driver for RDMA that
          supports E810 (iWARP/RoCE) and X722 (iWARP) network devices.
index 7abeb576b3c50977bb165e23b4ac13d9d30d6d32..b8e5e371bb1962fdf3527681b0e081da440bfa38 100644 (file)
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        u32 *cqb = NULL;
        void *cqc;
        int cqe_size;
-       unsigned int irqn;
        int eqn;
        int err;
 
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
        }
 
-       err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
        if (err)
                goto err_cqb;
 
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                goto err_cqb;
 
        mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
-       cq->mcq.irqn = irqn;
        if (udata)
                cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
        else
index eb9b0a2707f800a33719e4f5ab80c80cf715343a..c869b2a91a289b4fa1b7c249b837f0f90edd4d0e 100644 (file)
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
        struct mlx5_ib_dev *dev;
        int user_vector;
        int dev_eqn;
-       unsigned int irqn;
        int err;
 
        if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
                return PTR_ERR(c);
        dev = to_mdev(c->ibucontext.device);
 
-       err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+       err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
        if (err < 0)
                return err;
 
index 094c976b1eedf73298a1f13efa6a6e3d869ce758..2507051f7b897a00bb363af5a3e60b455f34582a 100644 (file)
@@ -4454,7 +4454,8 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev)
        mutex_lock(&mlx5_ib_multiport_mutex);
        if (mpi->ibdev)
                mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
-       list_del(&mpi->list);
+       else
+               list_del(&mpi->list);
        mutex_unlock(&mlx5_ib_multiport_mutex);
        kfree(mpi);
 }
index 0ea9a5aa4ec0de30e46ef3bc4cc383e26c3f6f61..1c1d1b53312dc9f1f4854e0910c2d9ebd81ecaf9 100644 (file)
@@ -85,7 +85,7 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
                goto out;
        }
 
-       elem = rxe_alloc(&rxe->mc_elem_pool);
+       elem = rxe_alloc_locked(&rxe->mc_elem_pool);
        if (!elem) {
                err = -ENOMEM;
                goto out;
index 85b812586ed487d182f9db3c0058f138f514ae08..72d95398e604108abe3c9103aa33a2b89eaaafb2 100644 (file)
@@ -63,7 +63,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
        if (*num_elem < 0)
                goto err1;
 
-       q = kmalloc(sizeof(*q), GFP_KERNEL);
+       q = kzalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                goto err1;
 
index 27cc5f03611cbe91e0bf50a903296829ecf81782..f6fae64861ce873bca8105e2dcc1038b81e6901c 100644 (file)
@@ -20,18 +20,13 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
 {
        size_t i;
        struct qcom_icc_node *qn;
-       struct qcom_icc_provider *qp;
 
        qn = node->data;
-       qp = to_qcom_provider(node->provider);
 
        for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
                qn->sum_avg[i] = 0;
                qn->max_peak[i] = 0;
        }
-
-       for (i = 0; i < qn->num_bcms; i++)
-               qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
 }
 EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
 
@@ -49,8 +44,10 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
 {
        size_t i;
        struct qcom_icc_node *qn;
+       struct qcom_icc_provider *qp;
 
        qn = node->data;
+       qp = to_qcom_provider(node->provider);
 
        if (!tag)
                tag = QCOM_ICC_TAG_ALWAYS;
@@ -70,6 +67,9 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
        *agg_avg += avg_bw;
        *agg_peak = max_t(u32, *agg_peak, peak_bw);
 
+       for (i = 0; i < qn->num_bcms; i++)
+               qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
index 98ba927aee1a6903c7bc8619e41160618292f952..6f0df629353fd18ea92ba9458ac10444b8d51ae6 100644 (file)
@@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
        __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
        __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        sg_free_table(&sh->sgt);
+       kfree(sh);
 }
 #endif /* CONFIG_DMA_REMAP */
 
index c6cf44a6c92305c57093feb22372a6076b0a0536..9ec374e17469a28b04d0f69c85dfdd691ae81d87 100644 (file)
@@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
                                 u32 pasid, bool fault_ignore)
 {
        struct pasid_entry *pte;
-       u16 did;
+       u16 did, pgtt;
 
        pte = intel_pasid_get_entry(dev, pasid);
        if (WARN_ON(!pte))
@@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
                return;
 
        did = pasid_get_domain_id(pte);
+       pgtt = pasid_pte_get_pgtt(pte);
+
        intel_pasid_clear_entry(dev, pasid, fault_ignore);
 
        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));
 
        pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-       qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+       if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
+               qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+       else
+               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
        /* Device IOTLB doesn't need to be flushed in caching mode. */
        if (!cap_caching_mode(iommu->cap))
index 5ff61c3d401f9a7d7d7086df3b2237b419cb2b43..c11bc8b833b8ee59c0ef8c9b74f8a9316e2b43f0 100644 (file)
@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
        return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
 }
 
+/* Get PGTT field of a PASID table entry */
+static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+{
+       return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
+}
+
 extern unsigned int intel_pasid_max_id;
 int intel_pasid_alloc_table(struct device *dev);
 void intel_pasid_free_table(struct device *dev);
index 9b0f22bc0514e0ea5863524f2f7b41718ae05fcc..4b9b3f35ba0ea2c485b6bdfaa6f9e8ace9e5bd40 100644 (file)
@@ -675,7 +675,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                        kfree_rcu(sdev, rcu);
 
                        if (list_empty(&svm->devs)) {
-                               intel_svm_free_pasid(mm);
                                if (svm->notifier.ops) {
                                        mmu_notifier_unregister(&svm->notifier, mm);
                                        /* Clear mm's pasid. */
@@ -690,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
                                kfree(svm);
                        }
                }
+               /* Drop a PASID reference and free it when no references remain. */
+               intel_svm_free_pasid(mm);
        }
 out:
        return ret;
index 5419c4b9f27ada00f31f86b0a6eb3adc19f56753..63f0af10c4033432b95285cf424d35ea97d215d6 100644 (file)
@@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev)
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;
 
+       if (!group)
+               return;
+
        dev_info(dev, "Removing from iommu group %d\n", group->id);
 
        /* Pre-notify listeners that a device is being removed. */
index 6019e58ce4fbe846428e331ae853dfc6541c8b24..83df387e70a336bf976192d85b077efd2598e484 100644 (file)
@@ -90,7 +90,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        struct zpci_dev *zdev = to_zpci_dev(dev);
        struct s390_domain_device *domain_device;
        unsigned long flags;
-       int rc;
+       int cc, rc;
 
        if (!zdev)
                return -ENODEV;
@@ -99,14 +99,21 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        if (!domain_device)
                return -ENOMEM;
 
-       if (zdev->dma_table)
-               zpci_dma_exit_device(zdev);
+       if (zdev->dma_table) {
+               cc = zpci_dma_exit_device(zdev);
+               if (cc) {
+                       rc = -EIO;
+                       goto out_free;
+               }
+       }
 
        zdev->dma_table = s390_domain->dma_table;
-       rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+       cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                (u64) zdev->dma_table);
-       if (rc)
+       if (cc) {
+               rc = -EIO;
                goto out_restore;
+       }
 
        spin_lock_irqsave(&s390_domain->list_lock, flags);
        /* First device defines the DMA range limits */
@@ -130,6 +137,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
 
 out_restore:
        zpci_dma_init_device(zdev);
+out_free:
        kfree(domain_device);
 
        return rc;
index 3461b0a7dc624ea0c4cd6a8466badba09a47c19a..cbfdadecb23b9fa1ba4dae6f3aa26c2242178a50 100644 (file)
@@ -89,16 +89,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
        free_irq(tpci200->info->pdev->irq, (void *) tpci200);
 
        pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
-       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
 
        pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
        pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-       pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
 
        pci_disable_device(tpci200->info->pdev);
-       pci_dev_put(tpci200->info->pdev);
 }
 
 static void tpci200_enable_irq(struct tpci200_board *tpci200,
@@ -257,7 +254,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_disable_pci;
+               goto err_disable_device;
        }
 
        /* Request IO ID INT space (Bar 3) */
@@ -269,7 +266,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ip_space;
+               goto err_ip_interface_bar;
        }
 
        /* Request MEM8 space (Bar 5) */
@@ -280,7 +277,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ioid_int_space;
+               goto err_io_id_int_spaces_bar;
        }
 
        /* Request MEM16 space (Bar 4) */
@@ -291,7 +288,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_mem8_space;
+               goto err_mem8_space_bar;
        }
 
        /* Map internal tpci200 driver user space */
@@ -305,7 +302,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
                res = -ENOMEM;
-               goto out_release_mem8_space;
+               goto err_mem16_space_bar;
        }
 
        /* Initialize lock that protects interface_regs */
@@ -344,18 +341,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) unable to register IRQ !",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
-               goto out_release_ioid_int_space;
+               goto err_interface_regs;
        }
 
        return 0;
 
-out_release_mem8_space:
+err_interface_regs:
+       pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
+err_mem16_space_bar:
+       pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
+err_mem8_space_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-out_release_ioid_int_space:
+err_io_id_int_spaces_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
-out_release_ip_space:
+err_ip_interface_bar:
        pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
-out_disable_pci:
+err_disable_device:
        pci_disable_device(tpci200->info->pdev);
        return res;
 }
@@ -527,7 +528,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
        if (!tpci200->info) {
                ret = -ENOMEM;
-               goto out_err_info;
+               goto err_tpci200;
        }
 
        pci_dev_get(pdev);
@@ -538,7 +539,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
                ret = -EBUSY;
-               goto out_err_pci_request;
+               goto err_tpci200_info;
        }
        tpci200->info->cfg_regs = ioremap(
                        pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
@@ -546,7 +547,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (!tpci200->info->cfg_regs) {
                dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
                ret = -EFAULT;
-               goto out_err_ioremap;
+               goto err_request_region;
        }
 
        /* Disable byte swapping for 16 bit IP module access. This will ensure
@@ -569,7 +570,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "error during tpci200 install\n");
                ret = -ENODEV;
-               goto out_err_install;
+               goto err_cfg_regs;
        }
 
        /* Register the carrier in the industry pack bus driver */
@@ -581,7 +582,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev,
                        "error registering the carrier on ipack driver\n");
                ret = -EFAULT;
-               goto out_err_bus_register;
+               goto err_tpci200_install;
        }
 
        /* save the bus number given by ipack for logging purposes */
@@ -592,19 +593,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
                tpci200_create_device(tpci200, i);
        return 0;
 
-out_err_bus_register:
+err_tpci200_install:
        tpci200_uninstall(tpci200);
-       /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
-       tpci200->info->cfg_regs = NULL;
-out_err_install:
-       if (tpci200->info->cfg_regs)
-               iounmap(tpci200->info->cfg_regs);
-out_err_ioremap:
+err_cfg_regs:
+       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+err_request_region:
        pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
-out_err_pci_request:
-       pci_dev_put(pdev);
+err_tpci200_info:
        kfree(tpci200->info);
-out_err_info:
+       pci_dev_put(pdev);
+err_tpci200:
        kfree(tpci200);
        return ret;
 }
@@ -614,6 +612,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
        ipack_bus_unregister(tpci200->info->ipack_bus);
        tpci200_uninstall(tpci200);
 
+       pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+
+       pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
+
+       pci_dev_put(tpci200->info->pdev);
+
        kfree(tpci200->info);
        kfree(tpci200);
 }
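
The relabelled error paths above follow the standard kernel unwind convention: each goto label is named after the step being undone, and the ladder releases resources in exactly the reverse order they were acquired, so every exit frees precisely what was set up. A minimal sketch of the convention, with hypothetical acquire/release pairs:

static int example_probe(void)
{
	int ret;

	ret = acquire_a();	/* hypothetical step 1 */
	if (ret)
		return ret;

	ret = acquire_b();	/* hypothetical step 2 */
	if (ret)
		goto err_a;

	ret = acquire_c();	/* hypothetical step 3 */
	if (ret)
		goto err_b;

	return 0;

err_b:
	release_b();
err_a:
	release_a();
	return ret;
}
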
index 59a36f92267555175db081310b3a32c0b0052c6b..30d29b96a339643aa226d81228b38d956f6451b7 100644 (file)
@@ -226,7 +226,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
 err_free_swnodes:
        software_node_unregister_nodes(sensor->swnodes);
 err_put_adev:
-       acpi_dev_put(sensor->adev);
+       acpi_dev_put(adev);
        return ret;
 }
 
index d333130d15315a5efb6be73a2c3c415362bafc2f..c3229d8c7041c197afa975fba16074774c84da64 100644 (file)
@@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                                        continue;
                                }
 
-                               dw_mci_stop_dma(host);
                                send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_SENDING_STOP;
                                break;
                        }
@@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
@@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
                         */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
-                               dw_mci_stop_dma(host);
                                if (!(host->data_status & (SDMMC_INT_DRTO |
                                                           SDMMC_INT_EBE)))
                                        send_stop_abort(host, data);
+                               dw_mci_stop_dma(host);
                                state = STATE_DATA_ERROR;
                                break;
                        }
index 51db30acf4dcaf32e08d3e28a8adb3443d2f44a4..fdaa11f92fe6f5c31901f85128d5b60c2191f3a5 100644 (file)
@@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
        u32 status;
        int ret = 0;
 
-       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
-               spin_lock_irqsave(&host->lock, flags);
+       spin_lock_irqsave(&host->lock, flags);
+       if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
+           host->pwr_reg & MCI_STM32_VSWITCHEN) {
                mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
                spin_unlock_irqrestore(&host->lock, flags);
 
@@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
 
                writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
                               host->base + MMCICLEAR);
+               spin_lock_irqsave(&host->lock, flags);
                mmci_write_pwrreg(host, host->pwr_reg &
                                  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
        }
+       spin_unlock_irqrestore(&host->lock, flags);
 
        return ret;
 }
index cce390fe9cf376c13279a9d3760022ac39ecf365..032bf852397f95a3fe8aea5ee08149ee38e3c1d5 100644 (file)
@@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
                return pltfm_host->clock;
 }
 
+/*
+ * There is a known bug on BCM2711's SDHCI core integration where the
+ * controller will hang when the difference between the core clock and the bus
+ * clock is too great. Specifically this can be reproduced under the following
+ * conditions:
+ *
+ *  - No SD card plugged in, polling thread is running, probing cards at
+ *    100 kHz.
+ *  - BCM2711's core clock configured at 500 MHz or more
+ *
+ * So we set 200 kHz as the minimum clock frequency available for that SoC.
+ */
+static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
+{
+       return 200000;
+}
+
 static const struct sdhci_ops sdhci_iproc_ops = {
        .set_clock = sdhci_set_clock,
        .get_max_clock = sdhci_iproc_get_max_clock,
@@ -271,6 +288,7 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
        .set_clock = sdhci_set_clock,
        .set_power = sdhci_set_power_and_bus_voltage,
        .get_max_clock = sdhci_iproc_get_max_clock,
+       .get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
        .set_bus_width = sdhci_set_bus_width,
        .reset = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
index e44b7a66b73c55b0364b0f54d7ee3025f5f26d36..290a14cdc1cf6878d1b56035c3638de116c1bdaf 100644 (file)
@@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
        sdhci_cqe_disable(mmc, recovery);
 }
 
+static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+       u32 count, start = 15;
+
+       __sdhci_set_timeout(host, cmd);
+       count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
+       /*
+        * Update software timeout value if its value is less than hardware data
+        * timeout value. Qcom SoC hardware data timeout value was calculated
+        * using 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
+        */
+       if (cmd && cmd->data && host->clock > 400000 &&
+           host->clock <= 50000000 &&
+           ((1 << (count + start)) > (10 * host->clock)))
+               host->data_timeout = 22LL * NSEC_PER_SEC;
+}
+
 static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
        .enable         = sdhci_msm_cqe_enable,
        .disable        = sdhci_msm_cqe_disable,
@@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
        .irq    = sdhci_msm_cqe_irq,
        .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
        .set_power = sdhci_set_power_noreg,
+       .set_timeout = sdhci_msm_set_timeout,
 };
 
 static const struct sdhci_pltfm_data sdhci_msm_pdata = {
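
The guard in sdhci_msm_set_timeout() above reads more easily with the exponents expanded: with start = 15 substituted, (1 << (count + 15)) equals 4 * 2^(count + 13), so the comparison with 10 * host->clock fires exactly when the hardware data timeout 4 * 2^(count + 13) / host->clock exceeds 10 seconds. A worked example with illustrative numbers:

  count = 14, host->clock = 50 MHz
  hardware timeout = 4 * 2^27 / 50000000 ≈ 10.7 s
  1 << (14 + 15) = 536870912 > 10 * 50000000 = 500000000

so host->data_timeout is raised to 22 s, keeping the software timeout above the hardware one.
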
index 3097e93787f72625dfc4a22888aec35c68141f1e..a761134fd3bea03432d4e846f01a196a13cbf8b6 100644 (file)
@@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
 
-       return extp->MinorVersion >= '5' &&
+       return extp && extp->MinorVersion >= '5' &&
                (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
 }
 
index efc2003bd13a479d0ca1829c9cc691254299fbb4..99400d0fb8c1e1cda9e9f5282a14d7ff462657ee 100644 (file)
@@ -229,7 +229,7 @@ static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
                woff += ws;
        }
 
-       return ret;
+       return 0;
 }
 
 static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
@@ -255,6 +255,7 @@ static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
        if (!ret)
                *retlen += len;
 
+       kfree(cmd);
        return ret;
 
 fail:
@@ -286,7 +287,7 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
                woff += ws;
        }
 
-       return ret;
+       return 0;
 };
 
 static const struct mchp48_caps mchp48l640_caps = {
index 6ce4bc57f91946729e82957f1087b8d6582c2ebe..44bea3f65060d5352292a83af162115649927598 100644 (file)
@@ -419,6 +419,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
        if (tr->discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
+               new->rq->limits.discard_granularity = tr->blksize;
        }
 
        gd->queue = new->rq;
@@ -525,14 +526,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);
 
-
-       mutex_lock(&mtd_table_mutex);
-
        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
-               mutex_unlock(&mtd_table_mutex);
                return ret;
        }
 
@@ -542,12 +539,12 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
        tr->blkshift = ffs(tr->blksize) - 1;
 
        INIT_LIST_HEAD(&tr->devs);
-       list_add(&tr->list, &blktrans_majors);
 
+       mutex_lock(&mtd_table_mutex);
+       list_add(&tr->list, &blktrans_majors);
        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);
-
        mutex_unlock(&mtd_table_mutex);
        return 0;
 }
@@ -564,8 +561,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);
 
-       unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);
+       unregister_blkdev(tr->major, tr->name);
 
        BUG_ON(!list_empty(&tr->devs));
        return 0;
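
Moving register_blkdev() and unregister_blkdev() outside mtd_table_mutex narrows the lock to the code that actually touches shared MTD state; the block layer calls take locks of their own, and nesting them under the MTD mutex is presumably what the reordering avoids. A minimal sketch of the narrowed scope, under hypothetical names:

#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct example_trans {
	unsigned int major;
	const char *name;
	struct list_head list;
};

static DEFINE_MUTEX(example_table_mutex);
static LIST_HEAD(example_majors);

static int example_register(struct example_trans *tr)
{
	int ret;

	/* register_blkdev() takes its own locks; keep it outside ours. */
	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0)
		return ret;

	mutex_lock(&example_table_mutex);
	list_add(&tr->list, &example_majors);
	mutex_unlock(&example_table_mutex);
	return 0;
}
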
index b5ccd3037788a2401ce31b39ed013f4bb57ad72b..c8fd7f758938bfd35d70968b1b8115eb589facd8 100644 (file)
@@ -806,7 +806,9 @@ static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
 
 err:
        kfree(info);
-       return ret;
+
+       /* ENODATA means there is no OTP region. */
+       return ret == -ENODATA ? 0 : ret;
 }
 
 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
index 57a583149cc0cb0264a7a726d35f19737cae8007..3d6c6e88052072751886f8391ae1561f960e8b72 100644 (file)
@@ -5228,12 +5228,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
 static int of_get_nand_secure_regions(struct nand_chip *chip)
 {
        struct device_node *dn = nand_get_flash_node(chip);
+       struct property *prop;
        int nr_elem, i, j;
 
-       nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
-       if (!nr_elem)
+       /* Only proceed if the "secure-regions" property is present in DT */
+       prop = of_find_property(dn, "secure-regions", NULL);
+       if (!prop)
                return 0;
 
+       nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
+       if (nr_elem <= 0)
+               return nr_elem;
+
        chip->nr_secure_regions = nr_elem / 2;
        chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
                                       GFP_KERNEL);
index a7ee0af1af904a9ef4f0fe8ba4a596e48cc6c4b9..54e321a695ce9230dc42f1ec2d2ef2b0f2e50ce6 100644 (file)
@@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                family = AF_INET6;
 
        if (bareudp->ethertype == htons(ETH_P_IP)) {
-               struct iphdr *iphdr;
+               __u8 ipversion;
 
-               iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
-               if (iphdr->version == 4) {
-                       proto = bareudp->ethertype;
-               } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+               if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+                                 sizeof(ipversion))) {
+                       bareudp->dev->stats.rx_dropped++;
+                       goto drop;
+               }
+               ipversion >>= 4;
+
+               if (ipversion == 4) {
+                       proto = htons(ETH_P_IP);
+               } else if (ipversion == 6 && bareudp->multi_proto_mode) {
                        proto = htons(ETH_P_IPV6);
                } else {
                        bareudp->dev->stats.rx_dropped++;
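
The rewrite above no longer dereferences skb->data at a fixed offset, which is only valid when those bytes sit in the linear head of the skb; skb_copy_bits() copies from paged fragments as well and fails cleanly on runt packets. A minimal sketch of the safe peek:

#include <linux/skbuff.h>

/* Read the IP version nibble at 'offset' without assuming the bytes
 * live in the linear area of the skb.
 */
static int example_peek_ip_version(const struct sk_buff *skb, int offset)
{
	__u8 byte;

	if (skb_copy_bits(skb, offset, &byte, sizeof(byte)))
		return -EINVAL;		/* packet too short */

	return byte >> 4;		/* 4 for IPv4, 6 for IPv6 */
}
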
index bba2a449ac70d8634e2fb91c2533bcaec3ab9bd0..43bca315a66c68aab837913d520ddc10cb8757f2 100644 (file)
@@ -1164,10 +1164,10 @@ static int m_can_set_bittiming(struct net_device *dev)
                                    FIELD_PREP(TDCR_TDCO_MASK, tdco));
                }
 
-               reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
-                         FIELD_PREP(NBTP_NSJW_MASK, sjw) |
-                         FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
-                         FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
+               reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+                       FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+                       FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+                       FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
 
                m_can_write(cdev, M_CAN_DBTP, reg_btp);
        }
index 66fa8b07c2e6f66e7c4555c780d081a4cd844016..95ae740fc31100fa59f1f8f4c9df125130307e9e 100644 (file)
@@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
        if (id == ESD_EV_CAN_ERROR_EXT) {
                u8 state = msg->msg.rx.data[0];
                u8 ecc = msg->msg.rx.data[1];
-               u8 txerr = msg->msg.rx.data[2];
-               u8 rxerr = msg->msg.rx.data[3];
+               u8 rxerr = msg->msg.rx.data[2];
+               u8 txerr = msg->msg.rx.data[3];
 
                skb = alloc_can_err_skb(priv->netdev, &cf);
                if (skb == NULL) {
index 9fdcc4bde4809b68aa6a7f3992735a9e86ce2bdd..7062db6a083c2ba616ba44e723ba5858af33c93a 100644 (file)
@@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
 {
        struct hellcreek *hellcreek = ds->priv;
        u16 entries;
+       int ret = 0;
        size_t i;
 
        mutex_lock(&hellcreek->reg_lock);
@@ -943,12 +944,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
                if (!(entry.portmask & BIT(port)))
                        continue;
 
-               cb(entry.mac, 0, entry.is_static, data);
+               ret = cb(entry.mac, 0, entry.is_static, data);
+               if (ret)
+                       break;
        }
 
        mutex_unlock(&hellcreek->reg_lock);
 
-       return 0;
+       return ret;
 }
 
 static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
@@ -1469,9 +1472,6 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
                u16 data;
                u8 gates;
 
-               cur++;
-               next++;
-
                if (i == schedule->num_entries)
                        gates = initial->gate_mask ^
                                cur->gate_mask;
@@ -1500,6 +1500,9 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
                        (initial->gate_mask <<
                         TR_GCLCMD_INIT_GATE_STATES_SHIFT);
                hellcreek_write(hellcreek, data, TR_GCLCMD);
+
+               cur++;
+               next++;
        }
 }
 
@@ -1547,7 +1550,7 @@ static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
        /* Calculate difference to admin base time */
        base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
 
-       return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC;
+       return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC;
 }
 
 static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
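
The fdb_dump change above, like the lan9303 and gswip changes that follow, makes the .port_fdb_dump implementation honor the return value of the dsa_fdb_dump_cb_t callback: the walk stops and the error propagates as soon as the callback fails (for instance when the netlink dump buffer fills up). A minimal sketch of the pattern, with a hypothetical entry iterator:

#include <net/dsa.h>

struct example_entry {
	unsigned char mac[ETH_ALEN];
	u16 portmap;
	bool is_static;
};

static int example_fdb_dump(struct dsa_switch *ds, int port,
			    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct example_entry *entry;
	int ret = 0;

	for_each_example_entry(ds->priv, entry) {	/* hypothetical */
		if (!(entry->portmap & BIT(port)))
			continue;

		/* Propagate the callback's verdict instead of dropping it. */
		ret = cb(entry->mac, 0, entry->is_static, data);
		if (ret)
			break;
	}

	return ret;
}
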
index 34437402542611db4cdf94e29e04599d99236b8a..d7ce281570b5429b2369b03e8cf2da380422aa7f 100644 (file)
@@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
        return 0;
 }
 
-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
-                          int portmap, void *ctx);
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+                         int portmap, void *ctx);
 
-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 {
-       int i;
+       int ret = 0, i;
 
        mutex_lock(&chip->alr_mutex);
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
@@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
                                                LAN9303_ALR_DAT1_PORT_BITOFFS;
                portmap = alrport_2_portmap[alrport];
 
-               cb(chip, dat0, dat1, portmap, ctx);
+               ret = cb(chip, dat0, dat1, portmap, ctx);
+               if (ret)
+                       break;
 
                lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
                                         LAN9303_ALR_CMD_GET_NEXT);
                lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
        }
        mutex_unlock(&chip->alr_mutex);
+
+       return ret;
 }
 
 static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -606,18 +610,20 @@ struct del_port_learned_ctx {
 };
 
 /* Clear learned (non-static) entry on given port */
-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
-                                        u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+                                       u32 dat1, int portmap, void *ctx)
 {
        struct del_port_learned_ctx *del_ctx = ctx;
        int port = del_ctx->port;
 
        if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
-               return;
+               return 0;
 
        /* learned entries have only one port, so we can just delete */
        dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
        lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+       return 0;
 }
 
 struct port_fdb_dump_ctx {
@@ -626,19 +632,19 @@ struct port_fdb_dump_ctx {
        dsa_fdb_dump_cb_t *cb;
 };
 
-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
-                                     u32 dat1, int portmap, void *ctx)
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+                                    u32 dat1, int portmap, void *ctx)
 {
        struct port_fdb_dump_ctx *dump_ctx = ctx;
        u8 mac[ETH_ALEN];
        bool is_static;
 
        if ((BIT(dump_ctx->port) & portmap) == 0)
-               return;
+               return 0;
 
        alr_reg_to_mac(dat0, dat1, mac);
        is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
-       dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+       return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
 }
 
 /* Set a static ALR entry. Delete entry if port_map is zero */
@@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
        };
 
        dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
-       lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
-
-       return 0;
+       return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
 }
 
 static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
index 314ae78bbdd63d46ff9f016e6102432aa48af66e..e78026ef6d8cc14215f3aa25a110eb0dcb414f04 100644 (file)
@@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
                addr[1] = mac_bridge.key[2] & 0xff;
                addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
                if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
-                       if (mac_bridge.val[0] & BIT(port))
-                               cb(addr, 0, true, data);
+                       if (mac_bridge.val[0] & BIT(port)) {
+                               err = cb(addr, 0, true, data);
+                               if (err)
+                                       return err;
+                       }
                } else {
-                       if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
-                               cb(addr, 0, false, data);
+                       if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+                               err = cb(addr, 0, false, data);
+                               if (err)
+                                       return err;
+                       }
                }
        }
        return 0;
index 560f6843bb65cd68cf1111c207e488e15290bda7..c5142f86a3c755577ec75729d46464a9d9ffac48 100644 (file)
@@ -687,8 +687,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
        shifts = ksz8->shifts;
 
        ksz8_r_table(dev, TABLE_VLAN, addr, &data);
-       addr *= dev->phy_port_cnt;
-       for (i = 0; i < dev->phy_port_cnt; i++) {
+       addr *= 4;
+       for (i = 0; i < 4; i++) {
                dev->vlan_cache[addr + i].table[0] = (u16)data;
                data >>= shifts[VLAN_TABLE];
        }
@@ -702,7 +702,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
        u64 buf;
 
        data = (u16 *)&buf;
-       addr = vid / dev->phy_port_cnt;
+       addr = vid / 4;
        index = vid & 3;
        ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
        *vlan = data[index];
@@ -716,7 +716,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
        u64 buf;
 
        data = (u16 *)&buf;
-       addr = vid / dev->phy_port_cnt;
+       addr = vid / 4;
        index = vid & 3;
        ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
        data[index] = vlan;
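
The ksz8 hunks above replace dev->phy_port_cnt with the constant 4 in the VLAN table addressing: the table packs four 16-bit VLAN entries into each 64-bit row, which is a property of the table layout rather than of the port count. A sketch of the addressing, assuming flat 16-bit entries:

static inline void vlan_slot(unsigned short vid,
                             unsigned short *row, unsigned short *lane)
{
        *row  = vid / 4;        /* which 64-bit table row */
        *lane = vid & 3;        /* which 16-bit entry within the row */
}
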
@@ -1119,24 +1119,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
        if (ksz_is_ksz88x3(dev))
                return -ENOTSUPP;
 
+       /* Discard packets with VID not enabled on the switch */
        ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
 
+       /* Discard packets with VID not enabled on the ingress port */
+       for (port = 0; port < dev->phy_port_cnt; ++port)
+               ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+                            flag);
+
        return 0;
 }
 
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+       if (ksz_is_ksz88x3(dev)) {
+               ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
+                       0x03 << (4 - 2 * port), state);
+       } else {
+               ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+       }
+}
+
 static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
                              const struct switchdev_obj_port_vlan *vlan,
                              struct netlink_ext_ack *extack)
 {
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        struct ksz_device *dev = ds->priv;
+       struct ksz_port *p = &dev->ports[port];
        u16 data, new_pvid = 0;
        u8 fid, member, valid;
 
        if (ksz_is_ksz88x3(dev))
                return -ENOTSUPP;
 
-       ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+       /* If a VLAN is added with an untagged flag different from the
+        * port's Remove Tag flag, we need to change the latter.
+        * Ignore VID 0, which is always untagged.
+        * Ignore the CPU port, which will always be tagged.
+        */
+       if (untagged != p->remove_tag && vlan->vid != 0 &&
+           port != dev->cpu_port) {
+               unsigned int vid;
+
+               /* Reject attempts to add a VLAN that requires the
+                * Remove Tag flag to be changed, unless there are no
+                * other VLANs currently configured.
+                */
+               for (vid = 1; vid < dev->num_vlans; ++vid) {
+                       /* Skip the VID we are going to add or reconfigure */
+                       if (vid == vlan->vid)
+                               continue;
+
+                       ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
+                                      &fid, &member, &valid);
+                       if (valid && (member & BIT(port)))
+                               return -EINVAL;
+               }
+
+               ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+               p->remove_tag = untagged;
+       }
 
        ksz8_r_vlan_table(dev, vlan->vid, &data);
        ksz8_from_vlan(dev, data, &fid, &member, &valid);
@@ -1160,9 +1203,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
                u16 vid;
 
                ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
-               vid &= 0xfff;
+               vid &= ~VLAN_VID_MASK;
                vid |= new_pvid;
                ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+               ksz8_port_enable_pvid(dev, port, true);
        }
 
        return 0;
@@ -1171,9 +1216,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
 static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
                              const struct switchdev_obj_port_vlan *vlan)
 {
-       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
        struct ksz_device *dev = ds->priv;
-       u16 data, pvid, new_pvid = 0;
+       u16 data, pvid;
        u8 fid, member, valid;
 
        if (ksz_is_ksz88x3(dev))
@@ -1182,8 +1226,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
        ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
        pvid = pvid & 0xFFF;
 
-       ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
-
        ksz8_r_vlan_table(dev, vlan->vid, &data);
        ksz8_from_vlan(dev, data, &fid, &member, &valid);
 
@@ -1195,14 +1237,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
                valid = 0;
        }
 
-       if (pvid == vlan->vid)
-               new_pvid = 1;
-
        ksz8_to_vlan(dev, fid, member, valid, &data);
        ksz8_w_vlan_table(dev, vlan->vid, data);
 
-       if (new_pvid != pvid)
-               ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+       if (pvid == vlan->vid)
+               ksz8_port_enable_pvid(dev, port, false);
 
        return 0;
 }
@@ -1435,6 +1474,9 @@ static int ksz8_setup(struct dsa_switch *ds)
 
        ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
 
+       if (!ksz_is_ksz88x3(dev))
+               ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
        /* set broadcast storm protection 10% rate */
        regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
                           BROADCAST_STORM_RATE,
@@ -1717,6 +1759,16 @@ static int ksz8_switch_init(struct ksz_device *dev)
        /* set the real number of ports */
        dev->ds->num_ports = dev->port_cnt;
 
+       /* We rely on software untagging on the CPU port, so that we
+        * can support both tagged and untagged VLANs.
+        */
+       dev->ds->untag_bridge_pvid = true;
+
+       /* VLAN filtering is partly controlled by the global VLAN
+        * Enable flag.
+        */
+       dev->ds->vlan_filtering_is_global = true;
+
        return 0;
 }
 
index a32355624f31f77b7c4e20f8f50c047368295ded..6b40bc25f7ff5e46ca03a2cb1cc466fd52a4f5aa 100644 (file)
 #define REG_PORT_4_OUT_RATE_3          0xEE
 #define REG_PORT_5_OUT_RATE_3          0xFE
 
+/* 88x3 specific */
+
+#define REG_SW_INSERT_SRC_PVID         0xC2
+
 /* PME */
 
 #define SW_PME_OUTPUT_ENABLE           BIT(1)
index 2e6bfd333f504c9a1dd2490a5d41440175d70bba..1597c63988b4e430bcfbb1002fd3644c9328730c 100644 (file)
@@ -27,6 +27,7 @@ struct ksz_port_mib {
 struct ksz_port {
        u16 member;
        u16 vid_member;
+       bool remove_tag;                /* Remove Tag flag set, for ksz8795 only */
        int stp_state;
        struct phy_device phydev;
 
@@ -205,12 +206,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
        int ret;
 
        ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
-       if (!ret) {
-               /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
-               value[0] = swab32(value[0]);
-               value[1] = swab32(value[1]);
-               *val = swab64((u64)*value);
-       }
+       if (!ret)
+               *val = (u64)value[0] << 32 | value[1];
 
        return ret;
 }
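
The ksz_read64() rewrite above composes the 64-bit value directly from the two 32-bit words returned by regmap, replacing the earlier double-swab dance. A self-contained sketch of the composition, assuming the words already arrive in host order with the high word first:

typedef unsigned int u32;
typedef unsigned long long u64;

static inline u64 compose_u64(u32 hi, u32 lo)
{
        return (u64)hi << 32 | lo;      /* hi word up, OR in low word */
}
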
index 69f21b71614c5bec59ca6189f620e4ec9ab08007..632f0fcc5aa736155ffbce4c8123d5f28a17b37b 100644 (file)
@@ -47,6 +47,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
        MIB_DESC(2, 0x48, "TxBytes"),
        MIB_DESC(1, 0x60, "RxDrop"),
        MIB_DESC(1, 0x64, "RxFiltering"),
+       MIB_DESC(1, 0x68, "RxUnicast"),
        MIB_DESC(1, 0x6c, "RxMulticast"),
        MIB_DESC(1, 0x70, "RxBroadcast"),
        MIB_DESC(1, 0x74, "RxAlignErr"),
index b1d46dd8eaabcdf3c2d8cd294a30d5ad725e2998..6ea003678798651f70df5f8235863f7c79973c85 100644 (file)
@@ -1277,15 +1277,16 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
        int err;
 
        /* mv88e6393x family errata 4.6:
-        * Cannot clear PwrDn bit on SERDES on port 0 if device is configured
-        * CPU_MGD mode or P0_mode is configured for [x]MII.
-        * Workaround: Set Port0 SERDES register 4.F002 bit 5=0 and bit 15=1.
+        * Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD
+        * mode or P0_mode is configured for [x]MII.
+        * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
         *
         * It seems that after this workaround the SERDES is automatically
         * powered up (the bit is cleared), so power it down.
         */
-       if (lane == MV88E6393X_PORT0_LANE) {
-               err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE,
+       if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
+           lane == MV88E6393X_PORT10_LANE) {
+               err = mv88e6390_serdes_read(chip, lane,
                                            MDIO_MMD_PHYXS,
                                            MV88E6393X_SERDES_POC, &reg);
                if (err)
index 6686192e1883ea7336a318a18ae0a08c693c5b73..563d8a27903060ae86e2c39ede2b8cf522bd6014 100644 (file)
         AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
         AR9331_SW_PORT_STATUS_SPEED_M)
 
+#define AR9331_SW_REG_PORT_CTRL(_port)                 (0x104 + (_port) * 0x100)
+#define AR9331_SW_PORT_CTRL_HEAD_EN                    BIT(11)
+#define AR9331_SW_PORT_CTRL_PORT_STATE                 GENMASK(2, 0)
+#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED                0
+#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING                1
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING       2
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING                3
+#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD         4
+
+#define AR9331_SW_REG_PORT_VLAN(_port)                 (0x108 + (_port) * 0x100)
+#define AR9331_SW_PORT_VLAN_8021Q_MODE                 GENMASK(31, 30)
+#define AR9331_SW_8021Q_MODE_SECURE                    3
+#define AR9331_SW_8021Q_MODE_CHECK                     2
+#define AR9331_SW_8021Q_MODE_FALLBACK                  1
+#define AR9331_SW_8021Q_MODE_NONE                      0
+#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER            GENMASK(25, 16)
+
 /* MIB registers */
 #define AR9331_MIB_COUNTER(x)                  (0x20000 + ((x) * 0x100))
 
@@ -371,12 +388,60 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
        return 0;
 }
 
-static int ar9331_sw_setup(struct dsa_switch *ds)
+static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
 {
        struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
        struct regmap *regmap = priv->regmap;
+       u32 port_mask, port_ctrl, val;
        int ret;
 
+       /* Generate default port settings */
+       port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
+                              AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
+
+       if (dsa_is_cpu_port(ds, port)) {
+               /* CPU port should be allowed to communicate with all user
+                * ports.
+                */
+               port_mask = dsa_user_ports(ds);
+               /* Enable Atheros header on the CPU port. This allows us
+                * to communicate with each port separately.
+                */
+               port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
+       } else if (dsa_is_user_port(ds, port)) {
+               /* User ports should communicate only with the CPU port.
+                */
+               port_mask = BIT(dsa_upstream_port(ds, port));
+       } else {
+               /* Other ports do not need to communicate at all */
+               port_mask = 0;
+       }
+
+       val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
+                        AR9331_SW_8021Q_MODE_NONE) |
+               FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
+
+       ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
+       if (ret)
+               goto error;
+
+       ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
+       if (ret)
+               goto error;
+
+       return 0;
+error:
+       dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
+
+       return ret;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+       struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+       struct regmap *regmap = priv->regmap;
+       int ret, i;
+
        ret = ar9331_sw_reset(priv);
        if (ret)
                return ret;
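
ar9331_sw_setup_port() above builds its register values with FIELD_PREP(), which shifts a field value up to the low bit of a contiguous mask. A small illustration of that composition with hypothetical masks (not the real AR9331 layout):

#define MODE_MASK       0xC0000000u     /* bits 31:30 */
#define MEMBER_MASK     0x03FF0000u     /* bits 25:16 */

static unsigned int field_prep(unsigned int mask, unsigned int val)
{
        /* (mask & -mask) is the mask's lowest set bit, so the multiply
         * shifts val into position; the final AND clips any overflow.
         */
        return (val * (mask & -mask)) & mask;
}

/* e.g.: reg = field_prep(MODE_MASK, 0) | field_prep(MEMBER_MASK, ports); */
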
@@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
        if (ret)
                goto error;
 
+       for (i = 0; i < ds->num_ports; i++) {
+               ret = ar9331_sw_setup_port(ds, i);
+               if (ret)
+                       goto error;
+       }
+
        ds->configure_vlan_while_not_filtering = false;
 
        return 0;
index 8667c9754330a1925f05ff5ba61d90ffc4027c21..49eb0ac41b7d93d187f95cf96cf18aeb52e7249d 100644 (file)
@@ -1635,7 +1635,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
                /* We need to hide the dsa_8021q VLANs from the user. */
                if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
                        l2_lookup.vlanid = 0;
-               cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+               rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+               if (rc)
+                       return rc;
        }
        return 0;
 }
@@ -3185,6 +3187,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
        }
 
        sja1105_devlink_teardown(ds);
+       sja1105_mdiobus_unregister(ds);
        sja1105_flower_teardown(ds);
        sja1105_tas_teardown(ds);
        sja1105_ptp_clock_unregister(ds);
index 19aea8fb76f6a8bdba33120014ce28845edfa55a..705d3900e43a3bdd9bc2c14da5db5be1f5b6fdad 100644 (file)
@@ -284,8 +284,7 @@ static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
        struct mii_bus *bus;
        int rc = 0;
 
-       np = of_find_compatible_node(mdio_node, NULL,
-                                    "nxp,sja1110-base-tx-mdio");
+       np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
        if (!np)
                return 0;
 
@@ -339,8 +338,7 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
        struct mii_bus *bus;
        int rc = 0;
 
-       np = of_find_compatible_node(mdio_node, NULL,
-                                    "nxp,sja1110-base-t1-mdio");
+       np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
        if (!np)
                return 0;
 
index 860c18fb7aae9e2bb1f8962a95f6338b02b03221..80399c8980bd308fb026404c0bcb3ec5a1b04912 100644 (file)
@@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev)
        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
-               goto err;
+               goto err_mdio_remove;
        }
 
        return 0;
 
+err_mdio_remove:
+       xge_mdio_remove(ndev);
 err:
        free_netdev(ndev);
 
index 89606587b1566901d44432e4a93282de44c7fc83..8a97640cdfe761e3d4be2af5114522733d591408 100644 (file)
@@ -72,7 +72,8 @@
 #include "bnxt_debugfs.h"
 
 #define BNXT_TX_TIMEOUT                (5 * HZ)
-#define BNXT_DEF_MSG_ENABLE    (NETIF_MSG_DRV | NETIF_MSG_HW)
+#define BNXT_DEF_MSG_ENABLE    (NETIF_MSG_DRV | NETIF_MSG_HW | \
+                                NETIF_MSG_TX_ERR)
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -365,6 +366,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
        return md_dst->u.port_info.port_id;
 }
 
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                            u16 prod)
+{
+       bnxt_db_write(bp, &txr->tx_db, prod);
+       txr->kick_pending = 0;
+}
+
+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+                                         struct bnxt_tx_ring_info *txr,
+                                         struct netdev_queue *txq)
+{
+       netif_tx_stop_queue(txq);
+
+       /* netif_tx_stop_queue() must be done before checking
+        * tx index in bnxt_tx_avail() below, because in
+        * bnxt_tx_int(), we update tx index before checking for
+        * netif_tx_queue_stopped().
+        */
+       smp_mb();
+       if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+               netif_tx_wake_queue(txq);
+               return false;
+       }
+
+       return true;
+}
+
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bnxt *bp = netdev_priv(dev);
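
bnxt_txr_netif_try_stop_queue() above factors out the stop-then-recheck idiom: stop the queue, force a full barrier so the check of ring space cannot be reordered before the stop becomes visible, then undo the stop if the consumer freed space in the meantime. A userspace sketch of the same idiom with C11 atomics (generic names, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>

struct txq {
        atomic_bool stopped;
        int (*avail)(struct txq *q);    /* free descriptors */
        int wake_thresh;
};

static bool try_stop_queue(struct txq *q)
{
        atomic_store(&q->stopped, true);
        /* full fence, like smp_mb(): the avail() recheck must not be
         * hoisted above the store, or a concurrent consumer's wakeup
         * could be missed
         */
        atomic_thread_fence(memory_order_seq_cst);
        if (q->avail(q) > q->wake_thresh) {
                atomic_store(&q->stopped, false);  /* space freed: undo */
                return false;
        }
        return true;    /* genuinely full */
}
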
@@ -384,6 +412,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
+               atomic_long_inc(&dev->tx_dropped);
                return NETDEV_TX_OK;
        }
 
@@ -393,8 +422,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
-               netif_tx_stop_queue(txq);
-               return NETDEV_TX_BUSY;
+               /* We must have raced with NAPI cleanup */
+               if (net_ratelimit() && txr->kick_pending)
+                       netif_warn(bp, tx_err, dev,
+                                  "bnxt: ring busy w/ flush pending!\n");
+               if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+                       return NETDEV_TX_BUSY;
        }
 
        length = skb->len;
@@ -426,7 +459,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
-                       if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+                       if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
+                                           &ptp->tx_hdr_off)) {
+                               if (vlan_tag_flags)
+                                       ptp->tx_hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
@@ -514,21 +550,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
-               if (skb_pad(skb, pad)) {
+               if (skb_pad(skb, pad))
                        /* SKB already freed. */
-                       tx_buf->skb = NULL;
-                       return NETDEV_TX_OK;
-               }
+                       goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }
 
        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
 
-       if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
-               dev_kfree_skb_any(skb);
-               tx_buf->skb = NULL;
-               return NETDEV_TX_OK;
-       }
+       if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+               goto tx_free;
 
        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -615,24 +646,17 @@ normal_tx:
        txr->tx_prod = prod;
 
        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
-               bnxt_db_write(bp, &txr->tx_db, prod);
+               bnxt_txr_db_kick(bp, txr, prod);
+       else
+               txr->kick_pending = 1;
 
 tx_done:
 
        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
-                       bnxt_db_write(bp, &txr->tx_db, prod);
-
-               netif_tx_stop_queue(txq);
+                       bnxt_txr_db_kick(bp, txr, prod);
 
-               /* netif_tx_stop_queue() must be done before checking
-                * tx index in bnxt_tx_avail() below, because in
-                * bnxt_tx_int(), we update tx index before checking for
-                * netif_tx_queue_stopped().
-                */
-               smp_mb();
-               if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
-                       netif_tx_wake_queue(txq);
+               bnxt_txr_netif_try_stop_queue(bp, txr, txq);
        }
        return NETDEV_TX_OK;
 
@@ -645,7 +669,6 @@ tx_dma_error:
        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
-       tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);
@@ -659,7 +682,13 @@ tx_dma_error:
                               PCI_DMA_TODEVICE);
        }
 
+tx_free:
        dev_kfree_skb_any(skb);
+tx_kick_pending:
+       if (txr->kick_pending)
+               bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+       txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+       atomic_long_inc(&dev->tx_dropped);
        return NETDEV_TX_OK;
 }
 
@@ -729,14 +758,9 @@ next_tx_int:
        smp_mb();
 
        if (unlikely(netif_tx_queue_stopped(txq)) &&
-           (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-               __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_stopped(txq) &&
-                   bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
-                   txr->dev_state != BNXT_DEV_STATE_CLOSING)
-                       netif_tx_wake_queue(txq);
-               __netif_tx_unlock(txq);
-       }
+           bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+           READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+               netif_tx_wake_queue(txq);
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -1764,6 +1788,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;
 
+       /* The valid test of the entry must be done before reading
+        * any further.
+        */
+       dma_rmb();
        prod = rxr->rx_prod;
 
        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
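
The dma_rmb() additions in this file all follow the same rule: the completion entry's valid bit is written last by the device, so the CPU must not read the rest of the entry until after the valid check, and a read barrier enforces that against speculative loads. A sketch with a generic descriptor (the fence stands in for dma_rmb()):

struct cmpl {
        unsigned int flags;     /* VALID bit, written last by the device */
        unsigned int payload;
};

#define CMPL_VALID 0x1u

static int read_cmpl(volatile struct cmpl *c, unsigned int *out)
{
        if (!(c->flags & CMPL_VALID))
                return -1;      /* entry not published yet */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);  /* dma_rmb() stand-in */
        *out = c->payload;      /* ordered after the valid check */
        return 0;
}
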
@@ -1986,6 +2014,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;
 
+       /* The valid test of the entry must be done before reading
+        * any further.
+        */
+       dma_rmb();
        cmp_type = RX_CMP_TYPE(rxcmp);
        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -2451,6 +2483,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
                if (!TX_CMP_VALID(txcmp, raw_cons))
                        break;
 
+               /* The valid test of the entry must be done before
+                * reading any further.
+                */
+               dma_rmb();
                if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
                        cp_cons = RING_CMP(tmp_raw_cons);
@@ -9125,10 +9161,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
        for (i = 0; i < bp->cp_nr_rings; i++) {
                struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
 
+               napi_disable(&bp->bnapi[i]->napi);
                if (bp->bnapi[i]->rx_ring)
                        cancel_work_sync(&cpr->dim.work);
-
-               napi_disable(&bp->bnapi[i]->napi);
        }
 }
 
@@ -9162,9 +9197,11 @@ void bnxt_tx_disable(struct bnxt *bp)
        if (bp->tx_ring) {
                for (i = 0; i < bp->tx_nr_rings; i++) {
                        txr = &bp->tx_ring[i];
-                       txr->dev_state = BNXT_DEV_STATE_CLOSING;
+                       WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
                }
        }
+       /* Make sure napi polls see @dev_state change */
+       synchronize_net();
        /* Drop carrier first to prevent TX timeout */
        netif_carrier_off(bp->dev);
        /* Stop all TX queues */
@@ -9178,8 +9215,10 @@ void bnxt_tx_enable(struct bnxt *bp)
 
        for (i = 0; i < bp->tx_nr_rings; i++) {
                txr = &bp->tx_ring[i];
-               txr->dev_state = 0;
+               WRITE_ONCE(txr->dev_state, 0);
        }
+       /* Make sure napi polls see @dev_state change */
+       synchronize_net();
        netif_tx_wake_all_queues(bp->dev);
        if (bp->link_info.link_up)
                netif_carrier_on(bp->dev);
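
bnxt_tx_disable()/bnxt_tx_enable() above switch dev_state to a marked WRITE_ONCE() publish, paired with the READ_ONCE() in bnxt_tx_int(), and use synchronize_net() to wait out NAPI polls that may still see the old value; this is what lets the tx-wake path drop the __netif_tx_lock dance. A stripped-down sketch of the publish/observe halves, with the kernel primitives replaced by C11 equivalents:

#include <stdatomic.h>

static atomic_int dev_state;    /* 0 = running, 1 = closing */

static void tx_disable(void)
{
        atomic_store_explicit(&dev_state, 1, memory_order_relaxed);
        /* kernel: synchronize_net() here waits for in-flight NAPI
         * polls that may have read the old value
         */
}

static int may_wake_queue(void)
{
        return atomic_load_explicit(&dev_state, memory_order_relaxed) == 0;
}
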
@@ -10765,6 +10804,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
                        return true;
                return false;
        }
+       /* 212 firmware is broken for aRFS */
+       if (BNXT_FW_MAJ(bp) == 212)
+               return false;
        if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
                return true;
        if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
index bcf8d00b8c80178774dbc9fbf3310a779f7bd458..ba4e0fc38520cd5a292dd266b976ad576a6e1510 100644 (file)
@@ -786,6 +786,7 @@ struct bnxt_tx_ring_info {
        u16                     tx_prod;
        u16                     tx_cons;
        u16                     txq_index;
+       u8                      kick_pending;
        struct bnxt_db_info     tx_db;
 
        struct tx_bd            *tx_desc_ring[MAX_TX_PAGES];
index 3fc6781c5b9849aea676668016aee2043e6198e6..94d07a9f70343b7e4d094f43cd12476e48dc0bb6 100644 (file)
@@ -368,6 +368,7 @@ struct cmd_nums {
        #define HWRM_FUNC_PTP_TS_QUERY                    0x19fUL
        #define HWRM_FUNC_PTP_EXT_CFG                     0x1a0UL
        #define HWRM_FUNC_PTP_EXT_QCFG                    0x1a1UL
+       #define HWRM_FUNC_KEY_CTX_ALLOC                   0x1a2UL
        #define HWRM_SELFTEST_QLIST                       0x200UL
        #define HWRM_SELFTEST_EXEC                        0x201UL
        #define HWRM_SELFTEST_IRQ                         0x202UL
@@ -531,8 +532,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 10
 #define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.2.47"
+#define HWRM_VERSION_RSVD 52
+#define HWRM_VERSION_STR "1.10.2.52"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -585,6 +586,7 @@ struct hwrm_ver_get_output {
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED              0x1000UL
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED                      0x2000UL
        #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED                    0x4000UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE                      0x8000UL
        u8      roce_fw_maj_8b;
        u8      roce_fw_min_8b;
        u8      roce_fw_bld_8b;
@@ -886,7 +888,8 @@ struct hwrm_async_event_cmpl_reset_notify {
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL        (0x2UL << 8)
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL    (0x3UL << 8)
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET                (0x4UL << 8)
-       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                     ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
+       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION             (0x5UL << 8)
+       #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST                     ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK           0xffff0000UL
        #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT            16
 };
@@ -1236,13 +1239,14 @@ struct hwrm_async_event_cmpl_error_report_base {
        u8      timestamp_lo;
        __le16  timestamp_hi;
        __le32  event_data1;
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK          0xffUL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT           0
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED        0x0UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM     0x1UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL  0x2UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM             0x3UL
-       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST           ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK                   0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT                    0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED                 0x0UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM              0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL           0x2UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM                      0x3UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD  0x4UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST                    ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
 };
 
 /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1446,6 +1450,8 @@ struct hwrm_func_vf_cfg_input {
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS            0x200UL
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS        0x400UL
        #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS     0x800UL
+       #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS      0x1000UL
+       #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS      0x2000UL
        __le16  mtu;
        __le16  guest_vlan;
        __le16  async_event_cr;
@@ -1469,7 +1475,8 @@ struct hwrm_func_vf_cfg_input {
        __le16  num_vnics;
        __le16  num_stat_ctxs;
        __le16  num_hw_ring_grps;
-       u8      unused_0[4];
+       __le16  num_tx_key_ctxs;
+       __le16  num_rx_key_ctxs;
 };
 
 /* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1493,7 +1500,7 @@ struct hwrm_func_qcaps_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_qcaps_output (size:704b/88B) */
+/* hwrm_func_qcaps_output (size:768b/96B) */
 struct hwrm_func_qcaps_output {
        __le16  error_code;
        __le16  req_type;
@@ -1587,7 +1594,8 @@ struct hwrm_func_qcaps_output {
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA      0x4UL
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA      0x8UL
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE     0x10UL
-       u8      unused_1;
+       __le16  max_key_ctxs_alloc;
+       u8      unused_1[7];
        u8      valid;
 };
 
@@ -1602,7 +1610,7 @@ struct hwrm_func_qcfg_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_qcfg_output (size:832b/104B) */
+/* hwrm_func_qcfg_output (size:896b/112B) */
 struct hwrm_func_qcfg_output {
        __le16  error_code;
        __le16  req_type;
@@ -1749,11 +1757,13 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
        #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
        __le16  host_mtu;
-       u8      unused_3;
+       __le16  alloc_tx_key_ctxs;
+       __le16  alloc_rx_key_ctxs;
+       u8      unused_3[5];
        u8      valid;
 };
 
-/* hwrm_func_cfg_input (size:832b/104B) */
+/* hwrm_func_cfg_input (size:896b/112B) */
 struct hwrm_func_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -1820,6 +1830,8 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW         0x8000000UL
        #define FUNC_CFG_REQ_ENABLES_TPID                     0x10000000UL
        #define FUNC_CFG_REQ_ENABLES_HOST_MTU                 0x20000000UL
+       #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS              0x40000000UL
+       #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS              0x80000000UL
        __le16  admin_mtu;
        __le16  mru;
        __le16  num_rsscos_ctxs;
@@ -1929,6 +1941,9 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
        __be16  tpid;
        __le16  host_mtu;
+       __le16  num_tx_key_ctxs;
+       __le16  num_rx_key_ctxs;
+       u8      unused_0[4];
 };
 
 /* hwrm_func_cfg_output (size:128b/16B) */
@@ -2099,6 +2114,7 @@ struct hwrm_func_drv_rgtr_input {
        #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT                   0x40UL
        #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT               0x80UL
        #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT     0x100UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT                 0x200UL
        __le32  enables;
        #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
        #define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
@@ -2268,7 +2284,7 @@ struct hwrm_func_resource_qcaps_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+/* hwrm_func_resource_qcaps_output (size:512b/64B) */
 struct hwrm_func_resource_qcaps_output {
        __le16  error_code;
        __le16  req_type;
@@ -2300,11 +2316,15 @@ struct hwrm_func_resource_qcaps_output {
        __le16  max_tx_scheduler_inputs;
        __le16  flags;
        #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED     0x1UL
+       __le16  min_tx_key_ctxs;
+       __le16  max_tx_key_ctxs;
+       __le16  min_rx_key_ctxs;
+       __le16  max_rx_key_ctxs;
        u8      unused_0[5];
        u8      valid;
 };
 
-/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
 struct hwrm_func_vf_resource_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -2331,6 +2351,10 @@ struct hwrm_func_vf_resource_cfg_input {
        __le16  max_hw_ring_grps;
        __le16  flags;
        #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED     0x1UL
+       __le16  min_tx_key_ctxs;
+       __le16  max_tx_key_ctxs;
+       __le16  min_rx_key_ctxs;
+       __le16  max_rx_key_ctxs;
        u8      unused_0[2];
 };
 
@@ -2348,7 +2372,9 @@ struct hwrm_func_vf_resource_cfg_output {
        __le16  reserved_vnics;
        __le16  reserved_stat_ctx;
        __le16  reserved_hw_ring_grps;
-       u8      unused_0[7];
+       __le16  reserved_tx_key_ctxs;
+       __le16  reserved_rx_key_ctxs;
+       u8      unused_0[3];
        u8      valid;
 };
 
@@ -4220,7 +4246,7 @@ struct hwrm_port_lpbk_clr_stats_output {
        u8      valid;
 };
 
-/* hwrm_port_ts_query_input (size:256b/32B) */
+/* hwrm_port_ts_query_input (size:320b/40B) */
 struct hwrm_port_ts_query_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -4238,8 +4264,11 @@ struct hwrm_port_ts_query_input {
        __le16  enables;
        #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT     0x1UL
        #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID         0x2UL
+       #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET     0x4UL
        __le16  ts_req_timeout;
        __le32  ptp_seq_id;
+       __le16  ptp_hdr_offset;
+       u8      unused_1[6];
 };
 
 /* hwrm_port_ts_query_output (size:192b/24B) */
@@ -8172,6 +8201,7 @@ struct hwrm_fw_reset_input {
        u8      host_idx;
        u8      flags;
        #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL     0x1UL
+       #define FW_RESET_REQ_FLAGS_FW_ACTIVATION      0x2UL
        u8      unused_0[4];
 };
 
@@ -8952,7 +8982,7 @@ struct hwrm_nvm_get_dir_info_output {
        u8      valid;
 };
 
-/* hwrm_nvm_write_input (size:384b/48B) */
+/* hwrm_nvm_write_input (size:448b/56B) */
 struct hwrm_nvm_write_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -8968,7 +8998,11 @@ struct hwrm_nvm_write_input {
        __le16  option;
        __le16  flags;
        #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG     0x1UL
+       #define NVM_WRITE_REQ_FLAGS_BATCH_MODE               0x2UL
+       #define NVM_WRITE_REQ_FLAGS_BATCH_LAST               0x4UL
        __le32  dir_item_length;
+       __le32  offset;
+       __le32  len;
        __le32  unused_0;
 };
 
index ec381c2423b8ccef618b9e224665f09473cf66c2..81f40ab748f16db9a3e59b3508824d0664c8b60e 100644 (file)
@@ -20,7 +20,7 @@
 #include "bnxt.h"
 #include "bnxt_ptp.h"
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
 {
        unsigned int ptp_class;
        struct ptp_header *hdr;
@@ -34,6 +34,7 @@ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
                if (!hdr)
                        return -EINVAL;
 
+               *hdr_off = (u8 *)hdr - skb->data;
                *seq_id  = ntohs(hdr->sequence_id);
                return 0;
        default:
@@ -91,6 +92,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
            PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
                req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
                req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+               req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
                req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
        }
        mutex_lock(&bp->hwrm_cmd_lock);
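
bnxt_ptp_parse() now also reports where the PTP header sits inside the packet: the offset is the pointer difference from skb->data, and the transmit path bumps it by VLAN_HLEN when the hardware will insert a tag ahead of the header. A flat-buffer sketch of the computation:

#include <stddef.h>

static size_t ptp_hdr_offset(const unsigned char *pkt,
                             const unsigned char *ptp_hdr,
                             int vlan_tag_inserted)
{
        size_t off = (size_t)(ptp_hdr - pkt);   /* offset within packet */

        if (vlan_tag_inserted)
                off += 4;       /* VLAN_HLEN: tag lands before the header */
        return off;
}
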
index 254ba7bc0f9908ce6d0f10a3510ff495875b8e55..524f1c27205420bb8e43143f0f6c754af5f3a637 100644 (file)
@@ -10,8 +10,8 @@
 #ifndef BNXT_PTP_H
 #define BNXT_PTP_H
 
-#define BNXT_PTP_GRC_WIN       5
-#define BNXT_PTP_GRC_WIN_BASE  0x5000
+#define BNXT_PTP_GRC_WIN       6
+#define BNXT_PTP_GRC_WIN_BASE  0x6000
 
 #define BNXT_MAX_PHC_DRIFT     31000000
 #define BNXT_LO_TIMER_MASK     0x0000ffffffffUL
@@ -19,7 +19,8 @@
 
 #define BNXT_PTP_QTS_TIMEOUT   1000
 #define BNXT_PTP_QTS_TX_ENABLES        (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
-                                PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+                                PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
+                                PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
 
 struct bnxt_ptp_cfg {
        struct ptp_clock_info   ptp_info;
@@ -37,6 +38,7 @@ struct bnxt_ptp_cfg {
        #define BNXT_PHC_OVERFLOW_PERIOD        (19 * 3600 * HZ)
 
        u16                     tx_seqid;
+       u16                     tx_hdr_off;
        struct bnxt             *bp;
        atomic_t                tx_avail;
 #define BNXT_MAX_TX_TS 1
@@ -74,7 +76,7 @@ do {                                          \
        ((dst) = READ_ONCE(src))
 #endif
 
-int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
 int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
 int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
index 5c368a9cbbbcd5c67e36523be545d7f21383a9e0..c2e1f163bb14f0885dfe85f6f1f601a085fa4ef9 100644 (file)
@@ -275,6 +275,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
 
        if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
                desc_ptp = macb_ptp_desc(bp, desc);
+               /* Unlikely but check */
+               if (!desc_ptp) {
+                       dev_warn_ratelimited(&bp->pdev->dev,
+                                            "Timestamp not supported in BD\n");
+                       return;
+               }
                gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
                memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -307,8 +313,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
        if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
                return -ENOMEM;
 
-       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        desc_ptp = macb_ptp_desc(queue->bp, desc);
+       /* Unlikely but check */
+       if (!desc_ptp)
+               return -EINVAL;
+       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        tx_timestamp = &queue->tx_timestamps[head];
        tx_timestamp->skb = skb;
        /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
index dbf9a0e6601d1d41d046c7b07a932c729a2d7459..710cb00ce3a3b88331d21d1b9234342f32106c18 100644 (file)
@@ -5068,6 +5068,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
                ret = -ENOMEM;
                goto bye;
        }
+       bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
 #endif
 
        params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -6788,13 +6789,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        setup_memwin(adapter);
        err = adap_init0(adapter, 0);
-#ifdef CONFIG_DEBUG_FS
-       bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
-#endif
-       setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;
 
+       setup_memwin_rdma(adapter);
+
        /* configure SGE_STAT_CFG_A to read WC stats */
        if (!is_t4(adapter->params.chip))
                t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
index 68b78642c045d44d49d62b8c3125a6421756dbd8..98cc0133c343798d3fef72cb83d7d1ed6f099905 100644 (file)
@@ -3038,26 +3038,30 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
        return err;
 }
 
-static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+       dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+       dpaa2_switch_free_dpio(ethsw);
+       dpaa2_switch_destroy_rings(ethsw);
+       dpaa2_switch_drain_bp(ethsw);
+       dpaa2_switch_free_dpbp(ethsw);
+}
+
+static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
 {
        struct device *dev = &sw_dev->dev;
        struct ethsw_core *ethsw = dev_get_drvdata(dev);
        int err;
 
+       dpaa2_switch_ctrl_if_teardown(ethsw);
+
+       destroy_workqueue(ethsw->workqueue);
+
        err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err)
                dev_warn(dev, "dpsw_close err %d\n", err);
 }
 
-static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
-{
-       dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
-       dpaa2_switch_free_dpio(ethsw);
-       dpaa2_switch_destroy_rings(ethsw);
-       dpaa2_switch_drain_bp(ethsw);
-       dpaa2_switch_free_dpbp(ethsw);
-}
-
 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
 {
        struct ethsw_port_priv *port_priv;
@@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
        dev = &sw_dev->dev;
        ethsw = dev_get_drvdata(dev);
 
-       dpaa2_switch_ctrl_if_teardown(ethsw);
-
        dpaa2_switch_teardown_irqs(sw_dev);
 
        dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -3084,9 +3086,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
        kfree(ethsw->acls);
        kfree(ethsw->ports);
 
-       dpaa2_switch_takedown(sw_dev);
-
-       destroy_workqueue(ethsw->workqueue);
+       dpaa2_switch_teardown(sw_dev);
 
        fsl_mc_portal_free(ethsw->mc_io);
 
@@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
                               GFP_KERNEL);
        if (!(ethsw->ports)) {
                err = -ENOMEM;
-               goto err_takedown;
+               goto err_teardown;
        }
 
        ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@@ -3270,8 +3270,8 @@ err_free_fdbs:
 err_free_ports:
        kfree(ethsw->ports);
 
-err_takedown:
-       dpaa2_switch_takedown(sw_dev);
+err_teardown:
+       dpaa2_switch_teardown(sw_dev);
 
 err_free_cmdport:
        fsl_mc_portal_free(ethsw->mc_io);
index 532523069d74b33fe025ad277a5120f1b56549cc..80461ab0ce9e7f08c91dc81442282ed4d8423314 100644 (file)
@@ -938,20 +938,19 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
        return 0;
 }
 
-static int hns3_dbg_get_cmd_index(struct hnae3_handle *handle,
-                                 const unsigned char *name, u32 *index)
+static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
 {
        u32 i;
 
        for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
-               if (!strncmp(name, hns3_dbg_cmd[i].name,
-                            strlen(hns3_dbg_cmd[i].name))) {
+               if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
                        *index = i;
                        return 0;
                }
        }
 
-       dev_err(&handle->pdev->dev, "unknown command(%s)\n", name);
+       dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
+               dbg_data->cmd);
        return -EINVAL;
 }
 
@@ -1019,8 +1018,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
        u32 index;
        int ret;
 
-       ret = hns3_dbg_get_cmd_index(handle, filp->f_path.dentry->d_iname,
-                                    &index);
+       ret = hns3_dbg_get_cmd_index(dbg_data, &index);
        if (ret)
                return ret;
 
@@ -1090,6 +1088,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
                char name[HNS3_DBG_FILE_NAME_LEN];
 
                data[i].handle = handle;
+               data[i].cmd = hns3_dbg_cmd[cmd].cmd;
                data[i].qid = i;
                sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
                debugfs_create_file(name, 0400, entry_dir, &data[i],
@@ -1110,6 +1109,7 @@ hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
                return -ENOMEM;
 
        data->handle = handle;
+       data->cmd = hns3_dbg_cmd[cmd].cmd;
        entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
        debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
                            data, &hns3_dbg_fops);
index f3766ff38bb7ab7d4fcd1f5f5a46d9c460b333b7..bd8801065e024823afb6f2f6d3d559ded8721551 100644 (file)
@@ -22,6 +22,7 @@ struct hns3_dbg_item {
 
 struct hns3_dbg_data {
        struct hnae3_handle *handle;
+       enum hnae3_dbg_cmd cmd;
        u16 qid;
 };
 
index 887297e37cf3399f35e10876cadc5fbfdae2101d..eb748aa35952cdfbe0d762b4951d3bcb81129750 100644 (file)
@@ -573,9 +573,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
 
 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
+       set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+       /* Wait to ensure that the firmware completes any commands that
+        * may still be left over.
+        */
+       msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
-       set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
        hclge_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
index 18bde77ef944254c66f62606e194856b4e0b16c0..ac70d49e205d0c8cd755b7d7cf2bd55fb2ddfce2 100644 (file)
@@ -9,6 +9,7 @@
 #include "hnae3.h"
 
 #define HCLGE_CMDQ_TX_TIMEOUT          30000
+#define HCLGE_CMDQ_CLEAR_WAIT_TIME     200
 #define HCLGE_DESC_DATA_LEN            6
 
 struct hclge_dev;
@@ -270,6 +271,9 @@ enum hclge_opcode_type {
        /* Led command */
        HCLGE_OPC_LED_STATUS_CFG        = 0xB000,
 
+       /* clear hardware resource command */
+       HCLGE_OPC_CLEAR_HW_RESOURCE     = 0x700B,
+
        /* NCL config command */
        HCLGE_OPC_QUERY_NCL_CONFIG      = 0x7011,
 
index 5bf5db91d16cc9aa1152d5ab6a88056b88e4d3af..39f56f245d843b703989c0fe0d9dc4e3ebac5654 100644 (file)
@@ -255,21 +255,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
        u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;
-       u8 i, j, pfc_map, *prio_tc;
        int ret;
+       u8 i;
 
        memset(pfc, 0, sizeof(*pfc));
        pfc->pfc_cap = hdev->pfc_max;
-       prio_tc = hdev->tm_info.prio_tc;
-       pfc_map = hdev->tm_info.hw_pfc_map;
-
-       /* Pfc setting is based on TC */
-       for (i = 0; i < hdev->tm_info.num_tc; i++) {
-               for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
-                       if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
-                               pfc->pfc_en |= BIT(j);
-               }
-       }
+       pfc->pfc_en = hdev->tm_info.pfc_en;
 
        ret = hclge_pfc_tx_stats_get(hdev, requests);
        if (ret)
index ebeaf12e409bcf0fd96d891c27fb2f3fd53e6159..03ae122f1c9ac3bd21357adc7644a907db29a4a4 100644 (file)
@@ -1550,6 +1550,7 @@ static int hclge_configure(struct hclge_dev *hdev)
        hdev->tm_info.hw_pfc_map = 0;
        hdev->wanted_umv_size = cfg.umv_space;
        hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
+       hdev->gro_en = true;
        if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
                set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
 
@@ -1618,7 +1619,7 @@ static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+static int hclge_config_gro(struct hclge_dev *hdev)
 {
        struct hclge_cfg_gro_status_cmd *req;
        struct hclge_desc desc;
@@ -1630,7 +1631,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
        req = (struct hclge_cfg_gro_status_cmd *)desc.data;
 
-       req->gro_en = en ? 1 : 0;
+       req->gro_en = hdev->gro_en ? 1 : 0;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -2952,12 +2953,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
        }
 
        if (state != hdev->hw.mac.link) {
+               hdev->hw.mac.link = state;
                client->ops->link_status_change(handle, state);
                hclge_config_mac_tnl_int(hdev, state);
                if (rclient && rclient->ops->link_status_change)
                        rclient->ops->link_status_change(rhandle, state);
 
-               hdev->hw.mac.link = state;
                hclge_push_link_status(hdev);
        }
 
@@ -10073,7 +10074,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                                       bool writen_to_tbl)
 {
-       struct hclge_vport_vlan_cfg *vlan;
+       struct hclge_vport_vlan_cfg *vlan, *tmp;
+
+       list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+               if (vlan->vlan_id == vlan_id)
+                       return;
 
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
@@ -11443,6 +11448,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
        }
 }
 
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
+{
+       struct hclge_desc desc;
+       int ret;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       /* This new command is only supported by new firmware; it will
+        * fail with older firmware. The error value -EOPNOTSUPP can only
+        * be returned by older firmware running this command, so to keep
+        * the code backward compatible we override this value and return
+        * success.
+        */
+       if (ret && ret != -EOPNOTSUPP) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to clear hw resource, ret = %d\n", ret);
+               return ret;
+       }
+       return 0;
+}
+
 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
 {
        if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
@@ -11492,6 +11519,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        if (ret)
                goto err_cmd_uninit;
 
+       ret  = hclge_clear_hw_resource(hdev);
+       if (ret)
+               goto err_cmd_uninit;
+
        ret = hclge_get_cap(hdev);
        if (ret)
                goto err_cmd_uninit;
@@ -11556,7 +11587,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                goto err_mdiobus_unreg;
        }
 
-       ret = hclge_config_gro(hdev, true);
+       ret = hclge_config_gro(hdev);
        if (ret)
                goto err_mdiobus_unreg;
 
@@ -11937,7 +11968,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
-       ret = hclge_config_gro(hdev, true);
+       ret = hclge_config_gro(hdev);
        if (ret)
                return ret;
 
@@ -12671,8 +12702,15 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
+       bool gro_en_old = hdev->gro_en;
+       int ret;
 
-       return hclge_config_gro(hdev, enable);
+       hdev->gro_en = enable;
+       ret = hclge_config_gro(hdev);
+       if (ret)
+               hdev->gro_en = gro_en_old;
+
+       return ret;
 }
 
 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
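
The gro_en changes in this file (and the hclgevf mirror below) cache the requested setting before issuing the firmware command and roll the cache back on failure, so the cached flag never disagrees with the hardware. A generic sketch of the cache-and-rollback idiom:

#include <stdbool.h>

struct dev_cfg { bool gro_en; };

static int push_config(struct dev_cfg *cfg)
{
        (void)cfg;      /* stand-in for the firmware command */
        return 0;
}

static int set_cached_flag(struct dev_cfg *cfg, bool enable)
{
        bool old = cfg->gro_en;
        int ret;

        cfg->gro_en = enable;           /* push_config() reads the cache */
        ret = push_config(cfg);
        if (ret)
                cfg->gro_en = old;      /* roll back so cache matches HW */
        return ret;
}
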
index 3d3352491dba412f2b3958a8ba47fa305a7a0538..e446b839a37154834892a0b7ea2a489dcafcbe1c 100644 (file)
@@ -927,6 +927,7 @@ struct hclge_dev {
        unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
        enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
        u8 fd_en;
+       bool gro_en;
 
        u16 wanted_umv_size;
        /* max available unicast mac vlan space */
index bd19a2d89f6cafe10f12a06d68b1528eee10dae7..d9ddb0a243d460fd6304e678a40a9eb004d80534 100644 (file)
@@ -507,12 +507,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
 
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
 {
+       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+       /* Wait to ensure that the firmware completes any commands that
+        * may still be left over.
+        */
+       msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock(&hdev->hw.cmq.crq.lock);
-       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
        hclgevf_cmd_uninit_regs(&hdev->hw);
        spin_unlock(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
 }
index 202feb70dba529bf51fbf25ff4cdeca94f84559d..5b82177f98b42cb51d898ef3aa3cae2790166c72 100644 (file)
@@ -8,6 +8,7 @@
 #include "hnae3.h"
 
 #define HCLGEVF_CMDQ_TX_TIMEOUT                30000
+#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME   200
 #define HCLGEVF_CMDQ_RX_INVLD_B                0
 #define HCLGEVF_CMDQ_RX_OUTVLD_B       1
 
index 8784d61e833f15a98777a13a49f308a8169f24e0..938654778979a17d3741b36b2bdae552e927d57f 100644 (file)
@@ -506,10 +506,10 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
        link_state =
                test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
        if (link_state != hdev->hw.mac.link) {
+               hdev->hw.mac.link = link_state;
                client->ops->link_status_change(handle, !!link_state);
                if (rclient && rclient->ops->link_status_change)
                        rclient->ops->link_status_change(rhandle, !!link_state);
-               hdev->hw.mac.link = link_state;
        }
 
        clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
@@ -2487,6 +2487,8 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
        int ret;
 
+       hdev->gro_en = true;
+
        ret = hclgevf_get_basic_info(hdev);
        if (ret)
                return ret;
@@ -2549,7 +2551,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
        return 0;
 }
 
-static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+static int hclgevf_config_gro(struct hclgevf_dev *hdev)
 {
        struct hclgevf_cfg_gro_status_cmd *req;
        struct hclgevf_desc desc;
@@ -2562,7 +2564,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
                                     false);
        req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
 
-       req->gro_en = en ? 1 : 0;
+       req->gro_en = hdev->gro_en ? 1 : 0;
 
        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -3308,7 +3310,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
                return ret;
        }
 
-       ret = hclgevf_config_gro(hdev, true);
+       ret = hclgevf_config_gro(hdev);
        if (ret)
                return ret;
 
@@ -3389,7 +3391,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
        if (ret)
                goto err_config;
 
-       ret = hclgevf_config_gro(hdev, true);
+       ret = hclgevf_config_gro(hdev);
        if (ret)
                goto err_config;
 
@@ -3638,8 +3640,15 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+       bool gro_en_old = hdev->gro_en;
+       int ret;
 
-       return hclgevf_config_gro(hdev, enable);
+       hdev->gro_en = enable;
+       ret = hclgevf_config_gro(hdev);
+       if (ret)
+               hdev->gro_en = gro_en_old;
+
+       return ret;
 }
 
 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
index d7d02848d674aad3af0ac245feb9e2b3a93316ef..e8013be055f89bb97e98ed7f03bdd301604f5158 100644 (file)
@@ -310,6 +310,8 @@ struct hclgevf_dev {
        u16 *vector_status;
        int *vector_irq;
 
+       bool gro_en;
+
        unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
 
        struct hclgevf_mac_table_cfg mac_table;
index 772b2f8acd2e83e8f98a3041648e4f59b8330009..b339b9bc0625a2e171686eb582f258bd56bbae45 100644 (file)
@@ -323,8 +323,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
                        flag = (u8)msg_q[5];
 
                        /* update upper layer with new link status */
-                       hclgevf_update_link_status(hdev, link_status);
                        hclgevf_update_speed_duplex(hdev, speed, duplex);
+                       hclgevf_update_link_status(hdev, link_status);
 
                        if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
                                set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
index cf7b3887da1d5822779befc94905249e9cd1e172..a80336c4319bb2f61a23c28ab3cf211134da28fa 100644 (file)
@@ -1006,6 +1006,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+       u16 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
+       u16 lat_enc_d = 0;      /* latency decoded */
        u16 lat_enc = 0;        /* latency encoded */
 
        if (link) {
@@ -1059,7 +1061,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
                                     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
                max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
 
-               if (lat_enc > max_ltr_enc)
+               lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
+                            (1U << (E1000_LTRV_SCALE_FACTOR *
+                            ((lat_enc & E1000_LTRV_SCALE_MASK)
+                            >> E1000_LTRV_SCALE_SHIFT)));
+
+               max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
+                                (1U << (E1000_LTRV_SCALE_FACTOR *
+                                ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
+                                >> E1000_LTRV_SCALE_SHIFT)));
+
+               if (lat_enc_d > max_ltr_enc_d)
                        lat_enc = max_ltr_enc;
        }
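
The new comparison is the whole fix: an LTR encoding packs a 10-bit magnitude and a 3-bit scale (units of 2^(5*scale) ns), so two encodings cannot be ordered as raw integers. Using the masks this series adds (listed in the ich8lan.h hunk just below), a standalone decoder shows exactly the miscompare the old code made:

    #include <stdint.h>
    #include <stdio.h>

    #define LTRV_VALUE_MASK   0x000003FFu
    #define LTRV_SCALE_MASK   0x00001C00u
    #define LTRV_SCALE_SHIFT  10
    #define LTRV_SCALE_FACTOR 5

    /* decoded latency in nanoseconds: value * 2^(5 * scale) */
    static uint64_t ltr_decode(uint16_t enc)
    {
            uint64_t value = enc & LTRV_VALUE_MASK;
            unsigned int scale = (enc & LTRV_SCALE_MASK) >> LTRV_SCALE_SHIFT;

            return value << (LTRV_SCALE_FACTOR * scale);
    }

    int main(void)
    {
            /* raw 0x03FF > 0x0801, but decoded 1023 ns < 1024 ns */
            printf("%llu %llu\n",
                   (unsigned long long)ltr_decode(0x03FF),
                   (unsigned long long)ltr_decode(0x0801));
            return 0;
    }
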
 
@@ -4115,13 +4127,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
                return ret_val;
 
        if (!(data & valid_csum_mask)) {
-               data |= valid_csum_mask;
-               ret_val = e1000_write_nvm(hw, word, 1, &data);
-               if (ret_val)
-                       return ret_val;
-               ret_val = e1000e_update_nvm_checksum(hw);
-               if (ret_val)
-                       return ret_val;
+               e_dbg("NVM Checksum Invalid\n");
+
+               if (hw->mac.type < e1000_pch_cnp) {
+                       data |= valid_csum_mask;
+                       ret_val = e1000_write_nvm(hw, word, 1, &data);
+                       if (ret_val)
+                               return ret_val;
+                       ret_val = e1000e_update_nvm_checksum(hw);
+                       if (ret_val)
+                               return ret_val;
+               }
        }
 
        return e1000e_validate_nvm_checksum_generic(hw);
index 1502895eb45ddd996c9f75aefc4ac29ea61e16a5..e757896287eba1256d3838a7db8af4048c4b2347 100644 (file)
 
 /* Latency Tolerance Reporting */
 #define E1000_LTRV                     0x000F8
+#define E1000_LTRV_VALUE_MASK          0x000003FF
 #define E1000_LTRV_SCALE_MAX           5
 #define E1000_LTRV_SCALE_FACTOR                5
+#define E1000_LTRV_SCALE_SHIFT         10
+#define E1000_LTRV_SCALE_MASK          0x00001C00
 #define E1000_LTRV_REQ_SHIFT           15
 #define E1000_LTRV_NOSNOOP_SHIFT       16
 #define E1000_LTRV_SEND                        (1 << 30)
index 3f25bd8c4924ec21f884a27950a0fbe77b884c0b..10a83e5385c70342cbe8c100309ae9db1a4adbfe 100644 (file)
@@ -3663,8 +3663,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
 
        /* is DCB enabled at all? */
        if (vsi->tc_config.numtc == 1)
-               return i40e_swdcb_skb_tx_hash(netdev, skb,
-                                             netdev->real_num_tx_queues);
+               return netdev_pick_tx(netdev, skb, sb_dev);
 
        prio = skb->priority;
        hw = &vsi->back->hw;
index e8bd04100ecd0909f8688f73ed089c207ede2797..90793b36126e6fb32951e4e46d7611ccbc28d6b4 100644 (file)
@@ -136,6 +136,7 @@ struct iavf_q_vector {
 struct iavf_mac_filter {
        struct list_head list;
        u8 macaddr[ETH_ALEN];
+       bool is_new_mac;        /* filter is new, wait for PF decision */
        bool remove;            /* filter needs to be removed */
        bool add;               /* filter needs to be added */
 };
index 44bafedd09f286f6cf2d8551deaeefc1ac772bfa..606a01ce407390f58073550e5f1a4601e142c71f 100644 (file)
@@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
 
                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
+               f->is_new_mac = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
@@ -1506,11 +1507,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 
        iavf_map_rings_to_vectors(adapter);
-
-       if (RSS_AQ(adapter))
-               adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
-       else
-               err = iavf_init_rss(adapter);
 err:
        return err;
 }
@@ -2200,6 +2196,14 @@ continue_reset:
                        goto reset_err;
        }
 
+       if (RSS_AQ(adapter)) {
+               adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+       } else {
+               err = iavf_init_rss(adapter);
+               if (err)
+                       goto reset_err;
+       }
+
        adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
 
index 0eab3c43bdc59be945a4d3648f22691a9c765a53..3c735968e1b8527037a315a4bb74c3b6b930189d 100644 (file)
@@ -540,6 +540,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
        kfree(veal);
 }
 
+/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Clear the is_new_mac flag on filters the PF has accepted.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+       struct iavf_mac_filter *f, *ftmp;
+
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               f->is_new_mac = false;
+       }
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct iavf_mac_filter *f, *ftmp;
+
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+                       f->remove = false;
+
+               if (f->is_new_mac) {
+                       list_del(&f->list);
+                       kfree(f);
+               }
+       }
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
 /**
  * iavf_add_vlans
  * @adapter: adapter structure
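
The two helpers above implement a pending-acknowledgement scheme: filters added since the last request carry is_new_mac, an ACK from the PF clears the flag, and a NAK deletes exactly the flagged entries while leaving established filters alone. A toy model of that bookkeeping (the fixed-size table and names are illustrative; the driver uses a spinlock-protected linked list):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct filter { unsigned char mac[6]; bool is_new_mac; bool used; };
    static struct filter tbl[8];

    static void add_filter(const unsigned char *mac)
    {
            for (int i = 0; i < 8; i++)
                    if (!tbl[i].used) {
                            memcpy(tbl[i].mac, mac, 6);
                            tbl[i].is_new_mac = true;
                            tbl[i].used = true;
                            return;
                    }
    }

    static void pf_reply(bool ok)
    {
            for (int i = 0; i < 8; i++) {
                    if (!tbl[i].used || !tbl[i].is_new_mac)
                            continue;
                    if (ok)
                            tbl[i].is_new_mac = false; /* accepted */
                    else
                            tbl[i].used = false;       /* rejected */
            }
    }

    int main(void)
    {
            add_filter((unsigned char [6]){ 1, 2, 3, 4, 5, 6 });
            pf_reply(false);
            printf("slot0 used=%d\n", tbl[0].used); /* 0: dropped on NAK */
            return 0;
    }
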
@@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                case VIRTCHNL_OP_ADD_ETH_ADDR:
                        dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
                                iavf_stat_str(&adapter->hw, v_retval));
+                       iavf_mac_add_reject(adapter);
                        /* restore administratively set MAC address */
                        ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
                        break;
@@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                }
        }
        switch (v_opcode) {
-       case VIRTCHNL_OP_ADD_ETH_ADDR: {
+       case VIRTCHNL_OP_ADD_ETH_ADDR:
+               if (!v_retval)
+                       iavf_mac_add_ok(adapter);
                if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
                        ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-               }
                break;
        case VIRTCHNL_OP_GET_STATS: {
                struct iavf_eth_stats *stats =
index a450343fbb92d0c4bdef9a58e74e42cc1c4d8585..eadcb99583464f04c69412212742379683a895a9 100644 (file)
@@ -234,6 +234,7 @@ enum ice_pf_state {
        ICE_VFLR_EVENT_PENDING,
        ICE_FLTR_OVERFLOW_PROMISC,
        ICE_VF_DIS,
+       ICE_VF_DEINIT_IN_PROGRESS,
        ICE_CFG_BUSY,
        ICE_SERVICE_SCHED,
        ICE_SERVICE_DIS,
index 91b545ab8b8f7590539482e857dc6baf03f2b59b..7fe6e8ea39f0d3c4180c18c846add49e224cd2a2 100644 (file)
@@ -42,7 +42,9 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
 
        status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
        if (status)
-               return -EIO;
+               /* We failed to locate the PBA, so just skip this entry */
+               dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
+                       ice_stat_str(status));
 
        return 0;
 }
index ef8d1815af5618911a14cd9da714c791c47bc39e..fe2ded775f259d215462d41618120ff593a071d0 100644 (file)
@@ -191,6 +191,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
 
+       /* Under some circumstances, we might receive a request to delete our
+        * own device address from our uc list. Because we store the device
+        * address in the VSI's MAC filter list, we need to ignore such
+        * requests and not delete our device address from this list.
+        */
+       if (ether_addr_equal(addr, netdev->dev_addr))
+               return 0;
+
        if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
                                     ICE_FWD_TO_VSI))
                return -EINVAL;
@@ -4194,6 +4202,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        struct ice_hw *hw;
        int i, err;
 
+       if (pdev->is_virtfn) {
+               dev_err(dev, "can't probe a virtual function\n");
+               return -EINVAL;
+       }
+
        /* this driver uses devres, see
         * Documentation/driver-api/driver-model/devres.rst
         */
@@ -5119,7 +5132,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                return -EADDRNOTAVAIL;
 
        if (ether_addr_equal(netdev->dev_addr, mac)) {
-               netdev_warn(netdev, "already using mac %pM\n", mac);
+               netdev_dbg(netdev, "already using mac %pM\n", mac);
                return 0;
        }
 
@@ -5130,6 +5143,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                return -EBUSY;
        }
 
+       netif_addr_lock_bh(netdev);
        /* Clean up old MAC filter. Not an error if old filter doesn't exist */
        status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
        if (status && status != ICE_ERR_DOES_NOT_EXIST) {
@@ -5139,30 +5153,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 
        /* Add filter for new MAC. If filter exists, return success */
        status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
-       if (status == ICE_ERR_ALREADY_EXISTS) {
+       if (status == ICE_ERR_ALREADY_EXISTS)
                /* Although this MAC filter is already present in hardware it's
                 * possible in some cases (e.g. bonding) that dev_addr was
                 * modified outside of the driver and needs to be restored back
                 * to this value.
                 */
-               memcpy(netdev->dev_addr, mac, netdev->addr_len);
                netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
-               return 0;
-       }
-
-       /* error if the new filter addition failed */
-       if (status)
+       else if (status)
+               /* error if the new filter addition failed */
                err = -EADDRNOTAVAIL;
 
 err_update_filters:
        if (err) {
                netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
                           mac);
+               netif_addr_unlock_bh(netdev);
                return err;
        }
 
        /* change the netdev's MAC address */
        memcpy(netdev->dev_addr, mac, netdev->addr_len);
+       netif_addr_unlock_bh(netdev);
        netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
                   netdev->dev_addr);
 
index 5d5207b56ca905f394c6065824a2276d6136db80..9e3ddb9b8b5167acf8660811a8d0e266ce0d2756 100644 (file)
@@ -656,7 +656,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
         * maintaining phase
         */
        if (start_time < current_time)
-               start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+               start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
                                       NSEC_PER_SEC) * NSEC_PER_SEC + phase;
 
        start_time -= E810_OUT_PROP_DELAY_NS;
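
The one-word change restores the round-up identity: to land on the next whole-period boundary you add divisor-1 before dividing, and the constant added must equal the divisor (NSEC_PER_SEC here, not NSEC_PER_MSEC). Numerically:

    #include <inttypes.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* round current_time up to the next second, then apply phase */
    static uint64_t next_start(uint64_t current_time, uint64_t phase)
    {
            return (current_time + NSEC_PER_SEC - 1) / NSEC_PER_SEC
                   * NSEC_PER_SEC + phase;
    }

    int main(void)
    {
            /* 1.5 s rounds up to 2 s; with +NSEC_PER_MSEC-1 the divide
             * truncates to 1 s, i.e. a start time already in the past */
            printf("%" PRIu64 "\n", next_start(1500000000ULL, 0));
            return 0;
    }
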
index 2826570dab51bffd26f23c6c51474ef36db8339a..e93430ab37f1e95473b4c26cc088d07270ac6344 100644 (file)
@@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf)
        struct ice_hw *hw = &pf->hw;
        unsigned int tmp, i;
 
+       set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
+
        if (!pf->vf)
                return;
 
@@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf)
                                i);
 
        clear_bit(ICE_VF_DIS, pf->state);
+       clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 }
 
@@ -4415,6 +4418,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
        struct device *dev;
        int err = 0;
 
+       /* if de-init is underway, don't process messages from VF */
+       if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
+               return;
+
        dev = ice_pf_to_dev(pf);
        if (ice_validate_vf_id(pf, vf_id)) {
                err = -EINVAL;
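
ICE_VF_DEINIT_IN_PROGRESS is a teardown guard: ice_free_vfs() raises it before freeing per-VF state, and the mailbox handler drops messages while it is set, so a late VF message cannot dereference structures being freed. In miniature (the atomic flag models the PF state bit; the real teardown also relies on synchronization this sketch omits):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool deinit_in_progress;

    static void process_vf_msg(void)
    {
            if (atomic_load(&deinit_in_progress))
                    return;         /* VF state may already be gone */
            printf("handled VF message\n");
    }

    static void free_vfs(void)
    {
            atomic_store(&deinit_in_progress, true);
            /* ... free per-VF state ... */
            atomic_store(&deinit_in_progress, false);
    }

    int main(void)
    {
            process_vf_msg();
            free_vfs();
            return 0;
    }
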
index e29aadbc67441864b32abff5e03ddacefac7cecc..ed2d66bc2d6c31d008aafcb52d0b6ee9a0a50fa2 100644 (file)
@@ -149,6 +149,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter)
        struct igc_hw *hw = &adapter->hw;
        u32 ctrl_ext;
 
+       if (!pci_device_is_present(adapter->pdev))
+               return;
+
        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(IGC_CTRL_EXT);
        wr32(IGC_CTRL_EXT,
@@ -4449,26 +4452,29 @@ void igc_down(struct igc_adapter *adapter)
 
        igc_ptp_suspend(adapter);
 
-       /* disable receives in the hardware */
-       rctl = rd32(IGC_RCTL);
-       wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
-       /* flush and sleep below */
-
+       if (pci_device_is_present(adapter->pdev)) {
+               /* disable receives in the hardware */
+               rctl = rd32(IGC_RCTL);
+               wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
+               /* flush and sleep below */
+       }
        /* set trans_start so we don't get spurious watchdogs during reset */
        netif_trans_update(netdev);
 
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
-       /* disable transmits in the hardware */
-       tctl = rd32(IGC_TCTL);
-       tctl &= ~IGC_TCTL_EN;
-       wr32(IGC_TCTL, tctl);
-       /* flush both disables and wait for them to finish */
-       wrfl();
-       usleep_range(10000, 20000);
+       if (pci_device_is_present(adapter->pdev)) {
+               /* disable transmits in the hardware */
+               tctl = rd32(IGC_TCTL);
+               tctl &= ~IGC_TCTL_EN;
+               wr32(IGC_TCTL, tctl);
+               /* flush both disables and wait for them to finish */
+               wrfl();
+               usleep_range(10000, 20000);
 
-       igc_irq_disable(adapter);
+               igc_irq_disable(adapter);
+       }
 
        adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
 
@@ -5489,7 +5495,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
                if (e->command != TC_TAPRIO_CMD_SET_GATES)
                        return false;
 
-               for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
                        if (e->gate_mask & BIT(i))
                                queue_uses[i]++;
 
@@ -5546,7 +5552,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 
                end_time += e->interval;
 
-               for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+               for (i = 0; i < adapter->num_tx_queues; i++) {
                        struct igc_ring *ring = adapter->tx_ring[i];
 
                        if (!(e->gate_mask & BIT(i)))
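
Every register access on these down/suspend paths is now gated on pci_device_is_present(): after surprise removal, MMIO reads return all-ones and writes go nowhere, so only the software-side teardown should proceed. A reduced model of the guard (device_present() is a stand-in, not a kernel call):

    #include <stdbool.h>
    #include <stdio.h>

    static bool device_present(void)
    {
            return false;   /* e.g. the adapter was hot-unplugged */
    }

    static void down_model(void)
    {
            if (device_present()) {
                    /* register reads/writes would go here */
                    printf("disabled rx/tx in hardware\n");
            }
            /* software-only teardown is always safe */
            printf("stopped queues, freed resources\n");
    }

    int main(void)
    {
            down_model();
            return 0;
    }
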
index 69617d2c1be23f00549a07e98aa2260088bade2d..4ae19c6a32477c8088d841c53f6841ceda13334d 100644 (file)
@@ -849,7 +849,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
        adapter->ptp_tx_skb = NULL;
        clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
 
-       igc_ptp_time_save(adapter);
+       if (pci_device_is_present(adapter->pdev))
+               igc_ptp_time_save(adapter);
 }
 
 /**
index 96dd1a4f956a7ca4c181d8e07a8d293c792f6dda..b1d22e4d5ec9cab3a215c72244def0b1d24688d2 100644 (file)
@@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
 
                /* Kick start the NAPI context so that receiving will start */
                err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
-               if (err)
+               if (err) {
+                       clear_bit(qid, adapter->af_xdp_zc_qps);
+                       xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
                        return err;
+               }
        }
 
        return 0;
index 76a7777c746dacb530515d0a56c033cd92c844cd..de32e5b49053b51dbe7b13c03054067daacae1e0 100644 (file)
 #define        MVNETA_VLAN_PRIO_TO_RXQ                  0x2440
 #define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
 #define MVNETA_PORT_STATUS                       0x2444
-#define      MVNETA_TX_IN_PRGRS                  BIT(1)
+#define      MVNETA_TX_IN_PRGRS                  BIT(0)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 /* Only exists on Armada XP and Armada 370 */
index b9fbc9f000f2fd53ba6c42b01293ba272323adc8..cf8acabb90ac104b3c648e6de41d316265af1531 100644 (file)
@@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format {
 #define MVPP2_BM_COOKIE_POOL_OFFS      8
 #define MVPP2_BM_COOKIE_CPU_OFFS       24
 
-#define MVPP2_BM_SHORT_FRAME_SIZE      704     /* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE      736     /* frame size 128 */
 #define MVPP2_BM_LONG_FRAME_SIZE       2240    /* frame size 1664 */
 #define MVPP2_BM_JUMBO_FRAME_SIZE      10432   /* frame size 9856 */
 /* BM short pool packet size
index 0b3e8f2db294990b7abe1fc01f25fa49e8a5f2b4..9a309169dbae9c63ce571391f5e3950211fbb06d 100644 (file)
@@ -748,7 +748,7 @@ static void
 prestera_fdb_offload_notify(struct prestera_port *port,
                            struct switchdev_notifier_fdb_info *info)
 {
-       struct switchdev_notifier_fdb_info send_info;
+       struct switchdev_notifier_fdb_info send_info = {};
 
        send_info.addr = info->addr;
        send_info.vid = info->vid;
@@ -1123,7 +1123,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
 static void prestera_fdb_event(struct prestera_switch *sw,
                               struct prestera_event *evt, void *arg)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        struct net_device *dev = NULL;
        struct prestera_port *port;
        struct prestera_lag *lag;
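
This "= {}" initializer, repeated for every switchdev_notifier_fdb_info in the series, is the standard fix for a struct that grew members some callers never assign: without it, notifier consumers read indeterminate stack bytes. A self-contained illustration (the extra members are hypothetical; only addr and vid appear in the hunks, and { } is the GNU empty initializer the kernel allows, where portable C would write {0}):

    #include <stdbool.h>
    #include <stdio.h>

    struct fdb_info {
            const char *addr;
            unsigned short vid;
            bool added_by_user;     /* newer member old callers skip */
            bool is_local;          /* ditto */
    };

    static void notify(const struct fdb_info *info)
    {
            /* consumers may read every member, not just addr/vid */
            printf("vid=%u local=%d\n", info->vid, info->is_local);
    }

    int main(void)
    {
            struct fdb_info info = {};      /* zero-fills all members */

            info.addr = "00:11:22:33:44:55";
            info.vid = 10;
            notify(&info);                  /* is_local is a defined 0 */
            return 0;
    }
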
index df3e4938ecdd9a920d1462b67ed579a7de0d8295..360e093874d4fc00ddf68aecf8353d204011224a 100644 (file)
@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                              cq->cqn);
 
        cq->uar = dev->priv.uar;
+       cq->irqn = eq->core.irqn;
 
        return 0;
 
index 01a1d02dcf15d6d93f8b72f2ec9bb1236ebc67c1..3f8a98093f8cb79606698247da2ad4f2147f809a 100644 (file)
@@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
        MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
        mlx5_eq_notifier_register(dev, &tracer->nb);
 
-       mlx5_fw_tracer_start(tracer);
-
+       err = mlx5_fw_tracer_start(tracer);
+       if (err) {
+               mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
+               goto err_notifier_unregister;
+       }
        return 0;
 
+err_notifier_unregister:
+       mlx5_eq_notifier_unregister(dev, &tracer->nb);
+       mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
        mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+       cancel_work_sync(&tracer->read_fw_strings_work);
        return err;
 }
 
index 8f79f04eccd61f7ce55864ea225735ae154b5b4a..1e2d117082d47ab1ad066f584d53a0b36ba45d9b 100644 (file)
@@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
+       if (rt->rt_type != RTN_UNICAST) {
+               ret = -ENETUNREACH;
+               goto err_rt_release;
+       }
+
        if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
                ret = -ENETUNREACH;
                goto err_rt_release;
index 37c4408379459b8591f1a08520fa5346178cf989..24f919ef9b8e46a506ae5f3d28f5f7a5c8c004fd 100644 (file)
@@ -1535,15 +1535,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
-       int eqn_not_used;
-       unsigned int irqn;
        int err;
        u32 i;
 
-       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-       if (err)
-               return err;
-
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
@@ -1557,7 +1551,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
-       mcq->irqn       = irqn;
 
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1605,11 +1598,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        void *in;
        void *cqc;
        int inlen;
-       unsigned int irqn_not_used;
        int eqn;
        int err;
 
-       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
        if (err)
                return err;
 
@@ -1891,30 +1883,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        if (err)
                goto err_close_icosq;
 
+       err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
+       if (err)
+               goto err_close_sqs;
+
        if (c->xdp) {
                err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
                                       &c->rq_xdpsq, false);
                if (err)
-                       goto err_close_sqs;
+                       goto err_close_rq;
        }
 
-       err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
-       if (err)
-               goto err_close_xdp_sq;
-
        err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
        if (err)
-               goto err_close_rq;
+               goto err_close_xdp_sq;
 
        return 0;
 
-err_close_rq:
-       mlx5e_close_rq(&c->rq);
-
 err_close_xdp_sq:
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
 
+err_close_rq:
+       mlx5e_close_rq(&c->rq);
+
 err_close_sqs:
        mlx5e_close_sqs(c);
 
@@ -1949,9 +1941,9 @@ err_close_async_icosq_cq:
 static void mlx5e_close_queues(struct mlx5e_channel *c)
 {
        mlx5e_close_xdpsq(&c->xdpsq);
-       mlx5e_close_rq(&c->rq);
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
+       mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
        mlx5e_close_icosq(&c->async_icosq);
@@ -1983,9 +1975,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        struct mlx5e_channel *c;
        unsigned int irq;
        int err;
-       int eqn;
 
-       err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+       err = mlx5_vector2irqn(priv->mdev, ix, &irq);
        if (err)
                return err;
 
index 6e074cc457de103398e953fa70df3b3fa4f8f97b..605c8ecc3610f71360e4f0354effb35eb1ad15c3 100644 (file)
@@ -855,8 +855,8 @@ clean:
        return err;
 }
 
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-                   unsigned int *irqn)
+static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+                         unsigned int *irqn)
 {
        struct mlx5_eq_table *table = dev->priv.eq_table;
        struct mlx5_eq_comp *eq, *n;
@@ -865,8 +865,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (i++ == vector) {
-                       *eqn = eq->core.eqn;
-                       *irqn = eq->core.irqn;
+                       if (irqn)
+                               *irqn = eq->core.irqn;
+                       if (eqn)
+                               *eqn = eq->core.eqn;
                        err = 0;
                        break;
                }
@@ -874,8 +876,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 
        return err;
 }
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
+{
+       return vector2eqnirqn(dev, vector, eqn, NULL);
+}
 EXPORT_SYMBOL(mlx5_vector2eqn);
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+{
+       return vector2eqnirqn(dev, vector, NULL, irqn);
+}
+
 unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 {
        return dev->priv.eq_table->num_comp_eqs;
index a6e1d4f7826835c6d90c42284115439c7b842687..69a3630818d711abaf28c497254ba5507560ffa3 100644 (file)
@@ -69,7 +69,7 @@ static void
 mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
                                   unsigned long val)
 {
-       struct switchdev_notifier_fdb_info send_info;
+       struct switchdev_notifier_fdb_info send_info = {};
 
        send_info.addr = addr;
        send_info.vid = vid;
@@ -579,7 +579,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
        xa_init(&bridge->vports);
        bridge->ifindex = ifindex;
        bridge->refcnt = 1;
-       bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+       bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
        list_add(&bridge->list, &br_offloads->bridges);
 
        return bridge;
@@ -1006,7 +1006,7 @@ int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswit
        if (!vport->bridge)
                return -EINVAL;
 
-       vport->bridge->ageing_time = ageing_time;
+       vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time);
        return 0;
 }
 
index 794012c5c476593ba6ac35da6b51bf66734b78da..d3ad78aa9d4508834aafe5646fcdb90d759e2763 100644 (file)
@@ -501,6 +501,7 @@ err_sampler:
 err_offload_rule:
        mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
 err_default_tbl:
+       kfree(sample_flow);
        return ERR_PTR(err);
 }
 
index 011e766e4f675a0727155ed32357bd8f14de54d9..3bb71a1860042a753030032f0a5a4406af9b6e09 100644 (file)
@@ -48,6 +48,7 @@
 #include "lib/fs_chains.h"
 #include "en_tc.h"
 #include "en/mapping.h"
+#include "devlink.h"
 
 #define mlx5_esw_for_each_rep(esw, i, rep) \
        xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -3001,12 +3002,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
        if (cur_mlx5_mode == mlx5_mode)
                goto unlock;
 
-       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+               if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Can't change mode while devlink traps are active");
+                       err = -EOPNOTSUPP;
+                       goto unlock;
+               }
                err = esw_offloads_start(esw, extack);
-       else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+       } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
                err = esw_offloads_stop(esw, extack);
-       else
+       } else {
                err = -EINVAL;
+       }
 
 unlock:
        mlx5_esw_unlock(esw);
index bd66ab2af5b5419c9bac13f66b7b122c043377fc..d5da4ab65766da03d8d2eb894fa177e4cf3ca247 100644 (file)
@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        struct mlx5_wq_param wqp;
        struct mlx5_cqe64 *cqe;
        int inlen, err, eqn;
-       unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        u32 i;
@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
                goto err_cqwq;
        }
 
-       err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        *conn->cq.mcq.arm_db    = 0;
        conn->cq.mcq.vector     = 0;
        conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
-       conn->cq.mcq.irqn       = irqn;
        conn->cq.mcq.uar        = fdev->conn_res.uar;
        tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
 
index 624cedebb5108840bcde50f5ec512904b06923ef..d3d628b862f3764abd32269f102d489d19b986bd 100644 (file)
@@ -104,4 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
 #endif
 
+int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+
 #endif
index eb1b316560a888622d4e06933c856f00c70fbfd2..c84ad87c99bb6b8a1f19c6b03d3b46c5ef08158e 100644 (file)
@@ -1784,16 +1784,14 @@ static int __init init(void)
        if (err)
                goto err_sf;
 
-#ifdef CONFIG_MLX5_CORE_EN
        err = mlx5e_init();
-       if (err) {
-               pci_unregister_driver(&mlx5_core_driver);
-               goto err_debug;
-       }
-#endif
+       if (err)
+               goto err_en;
 
        return 0;
 
+err_en:
+       mlx5_sf_driver_unregister();
 err_sf:
        pci_unregister_driver(&mlx5_core_driver);
 err_debug:
@@ -1803,9 +1801,7 @@ err_debug:
 
 static void __exit cleanup(void)
 {
-#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_cleanup();
-#endif
        mlx5_sf_driver_unregister();
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_unregister_debugfs();
index 343807ac20364edb8c33329c05b883965addc999..da365b8f014155d52d2be840aa1b4bb169667132 100644 (file)
@@ -206,8 +206,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
 int mlx5_fw_version_query(struct mlx5_core_dev *dev,
                          u32 *running_ver, u32 *stored_ver);
 
+#ifdef CONFIG_MLX5_CORE_EN
 int mlx5e_init(void);
 void mlx5e_cleanup(void);
+#else
+static inline int mlx5e_init(void) { return 0; }
+static inline void mlx5e_cleanup(void) { }
+#endif
 
 static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
 {
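
These inline stubs are the counterpart of the main.c hunk above: with no-op fallbacks defined in the header when CONFIG_MLX5_CORE_EN is off, the call sites in init()/cleanup() stay unconditional and their preprocessor blocks can be deleted. The idiom in miniature (CONFIG_EN is a stand-in for the Kconfig symbol):

    #include <stdio.h>

    #define CONFIG_EN 1     /* flip to 0 to emulate the option off */

    #if CONFIG_EN
    static int en_init(void) { printf("en up\n"); return 0; }
    static void en_cleanup(void) { printf("en down\n"); }
    #else
    static inline int en_init(void) { return 0; }   /* compiles away */
    static inline void en_cleanup(void) { }
    #endif

    int main(void)
    {
            if (en_init())  /* caller needs no #ifdef */
                    return 1;
            en_cleanup();
            return 0;
    }
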
index b25f764daa0889a1f848c6e109a9ae1cbe02378c..3465b363fc2fe3f34b721f71de37041f3897c25f 100644 (file)
@@ -214,6 +214,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
                err = -ENOMEM;
                goto err_cpumask;
        }
+       irq->pool = pool;
        kref_init(&irq->kref);
        irq->index = i;
        err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
@@ -222,7 +223,6 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
                              irq->index, err);
                goto err_xa;
        }
-       irq->pool = pool;
        return irq;
 err_xa:
        free_cpumask_var(irq->mask);
@@ -251,8 +251,11 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 {
+       int err = 0;
+
+       err = atomic_notifier_chain_unregister(&irq->nh, nb);
        irq_put(irq);
-       return atomic_notifier_chain_unregister(&irq->nh, nb);
+       return err;
 }
 
 struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
@@ -437,6 +440,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
        if (!pool)
                return ERR_PTR(-ENOMEM);
        pool->dev = dev;
+       mutex_init(&pool->lock);
        xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
        pool->xa_num_irqs.min = start;
        pool->xa_num_irqs.max = start + size - 1;
@@ -445,7 +449,6 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
                         name);
        pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
        pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
-       mutex_init(&pool->lock);
        mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
                      name, size, start);
        return pool;
@@ -459,6 +462,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
        xa_for_each(&pool->irqs, index, irq)
                irq_release(&irq->kref);
        xa_destroy(&pool->irqs);
+       mutex_destroy(&pool->lock);
        kvfree(pool);
 }
 
index 12cf323a59430ff6379eacea8b98d7ec9d67a720..9df0e73d1c358d7c3a96382740115fbecb3ef119 100644 (file)
@@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        struct mlx5_cqe64 *cqe;
        struct mlx5dr_cq *cq;
        int inlen, err, eqn;
-       unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        int vector;
@@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
                goto err_cqwq;
 
        vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
-       err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, vector, &eqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
@@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        *cq->mcq.arm_db = cpu_to_be32(2 << 28);
 
        cq->mcq.vector = 0;
-       cq->mcq.irqn = irqn;
        cq->mcq.uar = uar;
 
        return cq;
index f1950e4968dad7f5852c3dcf3da338d415515ffe..e4dd4eed5aee9dcc808b3a189d7f56eb466dfd63 100644 (file)
@@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
 {
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_DECAP);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }
 
 static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
@@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
        MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
                 DR_STE_TUNL_ACTION_L3_DECAP);
        MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
 }
 
 static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
index 7e221ef014376ce8b300bad1c289ce4811743f14..f69cbb3852d59648e1d251a6e59b891176d72184 100644 (file)
@@ -9079,7 +9079,7 @@ mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
 
 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        struct net_device *dev;
 
        dev = br_fdb_find_port(rif->dev, mac, 0);
@@ -9127,8 +9127,8 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
 
 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
 {
+       struct switchdev_notifier_fdb_info info = {};
        u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
-       struct switchdev_notifier_fdb_info info;
        struct net_device *br_dev;
        struct net_device *dev;
 
index c5ef9aa64efe318c55d25440d383956d54285675..8f90cd323d5feb57ff1132bd6eb2759877f2b24a 100644 (file)
@@ -2508,7 +2508,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
                            const char *mac, u16 vid,
                            struct net_device *dev, bool offloaded)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = mac;
        info.vid = vid;
index 0443f66b5550bd5f063cbf9dab694d792077bf79..9a8e4f201eb1f7d3663c0c9a4be314d7bda2f334 100644 (file)
@@ -277,7 +277,7 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
                                      const char *mac, u16 vid,
                                      struct net_device *dev, bool offloaded)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = mac;
        info.vid = vid;
index adfb9781799eecfd0890dd979d37335eb71b2758..2948d731a1c1c5adfdc952cd4e766502da89f44a 100644 (file)
@@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
                        struct net_device *bond = ocelot_port->bond;
 
                        mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+                       mask |= cpu_fwd_mask;
                        mask &= ~BIT(port);
                        if (bond) {
                                mask &= ~ocelot_get_bond_mask(ocelot, bond,
index ea4e83410fe4d03a6fa15f770c0dab9f399271b9..7390fa3980ec5ec549da673b4506f1f3b532f82a 100644 (file)
@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
                    ocelot->map[target][reg & REG_MASK] + offset, &val);
        return val;
 }
-EXPORT_SYMBOL(__ocelot_read_ix);
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);
 
 void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
 {
@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
        regmap_write(ocelot->targets[target],
                     ocelot->map[target][reg & REG_MASK] + offset, val);
 }
-EXPORT_SYMBOL(__ocelot_write_ix);
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);
 
 void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                     u32 offset)
@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                           ocelot->map[target][reg & REG_MASK] + offset,
                           mask, val);
 }
-EXPORT_SYMBOL(__ocelot_rmw_ix);
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
 
 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
 {
@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
        regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
        return val;
 }
-EXPORT_SYMBOL(ocelot_port_readl);
+EXPORT_SYMBOL_GPL(ocelot_port_readl);
 
 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 {
@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 
        regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
 }
-EXPORT_SYMBOL(ocelot_port_writel);
+EXPORT_SYMBOL_GPL(ocelot_port_writel);
 
 void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 {
@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 
        ocelot_port_writel(port, (cur & (~mask)) | val, reg);
 }
-EXPORT_SYMBOL(ocelot_port_rmwl);
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
 
 u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
                            u32 reg, u32 offset)
@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,
 
        return 0;
 }
-EXPORT_SYMBOL(ocelot_regfields_init);
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);
 
 static struct regmap_config ocelot_regmap_config = {
        .reg_bits       = 32,
@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
 
        return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
 }
-EXPORT_SYMBOL(ocelot_regmap_init);
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);
index 02a4610d9330722e57939d34ff187c03abf18133..c46a7f756ed5f3c1e91e9bcb84f6e2632a8b0998 100644 (file)
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        unsigned long flags;
        int rc = -EINVAL;
 
+       if (!p_ll2_conn)
+               return rc;
+
        spin_lock_irqsave(&p_tx->lock, flags);
        if (p_tx->b_completing_packet) {
                rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
        unsigned long flags = 0;
        int rc = 0;
 
+       if (!p_ll2_conn)
+               return rc;
+
        spin_lock_irqsave(&p_rx->lock, flags);
+
+       if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+               spin_unlock_irqrestore(&p_rx->lock, flags);
+               return 0;
+       }
+
        cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
        cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
 
@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
        int rc;
 
+       if (!p_ll2_conn)
+               return 0;
+
        if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
                return 0;
 
@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
        u16 new_idx = 0, num_bds = 0;
        int rc;
 
+       if (!p_ll2_conn)
+               return 0;
+
        if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
                return 0;
 
@@ -1728,6 +1746,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
        if (!p_ll2_conn)
                return -EINVAL;
        p_rx = &p_ll2_conn->rx_queue;
+       if (!p_rx->set_prod_addr)
+               return -EIO;
 
        spin_lock_irqsave(&p_rx->lock, flags);
        if (!list_empty(&p_rx->free_descq))
index 5bd58c65e163115a32353ab0241e605ae738daeb..6bb9ec98a12b53b954cd5003e2c1024400138538 100644 (file)
@@ -616,7 +616,12 @@ static int qed_enable_msix(struct qed_dev *cdev,
                        rc = cnt;
        }
 
-       if (rc > 0) {
+       /* For VFs, we should return with an error in case we didn't get
+        * the exact number of MSI-X vectors that we requested.
+        * Not doing so will lead to a crash when starting queues for
+        * this VF.
+        */
+       if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
                /* MSI-x configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
index da864d12916b7705ac4e21e2158a4732dba372cd..4f4b79250a2b2fdc8a9b4f33605c3b5238789416 100644 (file)
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,
 
        if (!rdma_cxt || !in_params || !out_params ||
            !p_hwfn->p_rdma_info->active) {
-               DP_ERR(p_hwfn->cdev,
-                      "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+               pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
                       rdma_cxt, in_params, out_params);
                return NULL;
        }
index 7c6064baeba28a5a40ed0868e2b8e9f6e5144c05..1c7f9ed6f1c190a5ac4eff163a478cfd9ffc069f 100644 (file)
@@ -1874,6 +1874,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
        }
 
        edev->int_info.used_cnt = 0;
+       edev->int_info.msix_cnt = 0;
 }
 
 static int qede_req_msix_irqs(struct qede_dev *edev)
@@ -2427,7 +2428,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
        goto out;
 err4:
        qede_sync_free_irqs(edev);
-       memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
 err3:
        qede_napi_disable_remove(edev);
 err2:
index d8882d0b6b49834e46cbf40156ca0e2617d1c7af..d51bac7ba5afadca6df37a8761838432fba08b1f 100644 (file)
@@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
 
                indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
                ret = QLCRD32(adapter, indirect_addr, &err);
-               if (err == -EIO)
+               if (err == -EIO) {
+                       qlcnic_83xx_unlock_flash(adapter);
                        return err;
+               }
 
                word = ret;
                *(u32 *)p_data  = word;
index c7af5bc3b8afff79a45b029144d0d94f6fca9cf1..4d8e337f5085a0fe8b314b136dde8b8490ae2946 100644 (file)
@@ -3502,12 +3502,16 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
 
+       /* The default value is 0x13. Change it to 0x2f */
+       rtl_csi_access_enable(tp, 0x2f);
+
        rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
 
        /* disable EEE */
        rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
 
        rtl_pcie_state_l2l3_disable(tp);
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
index a46633606cae06327eaa823bdcbfc5b969093116..1f06b92ee5bbb74a3980d54a67c2f79aafce7e21 100644 (file)
@@ -2715,7 +2715,7 @@ static void
 rocker_fdb_offload_notify(struct rocker_port *rocker_port,
                          struct switchdev_notifier_fdb_info *recv_info)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = recv_info->addr;
        info.vid = recv_info->vid;
index 967a634ee9ac1b2e88c454f5b290bbc7482e09ee..e33a9d283a4e31701f688f8a2fb3b264fe362355 100644 (file)
@@ -1822,7 +1822,7 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work)
                container_of(work, struct ofdpa_fdb_learn_work, work);
        bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
        bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = lw->addr;
        info.vid = lw->vid;
index 280ac0129572e008a67cdeb60bdb4a704acf99ee..ed817011a94a09e68d353344756e322c84c1eb2c 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/delay.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
-#include <linux/pm_runtime.h>
 
 #include "stmmac_platform.h"
 
@@ -1529,9 +1528,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
                return ret;
        }
 
-       pm_runtime_enable(dev);
-       pm_runtime_get_sync(dev);
-
        if (bsp_priv->integrated_phy)
                rk_gmac_integrated_phy_powerup(bsp_priv);
 
@@ -1540,14 +1536,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
 
 static void rk_gmac_powerdown(struct rk_priv_data *gmac)
 {
-       struct device *dev = &gmac->pdev->dev;
-
        if (gmac->integrated_phy)
                rk_gmac_integrated_phy_powerdown(gmac);
 
-       pm_runtime_put_sync(dev);
-       pm_runtime_disable(dev);
-
        phy_power_on(gmac, false);
        gmac_clk_enable(gmac, false);
 }
index fcdb1d20389b346ad4a4d62fe26cd3efef9e137e..43eead726886a0a13a1c749a300a353f9e7e9ebd 100644 (file)
@@ -339,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
 static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
 {
        if (stmmac_xdp_is_enabled(priv))
-               return XDP_PACKET_HEADROOM + NET_IP_ALIGN;
+               return XDP_PACKET_HEADROOM;
 
-       return NET_SKB_PAD + NET_IP_ALIGN;
+       return 0;
 }
 
 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
index 7b8404a21544cf29668e8a14240c3971e6bce0c3..fa90bcdf4e45577a9302e9ddc931fb5ea8646b12 100644 (file)
@@ -4914,6 +4914,10 @@ read_again:
 
                prefetch(np);
 
+               /* Ensure a valid XSK buffer before proceeding */
+               if (!buf->xdp)
+                       break;
+
                if (priv->extend_desc)
                        stmmac_rx_extended_status(priv, &priv->dev->stats,
                                                  &priv->xstats,
@@ -4934,10 +4938,6 @@ read_again:
                        continue;
                }
 
-               /* Ensure a valid XSK buffer before proceed */
-               if (!buf->xdp)
-                       break;
-
                /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
                if (likely(status & rx_not_ls)) {
                        xsk_buff_free(buf->xdp);
index 4f3b6437b11455e0ab897c209c0abc74d42ee28d..8160087ee92f2fd4932581f1169635c0156b0177 100644 (file)
@@ -884,11 +884,13 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
        return 0;
 
 disable:
-       mutex_lock(&priv->plat->est->lock);
-       priv->plat->est->enable = false;
-       stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
-                            priv->plat->clk_ptp_rate);
-       mutex_unlock(&priv->plat->est->lock);
+       if (priv->plat->est) {
+               mutex_lock(&priv->plat->est->lock);
+               priv->plat->est->enable = false;
+               stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+                                    priv->plat->clk_ptp_rate);
+               mutex_unlock(&priv->plat->est->lock);
+       }
 
        priv->plat->fpe_cfg->enable = false;
        stmmac_fpe_configure(priv, priv->ioaddr,
index 105821b53020b8d9af9759b52b3d4ba23450ddc9..2a616c6f7cd0ec64880f413d8ecd781b2503f883 100644 (file)
@@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
        need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
        if (need_update) {
-               stmmac_disable_rx_queue(priv, queue);
-               stmmac_disable_tx_queue(priv, queue);
                napi_disable(&ch->rx_napi);
                napi_disable(&ch->tx_napi);
+               stmmac_disable_rx_queue(priv, queue);
+               stmmac_disable_tx_queue(priv, queue);
        }
 
        set_bit(queue, priv->af_xdp_zc_qps);
 
        if (need_update) {
-               napi_enable(&ch->rxtx_napi);
                stmmac_enable_rx_queue(priv, queue);
                stmmac_enable_tx_queue(priv, queue);
+               napi_enable(&ch->rxtx_napi);
 
                err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
                if (err)
@@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
        need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
        if (need_update) {
+               napi_disable(&ch->rxtx_napi);
                stmmac_disable_rx_queue(priv, queue);
                stmmac_disable_tx_queue(priv, queue);
                synchronize_rcu();
-               napi_disable(&ch->rxtx_napi);
        }
 
        xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
@@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
        clear_bit(queue, priv->af_xdp_zc_qps);
 
        if (need_update) {
-               napi_enable(&ch->rx_napi);
-               napi_enable(&ch->tx_napi);
                stmmac_enable_rx_queue(priv, queue);
                stmmac_enable_tx_queue(priv, queue);
+               napi_enable(&ch->rx_napi);
+               napi_enable(&ch->tx_napi);
        }
 
        return 0;
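
Both hunks impose the same symmetry: polling stops before the queues it services are torn down, and queues are fully re-armed before polling resumes, so the NAPI handler never runs against a half-configured ring. The intended order, reduced to stand-in stubs:

    #include <stdio.h>

    static void napi_off(void)  { printf("napi off\n"); }
    static void napi_on(void)   { printf("napi on\n"); }
    static void queue_off(void) { printf("queue off\n"); }
    static void queue_on(void)  { printf("queue on\n"); }

    int main(void)
    {
            /* teardown: stop the poller first, then its ring */
            napi_off();
            queue_off();

            /* bring-up is the mirror image */
            queue_on();
            napi_on();
            return 0;
    }
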
index 9c29b363e9aecc8265e29dc736357da42cb07a5b..599708a3e81dbf9d25ae88b2485388598896f83a 100644 (file)
@@ -358,7 +358,7 @@ static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
                                         struct switchdev_notifier_fdb_info *rcv)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = rcv->addr;
        info.vid = rcv->vid;
index 57d279fdcc9f774a9bedfe13709ab1c1a970b1c4..d1d02001cef6ea38a5ab014f32cb091fd03f69df 100644 (file)
@@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
        struct cpdma_chan *txch;
        int ret, q_idx;
 
-       if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+       if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
                cpsw_err(priv, tx_err, "packet pad failed\n");
                ndev->stats.tx_dropped++;
                return NET_XMIT_DROP;
@@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 
        for (i = 0; i < n; i++) {
                xdpf = frames[i];
-               if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+               if (xdpf->len < READ_ONCE(priv->tx_packet_min))
                        break;
 
                if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
@@ -1389,6 +1389,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
                priv->dev  = dev;
                priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
                priv->emac_port = i + 1;
+               priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
 
                if (is_valid_ether_addr(slave_data->mac_addr)) {
                        ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
@@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
                        priv = netdev_priv(sl_ndev);
                        slave->port_vlan = vlan;
+                       WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
                        if (netif_running(sl_ndev))
                                cpsw_port_add_switch_def_ale_entries(priv,
                                                                     slave);
@@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
                        priv = netdev_priv(slave->ndev);
                        slave->port_vlan = slave->data->dual_emac_res_vlan;
+                       WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
                        cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
                }
 
index a323bea54faa21b50e0110ae2858d48488c72d21..2951fb7b9dae785810c4607e7453fb9281eb157e 100644 (file)
@@ -89,7 +89,8 @@ do {                                                          \
 
 #define CPSW_POLL_WEIGHT       64
 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE            4
-#define CPSW_MIN_PACKET_SIZE   (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE_VLAN      (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE   (ETH_ZLEN)
 #define CPSW_MAX_PACKET_SIZE   (VLAN_ETH_FRAME_LEN +\
                                 ETH_FCS_LEN +\
                                 CPSW_RX_VLAN_ENCAP_HDR_SIZE)
@@ -380,6 +381,7 @@ struct cpsw_priv {
        u32 emac_port;
        struct cpsw_common *cpsw;
        int offload_fwd_mark;
+       u32 tx_packet_min;
 };
 
 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
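
tx_packet_min is written from the devlink mode-switch path while the TX hot path reads it with no lock held; READ_ONCE()/WRITE_ONCE() keep the compiler from tearing or caching those plain accesses. The single-writer/lockless-reader pairing in brief, reusing the field added above:

    /* control path: the one writer */
    WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);

    /* TX hot path: many lockless readers */
    if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min)))
            return NET_XMIT_DROP;   /* skb_put_padto() already freed skb */

The runtime field exists because the required minimum differs by mode: switch mode carries a VLAN tag and needs VLAN_ETH_ZLEN, dual-EMAC mode only ETH_ZLEN.
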
index f7fb6e17dadd52b9db09174ee161cf8b9c7a6225..a7d97d429e06834b0f238d04898b41d0a5f6e3be 100644 (file)
@@ -368,7 +368,7 @@ static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
 static void cpsw_fdb_offload_notify(struct net_device *ndev,
                                    struct switchdev_notifier_fdb_info *rcv)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        info.addr = rcv->addr;
        info.vid = rcv->vid;
index fcf3af76b6d7b757ee3293df2fd7e87f2fe83cbe..8fe8887d506a3e269d34542b75486624bdc03a3e 100644 (file)
@@ -827,6 +827,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
                return;
        }
 
+       if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
+               pr_err("6pack: cooked buffer overrun, data loss\n");
+               sp->rx_count = 0;
+               return;
+       }
+
        buf = sp->raw_buf;
        sp->cooked_buf[sp->rx_count_cooked++] =
                buf[0] | ((buf[1] << 2) & 0xc0);
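
decode_data() emits three cooked bytes per call, so the new guard bails out once fewer than three slots remain in cooked_buf rather than writing past the array. The bounds check in isolation, against a hypothetical buffer:

    static unsigned char cooked[400];
    static size_t n;

    static bool push3(unsigned char a, unsigned char b, unsigned char c)
    {
            if (n + 2 >= sizeof(cooked))   /* need slots n, n+1 and n+2 */
                    return false;          /* caller drops the frame */
            cooked[n++] = a;
            cooked[n++] = b;
            cooked[n++] = c;
            return true;
    }
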
index ebc976b7fcc2adcd7cb27589e0981a21bde4241c..8caa61ec718f503d0731262446a88805ff5e06f3 100644 (file)
@@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
        struct hwsim_edge *e;
        u32 v0, v1;
 
-       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
@@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
        u32 v0, v1;
        u8 lqi;
 
-       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+       if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
        if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
                return -EINVAL;
 
-       if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
+       if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
            !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
                return -EINVAL;
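
All three hunks fix the same inverted test: with `&&` the handler only bailed out when both mandatory attributes were absent, so a request supplying exactly one of them reached the nla_* accessors with a NULL attribute. For N required netlink attributes the check is a chain of negated ORs (names illustrative):

    /* reject unless every mandatory attribute is present */
    if (!attrs[ATTR_ID] || !attrs[ATTR_EDGE])
            return -EINVAL;
    /* equivalent by De Morgan: !(attrs[ATTR_ID] && attrs[ATTR_EDGE]) */
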
 
index 110e4ee85785cce29f9a9b6c06556e6c480afa60..ebd001f0eece4601d00de95748a1d29eab2df142 100644 (file)
@@ -82,6 +82,17 @@ out:
 
 static int parent_count;
 
+static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+{
+       struct mdio_mux_child_bus *cb = pb->children;
+
+       while (cb) {
+               mdiobus_unregister(cb->mii_bus);
+               mdiobus_free(cb->mii_bus);
+               cb = cb->next;
+       }
+}
+
 int mdio_mux_init(struct device *dev,
                  struct device_node *mux_node,
                  int (*switch_fn)(int cur, int desired, void *data),
@@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
                cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
                if (!cb) {
                        ret_val = -ENOMEM;
-                       continue;
+                       goto err_loop;
                }
                cb->bus_number = v;
                cb->parent = pb;
@@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus = mdiobus_alloc();
                if (!cb->mii_bus) {
                        ret_val = -ENOMEM;
-                       devm_kfree(dev, cb);
-                       continue;
+                       goto err_loop;
                }
                cb->mii_bus->priv = cb;
 
@@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus->write = mdio_mux_write;
                r = of_mdiobus_register(cb->mii_bus, child_bus_node);
                if (r) {
+                       mdiobus_free(cb->mii_bus);
+                       if (r == -EPROBE_DEFER) {
+                               ret_val = r;
+                               goto err_loop;
+                       }
+                       devm_kfree(dev, cb);
                        dev_err(dev,
                                "Error: Failed to register MDIO bus for child %pOF\n",
                                child_bus_node);
-                       mdiobus_free(cb->mii_bus);
-                       devm_kfree(dev, cb);
                } else {
                        cb->next = pb->children;
                        pb->children = cb;
@@ -181,7 +195,10 @@ int mdio_mux_init(struct device *dev,
        }
 
        dev_err(dev, "Error: No acceptable child buses found\n");
-       devm_kfree(dev, pb);
+
+err_loop:
+       mdio_mux_uninit_children(pb);
+       of_node_put(child_bus_node);
 err_pb_kz:
        put_device(&parent_bus->dev);
 err_parent_bus:
@@ -193,14 +210,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
 void mdio_mux_uninit(void *mux_handle)
 {
        struct mdio_mux_parent_bus *pb = mux_handle;
-       struct mdio_mux_child_bus *cb = pb->children;
-
-       while (cb) {
-               mdiobus_unregister(cb->mii_bus);
-               mdiobus_free(cb->mii_bus);
-               cb = cb->next;
-       }
 
+       mdio_mux_uninit_children(pb);
        put_device(&pb->mii_bus->dev);
 }
 EXPORT_SYMBOL_GPL(mdio_mux_uninit);
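
The rework replaces the leaky continue-on-error loop with one unwind label: any mid-loop failure, including -EPROBE_DEFER, jumps to err_loop, which tears down every child bus registered so far through the same mdio_mux_uninit_children() helper the uninit path now shares. The loop-with-unwind shape, with hypothetical setup_one()/teardown_one():

    static int setup_all(struct item *items, size_t count)
    {
            size_t i;
            int ret;

            for (i = 0; i < count; i++) {
                    ret = setup_one(&items[i]);
                    if (ret)
                            goto err_unwind;
            }
            return 0;

    err_unwind:
            while (i--)                   /* undo items[0..i-1], reversed */
                    teardown_one(&items[i]);
            return ret;
    }
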
index 11be6bcdd551a0e16fcb9d24202633de22f1bc7b..e60e38c1f09d317750c7fd6f0bdfa2ec9ca33513 100644 (file)
@@ -335,7 +335,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
        u64_stats_init(&mhi_netdev->stats.tx_syncp);
 
        /* Start MHI channels */
-       err = mhi_prepare_for_transfer(mhi_dev, 0);
+       err = mhi_prepare_for_transfer(mhi_dev);
        if (err)
                goto out_err;
 
index 63fda3fc40aacbb25a26c011b1cf1e523cd2d9e7..4bd61339823ceeeb202097b825e4438710d44d6b 100644 (file)
@@ -1089,7 +1089,7 @@ struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
 
        xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
        if (!xpcs)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        xpcs->mdiodev = mdiodev;
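
Callers of xpcs_create() test the result with IS_ERR(), so a NULL return on allocation failure looked like success and got dereferenced; encoding the errno in the pointer keeps the contract consistent. The ERR_PTR()/IS_ERR()/PTR_ERR() trio in one place, widget names hypothetical:

    struct widget *widget_create(void)
    {
            struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

            if (!w)
                    return ERR_PTR(-ENOMEM);  /* errno encoded in pointer */
            return w;
    }

    /* caller */
    w = widget_create();
    if (IS_ERR(w))
            return PTR_ERR(w);                /* recovers the -ENOMEM */
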
 
index 11ff335d62286e5e58cc23cfa44dce8c09688f4b..b7a5ae20edd5374f975400f78765ccb05c33d8b5 100644 (file)
@@ -81,6 +81,8 @@ static struct phy_driver mtk_gephy_driver[] = {
                 */
                .config_intr    = genphy_no_config_intr,
                .handle_interrupt = genphy_handle_interrupt_no_ack,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
                .read_page      = mtk_gephy_read_page,
                .write_page     = mtk_gephy_write_page,
        },
@@ -93,6 +95,8 @@ static struct phy_driver mtk_gephy_driver[] = {
                 */
                .config_intr    = genphy_no_config_intr,
                .handle_interrupt = genphy_handle_interrupt_no_ack,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
                .read_page      = mtk_gephy_read_page,
                .write_page     = mtk_gephy_write_page,
        },
index 53bdd673ae56158ceaf39a8298e1587a9f9fd366..5c928f827173c37479448cdc5b0b48d7d8b12ce8 100644 (file)
@@ -1760,8 +1760,6 @@ static struct phy_driver ksphy_driver[] = {
        .name           = "Micrel KSZ87XX Switch",
        /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
-       .config_aneg    = ksz8873mll_config_aneg,
-       .read_status    = ksz8873mll_read_status,
        .match_phy_device = ksz8795_match_phy_device,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
index 930e49ef15f6a0bd6231a01a46d2d1dd545dacae..7a099c37527f0734f959b4148d1cbb2c4d3fd91e 100644 (file)
@@ -284,7 +284,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
 static int ppp_connect_channel(struct channel *pch, int unit);
 static int ppp_disconnect_channel(struct channel *pch);
 static void ppp_destroy_channel(struct channel *pch);
-static int unit_get(struct idr *p, void *ptr);
+static int unit_get(struct idr *p, void *ptr, int min);
 static int unit_set(struct idr *p, void *ptr, int n);
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
@@ -1155,9 +1155,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
        mutex_lock(&pn->all_ppp_mutex);
 
        if (unit < 0) {
-               ret = unit_get(&pn->units_idr, ppp);
+               ret = unit_get(&pn->units_idr, ppp, 0);
                if (ret < 0)
                        goto err;
+               if (!ifname_is_set) {
+                       while (1) {
+                               snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
+                               if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+                                       break;
+                               unit_put(&pn->units_idr, ret);
+                               ret = unit_get(&pn->units_idr, ppp, ret + 1);
+                               if (ret < 0)
+                                       goto err;
+                       }
+               }
        } else {
                /* Caller asked for a specific unit number. Fail with -EEXIST
                 * if unavailable. For backward compatibility, return -EEXIST
@@ -1306,7 +1317,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
         * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
         * userspace to infer the device name using the PPPIOCGUNIT ioctl.
         */
-       if (!tb[IFLA_IFNAME])
+       if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
                conf.ifname_is_set = false;
 
        err = ppp_dev_configure(src_net, dev, &conf);
@@ -3552,9 +3563,9 @@ static int unit_set(struct idr *p, void *ptr, int n)
 }
 
 /* get new free unit number and associate pointer with it */
-static int unit_get(struct idr *p, void *ptr)
+static int unit_get(struct idr *p, void *ptr, int min)
 {
-       return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
+       return idr_alloc(p, ptr, min, 0, GFP_KERNEL);
 }
 
 /* put unit number back to a pool */
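
When userspace supplies no ifname, the unit number doubles as the "ppp%i" suffix, and another device may already own that name (e.g. after a rename); the new loop probes each candidate with __dev_get_by_name() and, on a clash, releases the unit and re-allocates starting one past it. The added min parameter simply forwards to idr_alloc()'s lower bound:

    /* idr_alloc(idr, ptr, start, end, gfp) returns the lowest free
     * id >= start (end == 0 means no upper limit), or a -errno. */
    id = idr_alloc(&pn->units_idr, ppp, min, 0, GFP_KERNEL);
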
index e1994a24612274cd70e1dfd847a0d5ac0b21c7ee..2a1e31defe718efefcdb9c4f8b50b511de521b2a 100644 (file)
@@ -184,6 +184,7 @@ struct asix_common_private {
        struct phy_device *phydev;
        u16 phy_addr;
        char phy_name[20];
+       bool embd_phy;
 };
 
 extern const struct driver_info ax88172a_info;
index ac92bc52a85ecd0359113d9218b1eb29d05cd2cc..38cda590895cc0a0b9a956e3e116b42829f15d3f 100644 (file)
@@ -63,6 +63,29 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                               value, index, data, size);
 }
 
+static int asix_check_host_enable(struct usbnet *dev, int in_pm)
+{
+       int i, ret;
+       u8 smsr;
+
+       for (i = 0; i < 30; ++i) {
+               ret = asix_set_sw_mii(dev, in_pm);
+               if (ret == -ENODEV || ret == -ETIMEDOUT)
+                       break;
+               usleep_range(1000, 1100);
+               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
+                                   0, 0, 1, &smsr, in_pm);
+               if (ret == -ENODEV)
+                       break;
+               else if (ret < 0)
+                       continue;
+               else if (smsr & AX_HOST_EN)
+                       break;
+       }
+
+       return ret;
+}
+
 static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
 {
        /* Reset the variables that have a lifetime outside of
@@ -467,19 +490,11 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
-       u8 smsr;
-       int i = 0;
        int ret;
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 0);
-               if (ret == -ENODEV || ret == -ETIMEDOUT)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 0);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 0);
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
@@ -505,23 +520,14 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
-       u8 smsr;
-       int i = 0;
        int ret;
 
        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                        phy_id, loc, val);
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 0);
-               if (ret == -ENODEV)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 0);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
 
+       ret = asix_check_host_enable(dev, 0);
        if (ret == -ENODEV)
                goto out;
 
@@ -561,19 +567,11 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res;
-       u8 smsr;
-       int i = 0;
        int ret;
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 1);
-               if (ret == -ENODEV || ret == -ETIMEDOUT)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 1);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 1);
        if (ret == -ENODEV || ret == -ETIMEDOUT) {
                mutex_unlock(&dev->phy_mutex);
                return ret;
@@ -595,22 +593,14 @@ asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
 {
        struct usbnet *dev = netdev_priv(netdev);
        __le16 res = cpu_to_le16(val);
-       u8 smsr;
-       int i = 0;
        int ret;
 
        netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
                        phy_id, loc, val);
 
        mutex_lock(&dev->phy_mutex);
-       do {
-               ret = asix_set_sw_mii(dev, 1);
-               if (ret == -ENODEV)
-                       break;
-               usleep_range(1000, 1100);
-               ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
-                                   0, 0, 1, &smsr, 1);
-       } while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
+
+       ret = asix_check_host_enable(dev, 1);
        if (ret == -ENODEV) {
                mutex_unlock(&dev->phy_mutex);
                return;
index 2c115216420a4343f74340ab2a29109b8e4c6d6b..dc87e8caf954af6b86a776e56941c3055637492e 100644 (file)
@@ -354,24 +354,23 @@ out:
 static int ax88772_hw_reset(struct usbnet *dev, int in_pm)
 {
        struct asix_data *data = (struct asix_data *)&dev->data;
-       int ret, embd_phy;
+       struct asix_common_private *priv = dev->driver_priv;
        u16 rx_ctl;
+       int ret;
 
        ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 |
                              AX_GPIO_GPO2EN, 5, in_pm);
        if (ret < 0)
                goto out;
 
-       embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
-
-       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy,
+       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy,
                             0, 0, NULL, in_pm);
        if (ret < 0) {
                netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
                goto out;
        }
 
-       if (embd_phy) {
+       if (priv->embd_phy) {
                ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm);
                if (ret < 0)
                        goto out;
@@ -449,17 +448,16 @@ out:
 static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
 {
        struct asix_data *data = (struct asix_data *)&dev->data;
-       int ret, embd_phy;
+       struct asix_common_private *priv = dev->driver_priv;
        u16 rx_ctl, phy14h, phy15h, phy16h;
        u8 chipcode = 0;
+       int ret;
 
        ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
        if (ret < 0)
                goto out;
 
-       embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
-
-       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy |
+       ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy |
                             AX_PHYSEL_SSEN, 0, 0, NULL, in_pm);
        if (ret < 0) {
                netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
@@ -683,12 +681,6 @@ static int ax88772_init_phy(struct usbnet *dev)
        struct asix_common_private *priv = dev->driver_priv;
        int ret;
 
-       ret = asix_read_phy_addr(dev, true);
-       if (ret < 0)
-               return ret;
-
-       priv->phy_addr = ret;
-
        snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
                 priv->mdio->id, priv->phy_addr);
 
@@ -716,6 +708,12 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        int ret, i;
        u32 phyid;
 
+       priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev->driver_priv = priv;
+
        usbnet_get_endpoints(dev, intf);
 
        /* Maybe the boot loader passed the MAC address via device tree */
@@ -751,6 +749,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
        dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
 
+       ret = asix_read_phy_addr(dev, true);
+       if (ret < 0)
+               return ret;
+
+       priv->phy_addr = ret;
+       priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
+
        asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
        chipcode &= AX_CHIPCODE_MASK;
 
@@ -773,12 +778,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
                dev->rx_urb_size = 2048;
        }
 
-       priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       dev->driver_priv = priv;
-
        priv->presvd_phy_bmcr = 0;
        priv->presvd_phy_advertise = 0;
        if (chipcode == AX_AX88772_CHIPCODE) {
@@ -817,6 +816,12 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
        asix_rx_fixup_common_free(dev->driver_priv);
 }
 
+static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+       asix_rx_fixup_common_free(dev->driver_priv);
+       kfree(dev->driver_priv);
+}
+
 static const struct ethtool_ops ax88178_ethtool_ops = {
        .get_drvinfo            = asix_get_drvinfo,
        .get_link               = asix_get_link,
@@ -1225,7 +1230,7 @@ static const struct driver_info ax88772b_info = {
 static const struct driver_info ax88178_info = {
        .description = "ASIX AX88178 USB 2.0 Ethernet",
        .bind = ax88178_bind,
-       .unbind = ax88772_unbind,
+       .unbind = ax88178_unbind,
        .status = asix_status,
        .link_reset = ax88178_link_reset,
        .reset = ax88178_reset,
index 652e9fcf0b773d6f85dfea8acb28637ee151f35f..9f9dd0de33cb645b5894458bbcb037e836c1e500 100644 (file)
@@ -446,7 +446,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
                write_mii_word(pegasus, 0, 0x1b, &auxmode);
        }
 
-       return 0;
+       return ret;
 fail:
        netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
        return ret;
@@ -835,7 +835,7 @@ static int pegasus_open(struct net_device *net)
        if (!pegasus->rx_skb)
                goto exit;
 
-       res = set_registers(pegasus, EthID, 6, net->dev_addr);
+       set_registers(pegasus, EthID, 6, net->dev_addr);
 
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
index e09b107b5c9920ac8b39c39be80fe62df496f16d..79832374f78dbffcaac90adbfc65782b00f8e9ed 100644 (file)
@@ -3955,17 +3955,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
        case RTL_VER_06:
                ocp_write_byte(tp, type, PLA_BP_EN, 0);
                break;
+       case RTL_VER_14:
+               ocp_write_word(tp, type, USB_BP2_EN, 0);
+
+               ocp_write_word(tp, type, USB_BP_8, 0);
+               ocp_write_word(tp, type, USB_BP_9, 0);
+               ocp_write_word(tp, type, USB_BP_10, 0);
+               ocp_write_word(tp, type, USB_BP_11, 0);
+               ocp_write_word(tp, type, USB_BP_12, 0);
+               ocp_write_word(tp, type, USB_BP_13, 0);
+               ocp_write_word(tp, type, USB_BP_14, 0);
+               ocp_write_word(tp, type, USB_BP_15, 0);
+               break;
        case RTL_VER_08:
        case RTL_VER_09:
        case RTL_VER_10:
        case RTL_VER_11:
        case RTL_VER_12:
        case RTL_VER_13:
-       case RTL_VER_14:
        case RTL_VER_15:
        default:
                if (type == MCU_TYPE_USB) {
-                       ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+                       ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
 
                        ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
                        ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
@@ -4331,7 +4342,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
                case RTL_VER_11:
                case RTL_VER_12:
                case RTL_VER_13:
-               case RTL_VER_14:
                case RTL_VER_15:
                        fw_reg = 0xf800;
                        bp_ba_addr = PLA_BP_BA;
@@ -4339,6 +4349,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
                        bp_start = PLA_BP_0;
                        max_bp = 8;
                        break;
+               case RTL_VER_14:
+                       fw_reg = 0xf800;
+                       bp_ba_addr = PLA_BP_BA;
+                       bp_en_addr = USB_BP2_EN;
+                       bp_start = PLA_BP_0;
+                       max_bp = 16;
+                       break;
                default:
                        goto out;
                }
index 56c3f85190938dedc657af691191d34baabc3671..eee493685aad5d70b0824b5441c495cd63284518 100644 (file)
@@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_CSUM
 };
 
-#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO))
@@ -2515,7 +2515,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
-               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
+               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
                return -EOPNOTSUPP;
        }
 
@@ -2646,15 +2646,15 @@ static int virtnet_set_features(struct net_device *dev,
        u64 offloads;
        int err;
 
-       if ((dev->features ^ features) & NETIF_F_LRO) {
+       if ((dev->features ^ features) & NETIF_F_GRO_HW) {
                if (vi->xdp_enabled)
                        return -EBUSY;
 
-               if (features & NETIF_F_LRO)
+               if (features & NETIF_F_GRO_HW)
                        offloads = vi->guest_offloads_capable;
                else
                        offloads = vi->guest_offloads_capable &
-                                  ~GUEST_OFFLOAD_LRO_MASK;
+                                  ~GUEST_OFFLOAD_GRO_HW_MASK;
 
                err = virtnet_set_guest_offloads(vi, offloads);
                if (err)
@@ -3134,9 +3134,9 @@ static int virtnet_probe(struct virtio_device *vdev)
                dev->features |= NETIF_F_RXCSUM;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
-               dev->features |= NETIF_F_LRO;
+               dev->features |= NETIF_F_GRO_HW;
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
-               dev->hw_features |= NETIF_F_LRO;
+               dev->hw_features |= NETIF_F_GRO_HW;
 
        dev->vlan_features = dev->features;
 
index 2b1b944d4b281c7547ec2d8aa0dac8990af72c01..8bbe2a7bb1412607c65bb479eabd8d94c2bcfbbf 100644 (file)
@@ -1367,6 +1367,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
        bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        bool is_ndisc = ipv6_ndisc_frame(skb);
 
+       nf_reset_ct(skb);
+
        /* loopback, multicast & non-ND link-local traffic; do not push through
         * packet taps again. Reset pkt_type for upper layers to process skb.
         * For strict packets with a source LLA, determine the dst using the
@@ -1429,6 +1431,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
        skb->skb_iif = vrf_dev->ifindex;
        IPCB(skb)->flags |= IPSKB_L3SLAVE;
 
+       nf_reset_ct(skb);
+
        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
                goto out;
 
index 2403490cbc265a3c3a5f2ed4917dc2efcf712bf9..b4b1f75b9c2a8b3998d7c65234282688f701497f 100644 (file)
@@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
        u32 sha1 = 0;
        u16 mac_type = 0, rf_id = 0;
        u8 *pnvm_data = NULL, *tmp;
+       bool hw_match = false;
        u32 size = 0;
        int ret;
 
@@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                break;
                        }
 
+                       if (hw_match)
+                               break;
+
                        mac_type = le16_to_cpup((__le16 *)data);
                        rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
 
@@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
                                     "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
                                     mac_type, rf_id);
 
-                       if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
-                           rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
-                               IWL_DEBUG_FW(trans,
-                                            "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
-                                            CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
-                               ret = -ENOENT;
-                               goto out;
-                       }
-
+                       if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
+                           rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+                               hw_match = true;
                        break;
                case IWL_UCODE_TLV_SEC_RT: {
                        struct iwl_pnvm_section *section = (void *)data;
@@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
        }
 
 done:
+       if (!hw_match) {
+               IWL_DEBUG_FW(trans,
+                            "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
+                            CSR_HW_REV_TYPE(trans->hw_rev),
+                            CSR_HW_RFID_TYPE(trans->hw_rf_id));
+               ret = -ENOENT;
+               goto out;
+       }
+
        if (!size) {
                IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
                ret = -ENOENT;
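
Rather than rejecting the PNVM file at the first IWL_UCODE_TLV_HW_TYPE that fails to match, the parser now records whether any HW-type TLV matched and only fails with -ENOENT after the whole image has been walked: an any-of match over a TLV stream. Its skeleton, types and helpers hypothetical:

    bool hw_match = false;
    size_t i;

    for (i = 0; i < n_tlvs; i++) {
            if (tlvs[i].type != TLV_HW_TYPE || hw_match)
                    continue;               /* first match is enough */
            if (tlv_matches_hw(&tlvs[i]))
                    hw_match = true;
    }
    if (!hw_match)
            return -ENOENT;                 /* no variant fits this HW */
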
index 16baee3d52aedb27a93c7722441be260917fc0a1..0b8a0cd3b652dea28167717f19d409ba3abe8a84 100644 (file)
@@ -1110,12 +1110,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
                      iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
 
+/* SoF with JF2 */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* SoF with JF */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
 /* So with GF */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
                      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name)
+                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+
+/* So with JF2 */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* So with JF */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+                     iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
 
 #endif /* CONFIG_IWLMVM */
 };
index 863aa18b302496447ba0ddfb947f2df6e7ec6992..43960770a9af207bbbdabdac7a00380e2e043bde 100644 (file)
@@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher)
        case WLAN_CIPHER_SUITE_SMS4:
                return MCU_CIPHER_WAPI;
        default:
-               return MT_CIPHER_NONE;
+               return MCU_CIPHER_NONE;
        }
 }
 
index edd3ba3a0c2daca4a7a2d069570c816ee36940f7..e68a562cc5b4f61b86119b4cb710f82292101d64 100644 (file)
@@ -1073,7 +1073,8 @@ enum {
 };
 
 enum mcu_cipher_type {
-       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_NONE = 0,
+       MCU_CIPHER_WEP40,
        MCU_CIPHER_WEP104,
        MCU_CIPHER_WEP128,
        MCU_CIPHER_TKIP,
index cd690c64f65b983c0720c0ceb9c3fc248698b1a3..9fbaacc67cfad0d8d0bfb4757748aea698fd1476 100644 (file)
@@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher)
        case WLAN_CIPHER_SUITE_SMS4:
                return MCU_CIPHER_WAPI;
        default:
-               return MT_CIPHER_NONE;
+               return MCU_CIPHER_NONE;
        }
 }
 
index d76cf8f8dfdf8a59885a4ce05b6ddfbe3993989e..de3c091f673685db0ad2a144da8b0d6ec03d9382 100644 (file)
@@ -199,7 +199,8 @@ struct sta_rec_sec {
 } __packed;
 
 enum mcu_cipher_type {
-       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_NONE = 0,
+       MCU_CIPHER_WEP40,
        MCU_CIPHER_WEP104,
        MCU_CIPHER_WEP128,
        MCU_CIPHER_TKIP,
index 804e6c4f2c782f3c9763e9f4089bce605001f738..519361ec40dfcb472b264b184eace3d4a48b4c33 100644 (file)
@@ -64,10 +64,9 @@ static struct ipc_chnl_cfg modem_cfg[] = {
 
 int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
 {
-       int array_size = ARRAY_SIZE(modem_cfg);
-
-       if (index >= array_size) {
-               pr_err("index: %d and array_size %d", index, array_size);
+       if (index >= ARRAY_SIZE(modem_cfg)) {
+               pr_err("index: %d and array size %zu", index,
+                      ARRAY_SIZE(modem_cfg));
                return -ECHRNG;
        }
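
ARRAY_SIZE() yields a size_t compile-time constant; the old code narrowed it into an int just to print it with %d. Using the macro directly with %zu keeps the types honest end to end. A bounds-checked table lookup in the same shape, table contents hypothetical:

    static const int table[7];

    static int table_get(int index, int *out)
    {
            if (index < 0 || index >= ARRAY_SIZE(table)) {
                    pr_err("index %d out of range (size %zu)\n",
                           index, ARRAY_SIZE(table));
                    return -ECHRNG;
            }
            *out = table[index];
            return 0;
    }
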
 
index 1e18420ce4045ca904583d5a562d0e8d6c9c4131..e4d0f696687f209560094e413ffe379283dcefa5 100644 (file)
@@ -41,14 +41,14 @@ struct mhi_wwan_dev {
 /* Increment RX budget and schedule RX refill if necessary */
 static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
 {
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
 
        mhiwwan->rx_budget++;
 
        if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
                schedule_work(&mhiwwan->rx_refill);
 
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 }
 
 /* Decrement RX budget if non-zero and return true on success */
@@ -56,7 +56,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
 {
        bool ret = false;
 
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
 
        if (mhiwwan->rx_budget) {
                mhiwwan->rx_budget--;
@@ -64,7 +64,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
                        ret = true;
        }
 
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 
        return ret;
 }
@@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port)
        int ret;
 
        /* Start mhi device's channel(s) */
-       ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0);
+       ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
        if (ret)
                return ret;
 
@@ -130,9 +130,9 @@ static void mhi_wwan_ctrl_stop(struct wwan_port *port)
 {
        struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
 
-       spin_lock(&mhiwwan->rx_lock);
+       spin_lock_bh(&mhiwwan->rx_lock);
        clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
-       spin_unlock(&mhiwwan->rx_lock);
+       spin_unlock_bh(&mhiwwan->rx_lock);
 
        cancel_work_sync(&mhiwwan->rx_refill);
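
rx_budget and the MHI_WWAN_RX_REFILL bit are shared between process context and MHI completion callbacks that can run in softirq context; a plain spin_lock() there can deadlock if a bottom half fires on the CPU already holding the lock, so every acquisition moves to the _bh variants. The rule in miniature:

    spin_lock_bh(&mhiwwan->rx_lock);    /* BHs off: a softirq on this CPU
                                         * cannot preempt and re-take it */
    mhiwwan->rx_budget++;
    spin_unlock_bh(&mhiwwan->rx_lock);
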
 
index 674a81d79db3efffa522c964a2ab75c8655d368f..35ece98134c09793e88bb40de4370d67e143ce0f 100644 (file)
@@ -164,11 +164,14 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
                goto done_unlock;
 
        id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
-       if (id < 0)
+       if (id < 0) {
+               wwandev = ERR_PTR(id);
                goto done_unlock;
+       }
 
        wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
        if (!wwandev) {
+               wwandev = ERR_PTR(-ENOMEM);
                ida_free(&wwan_dev_ids, id);
                goto done_unlock;
        }
@@ -182,7 +185,8 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
        err = device_register(&wwandev->dev);
        if (err) {
                put_device(&wwandev->dev);
-               wwandev = NULL;
+               wwandev = ERR_PTR(err);
+               goto done_unlock;
        }
 
 done_unlock:
@@ -1014,8 +1018,8 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
                return -EINVAL;
 
        wwandev = wwan_create_dev(parent);
-       if (!wwandev)
-               return -ENOMEM;
+       if (IS_ERR(wwandev))
+               return PTR_ERR(wwandev);
 
        if (WARN_ON(wwandev->ops)) {
                wwan_remove_dev(wwandev);
index 2403b71b601e94a1baf3613c1f4c4663f6cd0392..745478213ff2116b7f4a5e1ed9afcbcb37b31ad5 100644 (file)
@@ -2527,7 +2527,7 @@ static void deactivate_labels(void *region)
 
 static int init_active_labels(struct nd_region *nd_region)
 {
-       int i;
+       int i, rc = 0;
 
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
@@ -2546,13 +2546,14 @@ static int init_active_labels(struct nd_region *nd_region)
                        else if (test_bit(NDD_LABELING, &nvdimm->flags))
                                /* fail, labels needed to disambiguate dpa */;
                        else
-                               return 0;
+                               continue;
 
                        dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
                                        dev_name(&nd_mapping->nvdimm->dev),
                                        test_bit(NDD_LOCKED, &nvdimm->flags)
                                        ? "locked" : "disabled");
-                       return -ENXIO;
+                       rc = -ENXIO;
+                       goto out;
                }
                nd_mapping->ndd = ndd;
                atomic_inc(&nvdimm->busy);
@@ -2586,13 +2587,17 @@ static int init_active_labels(struct nd_region *nd_region)
                        break;
        }
 
-       if (i < nd_region->ndr_mappings) {
+       if (i < nd_region->ndr_mappings)
+               rc = -ENOMEM;
+
+out:
+       if (rc) {
                deactivate_labels(nd_region);
-               return -ENOMEM;
+               return rc;
        }
 
        return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
-                       nd_region);
+                                       nd_region);
 }
 
 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
index b335c077f215bbdef3b6b0897e3e1179411d9643..04b4691a8aac7ff65e94a19f21637304dd3e14ab 100644 (file)
@@ -893,6 +893,10 @@ static int _set_required_opps(struct device *dev,
        if (!required_opp_tables)
                return 0;
 
+       /* required-opps not fully initialized yet */
+       if (lazy_linking_pending(opp_table))
+               return -EBUSY;
+
        /*
         * We only support genpd's OPPs in the "required-opps" for now, as we
         * don't know much about other use cases. Error out if the required OPP
@@ -903,10 +907,6 @@ static int _set_required_opps(struct device *dev,
                return -ENOENT;
        }
 
-       /* required-opps not fully initialized yet */
-       if (lazy_linking_pending(opp_table))
-               return -EBUSY;
-
        /* Single genpd case */
        if (!genpd_virt_devs)
                return _set_required_opp(dev, dev, opp, 0);
@@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        kfree(opp_table->supported_hw);
        opp_table->supported_hw = NULL;
        opp_table->supported_hw_count = 0;
@@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        kfree(opp_table->prop_name);
        opp_table->prop_name = NULL;
 
@@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
        if (!opp_table->regulators)
                goto put_opp_table;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        if (opp_table->enabled) {
                for (i = opp_table->regulator_count - 1; i >= 0; i--)
                        regulator_disable(opp_table->regulators[i]);
@@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        clk_put(opp_table->clk);
        opp_table->clk = ERR_PTR(-EINVAL);
 
@@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
        if (unlikely(!opp_table))
                return;
 
-       /* Make sure there are no concurrent readers while updating opp_table */
-       WARN_ON(!list_empty(&opp_table->opp_list));
-
        opp_table->set_opp = NULL;
 
        mutex_lock(&opp_table->lock);
index d298e38aaf7ef759f05f1aefe1963833bd045fc6..67f2e0710e79c434f34b60f7b62f5b5f54f8efb2 100644 (file)
@@ -964,8 +964,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                }
        }
 
-       /* There should be one of more OPP defined */
-       if (WARN_ON(!count)) {
+       /* There should be one or more OPPs defined */
+       if (!count) {
+               dev_err(dev, "%s: no supported OPPs", __func__);
                ret = -ENOENT;
                goto remove_static_opp;
        }
index 896a45b242361c15198f1746137bd5cdf6eee299..654ac4a82beb984bb038b30456eb147c227f0e6f 100644 (file)
@@ -145,7 +145,7 @@ static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p)
        return 0;
 }
 
-static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
+static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
 {
        ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
 
@@ -170,7 +170,7 @@ static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data)
        return ixp4xx_pci_check_master_abort(p);
 }
 
-static int ixp4xx_pci_write(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
+static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data)
 {
        ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr);
 
@@ -308,7 +308,7 @@ static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
        dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n",
                where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
 
-       ret = ixp4xx_pci_read(p, addr, cmd, &val);
+       ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val);
        if (ret)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -356,7 +356,7 @@ static int ixp4xx_pci_write_config(struct pci_bus *bus,  unsigned int devfn,
        dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n",
                value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd);
 
-       ret = ixp4xx_pci_write(p, addr, cmd, val);
+       ret = ixp4xx_pci_write_indirect(p, addr, cmd, val);
        if (ret)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
index 9232255c8515df4b7f24012967f5b6149c6b9496..3a9f4f8ad8f94005d568602a924128d4c1bd298d 100644 (file)
@@ -143,24 +143,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
  * reliably as devices without an INTx disable bit will then generate a
  * level IRQ which will never be cleared.
  */
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
-       u32 mask_bits = desc->masked;
+       raw_spinlock_t *lock = &desc->dev->msi_lock;
+       unsigned long flags;
 
        if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
-               return 0;
+               return;
 
-       mask_bits &= ~mask;
-       mask_bits |= flag;
+       raw_spin_lock_irqsave(lock, flags);
+       desc->masked &= ~mask;
+       desc->masked |= flag;
        pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
-                              mask_bits);
-
-       return mask_bits;
+                              desc->masked);
+       raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 {
-       desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+       __pci_msi_desc_mask_irq(desc, mask, flag);
 }
 
 static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
@@ -289,13 +290,31 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
                /* Don't touch the hardware now */
        } else if (entry->msi_attrib.is_msix) {
                void __iomem *base = pci_msix_desc_addr(entry);
+               bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
 
                if (!base)
                        goto skip;
 
+               /*
+                * The specification mandates that the entry is masked
+                * when the message is modified:
+                *
+                * "If software changes the Address or Data value of an
+                * entry while the entry is unmasked, the result is
+                * undefined."
+                */
+               if (unmasked)
+                       __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
                writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
                writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
                writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+               if (unmasked)
+                       __pci_msix_desc_mask_irq(entry, 0);
+
+               /* Ensure that the writes are visible in the device */
+               readl(base + PCI_MSIX_ENTRY_DATA);
        } else {
                int pos = dev->msi_cap;
                u16 msgctl;
@@ -316,6 +335,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
                        pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
                                              msg->data);
                }
+               /* Ensure that the writes are visible in the device */
+               pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
        }
 
 skip:
@@ -636,21 +657,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
        /* Configure MSI capability structure */
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
        if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
+               msi_mask_irq(entry, mask, 0);
                free_msi_irqs(dev);
                return ret;
        }
 
        ret = msi_verify_entries(dev);
        if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
+               msi_mask_irq(entry, mask, 0);
                free_msi_irqs(dev);
                return ret;
        }
 
        ret = populate_msi_sysfs(dev);
        if (ret) {
-               msi_mask_irq(entry, mask, ~mask);
+               msi_mask_irq(entry, mask, 0);
                free_msi_irqs(dev);
                return ret;
        }
@@ -691,6 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 {
        struct irq_affinity_desc *curmsk, *masks = NULL;
        struct msi_desc *entry;
+       void __iomem *addr;
        int ret, i;
        int vec_count = pci_msix_vec_count(dev);
 
@@ -711,6 +733,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 
                entry->msi_attrib.is_msix       = 1;
                entry->msi_attrib.is_64         = 1;
+
                if (entries)
                        entry->msi_attrib.entry_nr = entries[i].entry;
                else
@@ -722,6 +745,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                entry->msi_attrib.default_irq   = dev->irq;
                entry->mask_base                = base;
 
+               addr = pci_msix_desc_addr(entry);
+               if (addr)
+                       entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
                list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
                if (masks)
                        curmsk++;
@@ -732,26 +759,28 @@ out:
        return ret;
 }
 
-static void msix_program_entries(struct pci_dev *dev,
-                                struct msix_entry *entries)
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
 {
        struct msi_desc *entry;
-       int i = 0;
-       void __iomem *desc_addr;
 
        for_each_pci_msi_entry(entry, dev) {
-               if (entries)
-                       entries[i++].vector = entry->irq;
+               if (entries) {
+                       entries->vector = entry->irq;
+                       entries++;
+               }
+       }
+}
 
-               desc_addr = pci_msix_desc_addr(entry);
-               if (desc_addr)
-                       entry->masked = readl(desc_addr +
-                                             PCI_MSIX_ENTRY_VECTOR_CTRL);
-               else
-                       entry->masked = 0;
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+       u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       int i;
 
-               msix_mask_irq(entry, 1);
-       }
+       if (pci_msi_ignore_mask)
+               return;
+
+       for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+               writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
 /**
@@ -768,22 +797,33 @@ static void msix_program_entries(struct pci_dev *dev,
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                                int nvec, struct irq_affinity *affd)
 {
-       int ret;
-       u16 control;
        void __iomem *base;
+       int ret, tsize;
+       u16 control;
 
-       /* Ensure MSI-X is disabled while it is set up */
-       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+       /*
+        * Some devices require MSI-X to be enabled before the MSI-X
+        * registers can be accessed.  Mask all the vectors to prevent
+        * interrupts coming in before they're fully set up.
+        */
+       pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+                                   PCI_MSIX_FLAGS_ENABLE);
 
        pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
        /* Request & Map MSI-X table region */
-       base = msix_map_region(dev, msix_table_size(control));
-       if (!base)
-               return -ENOMEM;
+       tsize = msix_table_size(control);
+       base = msix_map_region(dev, tsize);
+       if (!base) {
+               ret = -ENOMEM;
+               goto out_disable;
+       }
+
+       /* Ensure that all table entries are masked. */
+       msix_mask_all(base, tsize);
 
        ret = msix_setup_entries(dev, base, entries, nvec, affd);
        if (ret)
-               return ret;
+               goto out_disable;
 
        ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
        if (ret)
@@ -794,15 +834,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
        if (ret)
                goto out_free;
 
-       /*
-        * Some devices require MSI-X to be enabled before we can touch the
-        * MSI-X registers.  We need to mask all the vectors to prevent
-        * interrupts coming in before they're fully set up.
-        */
-       pci_msix_clear_and_set_ctrl(dev, 0,
-                               PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
-
-       msix_program_entries(dev, entries);
+       msix_update_entries(dev, entries);
 
        ret = populate_msi_sysfs(dev);
        if (ret)
@@ -836,6 +868,9 @@ out_avail:
 out_free:
        free_msi_irqs(dev);
 
+out_disable:
+       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
        return ret;
 }
 
@@ -930,8 +965,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
 
        /* Return the device with MSI unmasked as initial states */
        mask = msi_mask(desc->msi_attrib.multi_cap);
-       /* Keep cached state to be restored */
-       __pci_msi_desc_mask_irq(desc, mask, ~mask);
+       msi_mask_irq(desc, mask, 0);
 
        /* Restore dev->irq to its default pin-assertion IRQ */
        dev->irq = desc->msi_attrib.default_irq;
@@ -1016,10 +1050,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
        }
 
        /* Return the device with MSI-X masked as initial states */
-       for_each_pci_msi_entry(entry, dev) {
-               /* Keep cached states to be restored */
+       for_each_pci_msi_entry(entry, dev)
                __pci_msix_desc_mask_irq(entry, 1);
-       }
 
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
        pci_intx_for_msi(dev, 1);
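
The comment block added to __pci_write_msi_msg() carries the rule the rest of these hunks enforce: per the PCI spec, an MSI-X entry's address/data may only change while the entry is masked, and posted writes should be flushed with a read before the vector is trusted. The mask/modify/unmask bracket in isolation, register helpers hypothetical:

    bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);

    if (unmasked)
            mask_entry(entry);       /* spec: mask before changing ... */

    write_addr_data(entry, msg);     /* ... the address/data registers */

    if (unmasked)
            unmask_entry(entry);

    (void)read_entry_data(entry);    /* flush posted writes to the device */

The same series also serializes the read-modify-write of desc->masked behind a raw spinlock in __pci_msi_desc_mask_irq(), since mask updates can race between contexts.
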
index 5d63df7c18206ae416f800fa5f35ea37cb0aab9b..7bbf2673c7f2f956a8901b50e8d8435953b6eaa1 100644 (file)
@@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b)
        b->legacy_mem->size = 1024*1024;
        b->legacy_mem->attr.mode = 0600;
        b->legacy_mem->mmap = pci_mmap_legacy_mem;
-       b->legacy_io->mapping = iomem_get_mapping();
+       b->legacy_mem->mapping = iomem_get_mapping();
        pci_adjust_legacy_attr(b, pci_mmap_mem);
        error = device_create_bin_file(&b->dev, b->legacy_mem);
        if (error)
index 6d74386eadc2c5ae93619be6e9cac5c567ff0a43..ab3de1551b5034fb40b91751656ad6a8cc2c2e84 100644 (file)
@@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
 
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
index 3e4ef2b87526890aa44a14b9b293a91ada204afc..0bcd19597e4adad2f4f439d03aeb37d377e4bfb8 100644 (file)
@@ -701,32 +701,32 @@ static const struct pinctrl_pin_desc tglh_pins[] = {
 
 static const struct intel_padgroup tglh_community0_gpps[] = {
        TGL_GPP(0, 0, 24, 0),                           /* GPP_A */
-       TGL_GPP(1, 25, 44, 128),                        /* GPP_R */
-       TGL_GPP(2, 45, 70, 32),                         /* GPP_B */
-       TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP),      /* vGPIO_0 */
+       TGL_GPP(1, 25, 44, 32),                         /* GPP_R */
+       TGL_GPP(2, 45, 70, 64),                         /* GPP_B */
+       TGL_GPP(3, 71, 78, 96),                         /* vGPIO_0 */
 };
 
 static const struct intel_padgroup tglh_community1_gpps[] = {
-       TGL_GPP(0, 79, 104, 96),                        /* GPP_D */
-       TGL_GPP(1, 105, 128, 64),                       /* GPP_C */
-       TGL_GPP(2, 129, 136, 160),                      /* GPP_S */
-       TGL_GPP(3, 137, 153, 192),                      /* GPP_G */
-       TGL_GPP(4, 154, 180, 224),                      /* vGPIO */
+       TGL_GPP(0, 79, 104, 128),                       /* GPP_D */
+       TGL_GPP(1, 105, 128, 160),                      /* GPP_C */
+       TGL_GPP(2, 129, 136, 192),                      /* GPP_S */
+       TGL_GPP(3, 137, 153, 224),                      /* GPP_G */
+       TGL_GPP(4, 154, 180, 256),                      /* vGPIO */
 };
 
 static const struct intel_padgroup tglh_community3_gpps[] = {
-       TGL_GPP(0, 181, 193, 256),                      /* GPP_E */
-       TGL_GPP(1, 194, 217, 288),                      /* GPP_F */
+       TGL_GPP(0, 181, 193, 288),                      /* GPP_E */
+       TGL_GPP(1, 194, 217, 320),                      /* GPP_F */
 };
 
 static const struct intel_padgroup tglh_community4_gpps[] = {
-       TGL_GPP(0, 218, 241, 320),                      /* GPP_H */
+       TGL_GPP(0, 218, 241, 352),                      /* GPP_H */
        TGL_GPP(1, 242, 251, 384),                      /* GPP_J */
-       TGL_GPP(2, 252, 266, 352),                      /* GPP_K */
+       TGL_GPP(2, 252, 266, 416),                      /* GPP_K */
 };
 
 static const struct intel_padgroup tglh_community5_gpps[] = {
-       TGL_GPP(0, 267, 281, 416),                      /* GPP_I */
+       TGL_GPP(0, 267, 281, 448),                      /* GPP_I */
        TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP),    /* JTAG */
 };
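
The renumbering is easiest to verify as an invariant: walking the communities in order, each mapped pad group's gpio_base advances in 32-interrupt windows (0, 32, 64, ... 448), and only INTEL_GPIO_BASE_NOMAP groups such as JTAG sit outside the sequence; the old values (128 for GPP_R, 96 for GPP_D, and so on) broke it. A hypothetical self-check — illustrative only, assuming the struct intel_padgroup layout from pinctrl-intel.h:

        /* Illustrative only, not kernel code */
        static bool tglh_gpio_bases_ok(const struct intel_padgroup *gpps,
                                       size_t ngpps, int *expected)
        {
                size_t i;

                for (i = 0; i < ngpps; i++) {
                        if (gpps[i].gpio_base == INTEL_GPIO_BASE_NOMAP)
                                continue;
                        if (gpps[i].gpio_base != *expected)
                                return false;
                        /* each mapped group owns one 32-interrupt window */
                        *expected += 32;
                }
                return true;
        }

Seeded with *expected == 0 and run over community0 through community5 in order, the check holds for the corrected tables and fails immediately, at GPP_R, for the old ones.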
 
index 5b3b048725cc826222280d40ff1e478609921d98..45ebdeba985aeaf77b3d8649b943c125434c0b2d 100644 (file)
@@ -925,12 +925,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
                        err = hw->soc->bias_set(hw, desc, pullup);
                        if (err)
                                return err;
-               } else if (hw->soc->bias_set_combo) {
-                       err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
-                       if (err)
-                               return err;
                } else {
-                       return -ENOTSUPP;
+                       err = mtk_pinconf_bias_set_rev1(hw, desc, pullup);
+                       if (err)
+                               err = mtk_pinconf_bias_set(hw, desc, pullup);
                }
        }
 
index a76be6cc26ee13fcea0ddebea5e2629d5b7aad74..5b764740b82989586cc4583f50430247fead3fd7 100644 (file)
@@ -444,8 +444,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
        unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-       u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
-                       BIT(WAKE_CNTRL_OFF_S4);
+       u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
index f831526d06ff68fe47c7cc5f16e0c6d0421975e3..49e32684dbb25848c8a9048aeb481a2c7f86246a 100644 (file)
@@ -950,23 +950,37 @@ static int k210_fpioa_probe(struct platform_device *pdev)
                return ret;
 
        pdata->pclk = devm_clk_get_optional(dev, "pclk");
-       if (!IS_ERR(pdata->pclk))
-               clk_prepare_enable(pdata->pclk);
+       if (!IS_ERR(pdata->pclk)) {
+               ret = clk_prepare_enable(pdata->pclk);
+               if (ret)
+                       goto disable_clk;
+       }
 
        pdata->sysctl_map =
                syscon_regmap_lookup_by_phandle_args(np,
                                                "canaan,k210-sysctl-power",
                                                1, &pdata->power_offset);
-       if (IS_ERR(pdata->sysctl_map))
-               return PTR_ERR(pdata->sysctl_map);
+       if (IS_ERR(pdata->sysctl_map)) {
+               ret = PTR_ERR(pdata->sysctl_map);
+               goto disable_pclk;
+       }
 
        k210_fpioa_init_ties(pdata);
 
        pdata->pctl = pinctrl_register(&k210_pinctrl_desc, dev, (void *)pdata);
-       if (IS_ERR(pdata->pctl))
-               return PTR_ERR(pdata->pctl);
+       if (IS_ERR(pdata->pctl)) {
+               ret = PTR_ERR(pdata->pctl);
+               goto disable_pclk;
+       }
 
        return 0;
+
+disable_pclk:
+       clk_disable_unprepare(pdata->pclk);
+disable_clk:
+       clk_disable_unprepare(pdata->clk);
+
+       return ret;
 }
 
 static const struct of_device_id k210_fpioa_dt_ids[] = {
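
The new unwind ladder releases resources in reverse acquisition order: disable_pclk undoes the optional bus clock enabled in this hunk, then falls through to disable_clk, which undoes pdata->clk, presumably enabled earlier in probe above the hunk. Both labels are also safe for clocks that were never obtained, since clk_disable_unprepare() ignores NULL and error pointers.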
index 2f51b4f99393543932843b59ee7ab0da0bf42a3e..cad4e60df618c86a057ee9dfb6ab1962dd86f03e 100644 (file)
@@ -13,7 +13,7 @@ config PINCTRL_MSM
 
 config PINCTRL_APQ8064
        tristate "Qualcomm APQ8064 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -21,7 +21,7 @@ config PINCTRL_APQ8064
 
 config PINCTRL_APQ8084
        tristate "Qualcomm APQ8084 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -29,7 +29,7 @@ config PINCTRL_APQ8084
 
 config PINCTRL_IPQ4019
        tristate "Qualcomm IPQ4019 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -37,7 +37,7 @@ config PINCTRL_IPQ4019
 
 config PINCTRL_IPQ8064
        tristate "Qualcomm IPQ8064 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -45,7 +45,7 @@ config PINCTRL_IPQ8064
 
 config PINCTRL_IPQ8074
        tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -55,7 +55,7 @@ config PINCTRL_IPQ8074
 
 config PINCTRL_IPQ6018
        tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -65,7 +65,7 @@ config PINCTRL_IPQ6018
 
 config PINCTRL_MSM8226
        tristate "Qualcomm 8226 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -74,7 +74,7 @@ config PINCTRL_MSM8226
 
 config PINCTRL_MSM8660
        tristate "Qualcomm 8660 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -82,7 +82,7 @@ config PINCTRL_MSM8660
 
 config PINCTRL_MSM8960
        tristate "Qualcomm 8960 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -90,7 +90,7 @@ config PINCTRL_MSM8960
 
 config PINCTRL_MDM9615
        tristate "Qualcomm 9615 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -98,7 +98,7 @@ config PINCTRL_MDM9615
 
 config PINCTRL_MSM8X74
        tristate "Qualcomm 8x74 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -106,7 +106,7 @@ config PINCTRL_MSM8X74
 
 config PINCTRL_MSM8916
        tristate "Qualcomm 8916 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -114,7 +114,7 @@ config PINCTRL_MSM8916
 
 config PINCTRL_MSM8953
        tristate "Qualcomm 8953 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -124,7 +124,7 @@ config PINCTRL_MSM8953
 
 config PINCTRL_MSM8976
        tristate "Qualcomm 8976 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -134,7 +134,7 @@ config PINCTRL_MSM8976
 
 config PINCTRL_MSM8994
        tristate "Qualcomm 8994 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -143,7 +143,7 @@ config PINCTRL_MSM8994
 
 config PINCTRL_MSM8996
        tristate "Qualcomm MSM8996 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -151,7 +151,7 @@ config PINCTRL_MSM8996
 
 config PINCTRL_MSM8998
        tristate "Qualcomm MSM8998 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -159,7 +159,7 @@ config PINCTRL_MSM8998
 
 config PINCTRL_QCS404
        tristate "Qualcomm QCS404 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -167,7 +167,7 @@ config PINCTRL_QCS404
 
 config PINCTRL_QDF2XXX
        tristate "Qualcomm Technologies QDF2xxx pin controller driver"
-       depends on GPIOLIB && ACPI
+       depends on ACPI
        depends on PINCTRL_MSM
        help
          This is the GPIO driver for the TLMM block found on the
@@ -175,7 +175,7 @@ config PINCTRL_QDF2XXX
 
 config PINCTRL_QCOM_SPMI_PMIC
        tristate "Qualcomm SPMI PMIC pin controller driver"
-       depends on GPIOLIB && OF && SPMI
+       depends on OF && SPMI
        select REGMAP_SPMI
        select PINMUX
        select PINCONF
@@ -190,7 +190,7 @@ config PINCTRL_QCOM_SPMI_PMIC
 
 config PINCTRL_QCOM_SSBI_PMIC
        tristate "Qualcomm SSBI PMIC pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
@@ -204,7 +204,7 @@ config PINCTRL_QCOM_SSBI_PMIC
 
 config PINCTRL_SC7180
        tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -213,7 +213,7 @@ config PINCTRL_SC7180
 
 config PINCTRL_SC7280
        tristate "Qualcomm Technologies Inc SC7280 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -222,7 +222,7 @@ config PINCTRL_SC7280
 
 config PINCTRL_SC8180X
        tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
-       depends on GPIOLIB && (OF || ACPI)
+       depends on (OF || ACPI)
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -231,7 +231,7 @@ config PINCTRL_SC8180X
 
 config PINCTRL_SDM660
        tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -240,7 +240,7 @@ config PINCTRL_SDM660
 
 config PINCTRL_SDM845
        tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
-       depends on GPIOLIB && (OF || ACPI)
+       depends on (OF || ACPI)
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -249,7 +249,7 @@ config PINCTRL_SDM845
 
 config PINCTRL_SDX55
        tristate "Qualcomm Technologies Inc SDX55 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -258,7 +258,7 @@ config PINCTRL_SDX55
 
 config PINCTRL_SM6125
        tristate "Qualcomm Technologies Inc SM6125 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -267,7 +267,7 @@ config PINCTRL_SM6125
 
 config PINCTRL_SM8150
        tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -276,7 +276,7 @@ config PINCTRL_SM8150
 
 config PINCTRL_SM8250
        tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
-       depends on GPIOLIB && OF
+       depends on OF
        depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -285,8 +285,7 @@ config PINCTRL_SM8250
 
 config PINCTRL_SM8350
        tristate "Qualcomm Technologies Inc SM8350 pin controller driver"
-       depends on GPIOLIB && OF
-       select PINCTRL_MSM
+       depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
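
Every hunk in this file is the same mechanical change: each per-SoC entry already depends on PINCTRL_MSM, which carries the GPIOLIB requirement itself, so the explicit GPIOLIB dependency is redundant and dropped. SM8350 additionally converts from select PINCTRL_MSM to depends on PINCTRL_MSM, matching its siblings.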
index dc8d39ae045b22b5b7b50c5ec379b7f418d231e3..9c7679c06dcad96203306bb906c93eeee9da37f2 100644 (file)
@@ -1219,10 +1219,12 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
        }
 
        /*
-        * We suppose that we won't have any more functions than pins,
-        * we'll reallocate that later anyway
+        * Find an upper bound for the maximum number of functions: in
+        * the worst case we have gpio_in, gpio_out, irq and up to four
+        * special functions per pin, plus one entry for the sentinel.
+        * We'll reallocate that later anyway.
         */
-       pctl->functions = kcalloc(pctl->ngroups,
+       pctl->functions = kcalloc(4 * pctl->ngroups + 4,
                                  sizeof(*pctl->functions),
                                  GFP_KERNEL);
        if (!pctl->functions)
index 7d385c3b22393c8182c136278cb6f9f243542bb6..d12db6c316ea2b9a8528a7012949d4b0a8cc906f 100644 (file)
@@ -508,6 +508,7 @@ config THINKPAD_ACPI
        depends on RFKILL || RFKILL = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
+       depends on I2C
        select ACPI_PLATFORM_PROFILE
        select HWMON
        select NVRAM
@@ -691,6 +692,7 @@ config INTEL_HID_EVENT
        tristate "INTEL HID Event"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel HID Event hotkey interface.
@@ -742,6 +744,7 @@ config INTEL_VBTN
        tristate "INTEL VIRTUAL BUTTON"
        depends on ACPI
        depends on INPUT
+       depends on I2C
        select INPUT_SPARSEKMAP
        help
          This driver provides support for the Intel Virtual Button interface.
index 0cb927f0f301a38301e82095275fcf0b87d55bd5..a81dc4b191b779dee0d7af5d9a1bcf622e6ea7e1 100644 (file)
@@ -41,6 +41,10 @@ static int wapf = -1;
 module_param(wapf, uint, 0444);
 MODULE_PARM_DESC(wapf, "WAPF value");
 
+static int tablet_mode_sw = -1;
+module_param(tablet_mode_sw, uint, 0444);
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+
 static struct quirk_entry *quirks;
 
 static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
@@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_use_lid_flip_devid,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUS TP200s / E205SA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
+               },
+               .driver_data = &quirk_asus_use_lid_flip_devid,
+       },
        {},
 };
 
@@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
        else
                wapf = quirks->wapf;
 
+       switch (tablet_mode_sw) {
+       case 0:
+               quirks->use_kbd_dock_devid = false;
+               quirks->use_lid_flip_devid = false;
+               break;
+       case 1:
+               quirks->use_kbd_dock_devid = true;
+               quirks->use_lid_flip_devid = false;
+               break;
+       case 2:
+               quirks->use_kbd_dock_devid = false;
+               quirks->use_lid_flip_devid = true;
+               break;
+       }
+
        if (quirks->i8042_filter) {
                ret = i8042_install_filter(quirks->i8042_filter);
                if (ret) {
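
With the new parameter, affected models can override detection at load time without waiting for a quirk entry — for example, booting with asus-nb-wmi.tablet_mode_sw=2 (or passing the equivalent modprobe option) forces lid-flip reporting, per the value mapping in the MODULE_PARM_DESC above.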
diff --git a/drivers/platform/x86/dual_accel_detect.h b/drivers/platform/x86/dual_accel_detect.h
new file mode 100644 (file)
index 0000000..a9eae17
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper code to detect 360 degree hinge (yoga) style 2-in-1 devices that use 2 accelerometers
+ * to allow the OS to determine the angle between the display and the base of the device.
+ *
+ * On Windows these are read by a special HingeAngleService process which calls undocumented
+ * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode.
+ * The firmware may use this to disable the kbd and touchpad to avoid spurious input in
+ * tablet-mode as well as to report SW_TABLET_MODE info to the OS.
+ *
+ * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported
+ * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection
+ * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info
+ * (instead userspace can derive the status itself by directly reading the 2 accels).
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data)
+{
+       struct acpi_resource_i2c_serialbus *sb;
+       int *count = data;
+
+       if (i2c_acpi_get_i2c_resource(ares, &sb))
+               *count = *count + 1;
+
+       return 1;
+}
+
+static int dual_accel_i2c_client_count(struct acpi_device *adev)
+{
+       int ret, count = 0;
+       LIST_HEAD(r);
+
+       ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count);
+       if (ret < 0)
+               return ret;
+
+       acpi_dev_free_resource_list(&r);
+       return count;
+}
+
+static bool dual_accel_detect_bosc0200(void)
+{
+       struct acpi_device *adev;
+       int count;
+
+       adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1);
+       if (!adev)
+               return false;
+
+       count = dual_accel_i2c_client_count(adev);
+
+       acpi_dev_put(adev);
+
+       return count == 2;
+}
+
+static bool dual_accel_detect(void)
+{
+       /* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */
+       if (acpi_dev_present("KIOX010A", NULL, -1) &&
+           acpi_dev_present("KIOX020A", NULL, -1))
+               return true;
+
+       /* Systems which use a single DUAL250E ACPI device to model 2 accels */
+       if (acpi_dev_present("DUAL250E", NULL, -1))
+               return true;
+
+       /* Systems which use a single BOSC0200 ACPI device to model 2 accels */
+       if (dual_accel_detect_bosc0200())
+               return true;
+
+       return false;
+}
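
The helper is header-only by design: each driver includes it and calls dual_accel_detect() once. A minimal hypothetical consumer — names invented for illustration — mirroring what the intel-hid, intel-vbtn and thinkpad_acpi hunks below do with the cached result:

        #include <linux/platform_device.h>
        #include <linux/slab.h>
        #include "dual_accel_detect.h"

        struct example_priv {
                bool dual_accel;
        };

        static int example_probe(struct platform_device *pdev)
        {
                struct example_priv *priv;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                /* The ACPI namespace is static, so detecting once at probe
                 * is enough; drivers skip SW_TABLET_MODE registration when
                 * this is true and let userspace read the accels directly. */
                priv->dual_accel = dual_accel_detect();

                platform_set_drvdata(pdev, priv);
                return 0;
        }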
index fbb224a82e34ca0065a0c0717eefce177be53980..7f3a03f937f66564f6af5dec899eaf728f5eee18 100644 (file)
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
        }}
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
@@ -147,6 +148,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
        { }
index e5fbe017f8e10f1032c0e043f957499c69e4661c..2e4e97a626a51273e9a2163a3a2eb23256f94ca5 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */
 #define TABLET_MODE_FLAG BIT(6)
@@ -122,6 +123,7 @@ struct intel_hid_priv {
        struct input_dev *array;
        struct input_dev *switches;
        bool wakeup_mode;
+       bool dual_accel;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -451,22 +453,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
         * SW_TABLET_MODE report, in these cases we enable support when receiving
         * the first event instead of during driver setup.
         *
-        * Some 360 degree hinges (yoga) style 2-in-1 devices use 2 accelerometers
-        * to allow the OS to determine the angle between the display and the base
-        * of the device. On Windows these are read by a special HingeAngleService
-        * process which calls an ACPI DSM (Device Specific Method) on the
-        * ACPI KIOX010A device node for the sensor in the display, to let the
-        * firmware know if the 2-in-1 is in tablet- or laptop-mode so that it can
-        * disable the kbd and touchpad to avoid spurious input in tablet-mode.
-        *
-        * The linux kxcjk1013 driver calls the DSM for this once at probe time
-        * to ensure that the builtin kbd and touchpad work. On some devices this
-        * causes a "spurious" 0xcd event on the intel-hid ACPI dev. In this case
-        * there is not a functional tablet-mode switch, so we should not register
-        * the tablet-mode switch device.
+        * See dual_accel_detect.h for more info on the dual_accel check.
         */
-       if (!priv->switches && (event == 0xcc || event == 0xcd) &&
-           !acpi_dev_present("KIOX010A", NULL, -1)) {
+       if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
                dev_info(&device->dev, "switch event received, enabling switches support\n");
                err = intel_hid_switches_setup(device);
                if (err)
@@ -607,6 +596,8 @@ static int intel_hid_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel_detect();
+
        err = intel_hid_input_setup(device);
        if (err) {
                pr_err("Failed to setup Intel HID hotkeys\n");
index 888a764efad1ac1b42527c4e457c1262e836c6a7..30916643106336dec3d716ece7a5208e031f299c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* Returned when NOT in tablet mode on some HP Stream x360 11 models */
 #define VGBS_TABLET_MODE_FLAG_ALT      0x10
@@ -66,6 +67,7 @@ static const struct key_entry intel_vbtn_switchmap[] = {
 struct intel_vbtn_priv {
        struct input_dev *buttons_dev;
        struct input_dev *switches_dev;
+       bool dual_accel;
        bool has_buttons;
        bool has_switches;
        bool wakeup_mode;
@@ -160,6 +162,10 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
                input_dev = priv->buttons_dev;
        } else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
                if (!priv->has_switches) {
+                       /* See dual_accel_detect.h for more info */
+                       if (priv->dual_accel)
+                               return;
+
                        dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
                        ret = input_register_device(priv->switches_dev);
                        if (ret)
@@ -248,11 +254,15 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
        {} /* Array terminator */
 };
 
-static bool intel_vbtn_has_switches(acpi_handle handle)
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
 {
        unsigned long long vgbs;
        acpi_status status;
 
+       /* See dual_accel_detect.h for more info */
+       if (dual_accel)
+               return false;
+
        if (!dmi_check_system(dmi_switches_allow_list))
                return false;
 
@@ -263,13 +273,14 @@ static bool intel_vbtn_has_switches(acpi_handle handle)
 static int intel_vbtn_probe(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
-       bool has_buttons, has_switches;
+       bool dual_accel, has_buttons, has_switches;
        struct intel_vbtn_priv *priv;
        acpi_status status;
        int err;
 
+       dual_accel = dual_accel_detect();
        has_buttons = acpi_has_method(handle, "VBDL");
-       has_switches = intel_vbtn_has_switches(handle);
+       has_switches = intel_vbtn_has_switches(handle, dual_accel);
 
        if (!has_buttons && !has_switches) {
                dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
@@ -281,6 +292,7 @@ static int intel_vbtn_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
+       priv->dual_accel = dual_accel;
        priv->has_buttons = has_buttons;
        priv->has_switches = has_switches;
 
index c37349f97bb804b9dda638f3faf90e46030ecb0b..d063d91db9bcbe5ceb2ac641d3105df37651ac4d 100644 (file)
@@ -94,6 +94,7 @@ static struct gpiod_lookup_table gpios_led_table = {
                                NULL, 1, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
                                NULL, 2, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
@@ -123,6 +124,7 @@ static struct gpiod_lookup_table gpios_key_table = {
        .table = {
                GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW,
                                NULL, 0, GPIO_ACTIVE_LOW),
+               {} /* Terminating entry */
        }
 };
 
index 603156a6e3ed8ede8f9f90c6305107b21cc982e2..50ff04c84650cddae9fc099eb56f994ff5e94912 100644 (file)
@@ -73,6 +73,7 @@
 #include <linux/uaccess.h>
 #include <acpi/battery.h>
 #include <acpi/video.h>
+#include "dual_accel_detect.h"
 
 /* ThinkPad CMOS commands */
 #define TP_CMOS_VOLUME_DOWN    0
@@ -3232,7 +3233,7 @@ static int hotkey_init_tablet_mode(void)
                 * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
                 * does not support this, so skip the hotkey on these models.
                 */
-               if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+               if (has_tablet_mode && !dual_accel_detect())
                        tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
                type = "GMMS";
        } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
index 4d1192062508d108c5a3e937f90e3d9dc34051a9..4b563db3ab3ec07bf3e6b59f8dde9664a572f64d 100644 (file)
@@ -204,6 +204,12 @@ config POWER_RESET_ST
        help
          Reset support for STMicroelectronics boards.
 
+config POWER_RESET_TPS65086
+       bool "TPS65086 restart driver"
+       depends on MFD_TPS65086
+       help
+         This driver adds support for resetting the TPS65086 PMIC on restart.
+
 config POWER_RESET_VERSATILE
        bool "ARM Versatile family reboot driver"
        depends on ARM
index cf3f4d02d8a54b3cb0cd3170769a85f255a485dd..f606a2f60539583bd3e5b82bb816e75f02979996 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_REGULATOR) += regulator-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
 obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
+obj-$(CONFIG_POWER_RESET_TPS65086) += tps65086-restart.o
 obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
 obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
 obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
index f1e843df0e161bc9cef74a9e28a8aec99e06acd0..02f5fdb8ffc4c0827bfbd0c2efafa9ef4d6d5afb 100644 (file)
@@ -19,6 +19,7 @@
 #define MII_MARVELL_PHY_PAGE           22
 
 #define MII_PHY_LED_CTRL               16
+#define MII_PHY_LED_POL_CTRL           17
 #define MII_88E1318S_PHY_LED_TCR       18
 #define MII_88E1318S_PHY_WOL_CTRL      16
 #define MII_M1011_IEVENT               19
 #define LED2_FORCE_ON                                  (0x8 << 8)
 #define LEDMASK                                                GENMASK(11,8)
 
+#define MII_88E1318S_PHY_LED_POL_LED2          BIT(4)
+
+struct power_off_cfg {
+       char *mdio_node_name;
+       void (*phy_set_reg)(bool restart);
+};
+
 static struct phy_device *phydev;
+static const struct power_off_cfg *cfg;
 
-static void mvphy_reg_intn(u16 data)
+static void linkstation_mvphy_reg_intn(bool restart)
 {
        int rc = 0, saved_page;
+       u16 data = 0;
+
+       if (restart)
+               data = MII_88E1318S_PHY_LED_TCR_FORCE_INT;
 
        saved_page = phy_select_page(phydev, MII_MARVELL_LED_PAGE);
        if (saved_page < 0)
@@ -66,11 +79,52 @@ err:
                dev_err(&phydev->mdio.dev, "Write register failed, %d\n", rc);
 }
 
+static void readynas_mvphy_set_reg(bool restart)
+{
+       int rc = 0, saved_page;
+       u16 data = 0;
+
+       if (restart)
+               data = MII_88E1318S_PHY_LED_POL_LED2;
+
+       saved_page = phy_select_page(phydev, MII_MARVELL_LED_PAGE);
+       if (saved_page < 0)
+               goto err;
+
+       /* Set the LED[2].0 Polarity bit to the required state */
+       __phy_modify(phydev, MII_PHY_LED_POL_CTRL,
+                    MII_88E1318S_PHY_LED_POL_LED2, data);
+
+       if (!data) {
+               /* If WOL was enabled and a magic packet was received before powering
+                * off, we won't be able to wake up by sending another magic packet.
+                * Clear WOL status.
+                */
+               __phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_MARVELL_WOL_PAGE);
+               __phy_set_bits(phydev, MII_88E1318S_PHY_WOL_CTRL,
+                              MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
+       }
+err:
+       rc = phy_restore_page(phydev, saved_page, rc);
+       if (rc < 0)
+               dev_err(&phydev->mdio.dev, "Write register failed, %d\n", rc);
+}
+
+static const struct power_off_cfg linkstation_power_off_cfg = {
+       .mdio_node_name = "mdio",
+       .phy_set_reg = linkstation_mvphy_reg_intn,
+};
+
+static const struct power_off_cfg readynas_power_off_cfg = {
+       .mdio_node_name = "mdio-bus",
+       .phy_set_reg = readynas_mvphy_set_reg,
+};
+
 static int linkstation_reboot_notifier(struct notifier_block *nb,
                                       unsigned long action, void *unused)
 {
        if (action == SYS_RESTART)
-               mvphy_reg_intn(MII_88E1318S_PHY_LED_TCR_FORCE_INT);
+               cfg->phy_set_reg(true);
 
        return NOTIFY_DONE;
 }
@@ -82,14 +136,21 @@ static struct notifier_block linkstation_reboot_nb = {
 static void linkstation_poweroff(void)
 {
        unregister_reboot_notifier(&linkstation_reboot_nb);
-       mvphy_reg_intn(0);
+       cfg->phy_set_reg(false);
 
        kernel_restart("Power off");
 }
 
 static const struct of_device_id ls_poweroff_of_match[] = {
-       { .compatible = "buffalo,ls421d" },
-       { .compatible = "buffalo,ls421de" },
+       { .compatible = "buffalo,ls421d",
+         .data = &linkstation_power_off_cfg,
+       },
+       { .compatible = "buffalo,ls421de",
+         .data = &linkstation_power_off_cfg,
+       },
+       { .compatible = "netgear,readynas-duo-v2",
+         .data = &readynas_power_off_cfg,
+       },
        { },
 };
 
@@ -97,13 +158,17 @@ static int __init linkstation_poweroff_init(void)
 {
        struct mii_bus *bus;
        struct device_node *dn;
+       const struct of_device_id *match;
 
        dn = of_find_matching_node(NULL, ls_poweroff_of_match);
        if (!dn)
                return -ENODEV;
        of_node_put(dn);
 
-       dn = of_find_node_by_name(NULL, "mdio");
+       match = of_match_node(ls_poweroff_of_match, dn);
+       cfg = match->data;
+
+       dn = of_find_node_by_name(NULL, cfg->mdio_node_name);
        if (!dn)
                return -ENODEV;
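
The conversion follows the standard of_device_id .data pattern: the matched compatible now carries a per-board power_off_cfg, so supporting another NAS model means adding a table entry — an MDIO node name plus a PHY register callback — rather than another code path.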
 
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
new file mode 100644 (file)
index 0000000..78b89f7
--- /dev/null
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Emil Renner Berthing
+ */
+
+#include <linux/mfd/tps65086.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+struct tps65086_restart {
+       struct notifier_block handler;
+       struct device *dev;
+};
+
+static int tps65086_restart_notify(struct notifier_block *this,
+                                  unsigned long mode, void *cmd)
+{
+       struct tps65086_restart *tps65086_restart =
+               container_of(this, struct tps65086_restart, handler);
+       struct tps65086 *tps65086 = dev_get_drvdata(tps65086_restart->dev->parent);
+       int ret;
+
+       ret = regmap_write(tps65086->regmap, TPS65086_FORCESHUTDN, 1);
+       if (ret) {
+               dev_err(tps65086_restart->dev, "%s: error writing to tps65086 pmic: %d\n",
+                       __func__, ret);
+               return NOTIFY_DONE;
+       }
+
+       /* give it a little time */
+       mdelay(200);
+
+       WARN_ON(1);
+
+       return NOTIFY_DONE;
+}
+
+static int tps65086_restart_probe(struct platform_device *pdev)
+{
+       struct tps65086_restart *tps65086_restart;
+       int ret;
+
+       tps65086_restart = devm_kzalloc(&pdev->dev, sizeof(*tps65086_restart), GFP_KERNEL);
+       if (!tps65086_restart)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, tps65086_restart);
+
+       tps65086_restart->handler.notifier_call = tps65086_restart_notify;
+       tps65086_restart->handler.priority = 192;
+       tps65086_restart->dev = &pdev->dev;
+
+       ret = register_restart_handler(&tps65086_restart->handler);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: cannot register restart handler: %d\n",
+                       __func__, ret);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int tps65086_restart_remove(struct platform_device *pdev)
+{
+       struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = unregister_restart_handler(&tps65086_restart->handler);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
+                       __func__, ret);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id tps65086_restart_id_table[] = {
+       { "tps65086-reset", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65086_restart_id_table);
+
+static struct platform_driver tps65086_restart_driver = {
+       .driver = {
+               .name = "tps65086-restart",
+       },
+       .probe = tps65086_restart_probe,
+       .remove = tps65086_restart_remove,
+       .id_table = tps65086_restart_id_table,
+};
+module_platform_driver(tps65086_restart_driver);
+
+MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
+MODULE_DESCRIPTION("TPS65086 restart driver");
+MODULE_LICENSE("GPL v2");
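
Two details worth noting: the handler registers at priority 192, above the default of 128 documented in include/linux/reboot.h, so it runs ahead of generic restart handlers; and if the TPS65086_FORCESHUTDN write fails to power-cycle the board within the 200 ms delay, the WARN_ON(1) leaves a trace that the PMIC reset never took effect.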
index 11f5368e810e0d350bf4508a6b545519806492a1..fcc7534edcb2f9cd99b2ab6526b66f59260267b5 100644 (file)
@@ -358,7 +358,7 @@ config AXP288_CHARGER
 
 config AXP288_FUEL_GAUGE
        tristate "X-Powers AXP288 Fuel Gauge"
-       depends on MFD_AXP20X && IIO
+       depends on MFD_AXP20X && IIO && IOSF_MBI
        help
          Say yes here to have support for X-Power power management IC (PMIC)
          Fuel Gauge. The device provides battery statistics and status
@@ -577,6 +577,17 @@ config CHARGER_MP2629
          Battery charger. This driver provides Battery charger power management
          functions on the systems.
 
+config CHARGER_MT6360
+       tristate "Mediatek MT6360 Charger Driver"
+       depends on MFD_MT6360
+       depends on REGULATOR
+       select LINEAR_RANGES
+       help
+         Say Y here to enable the MT6360 Charger Part.
+         The device supports High-Accuracy Voltage/Current Regulation,
+         Average Input Current Regulation, Battery Temperature Sensing,
+         Over-Temperature Protection, and DPDM Detection for BC1.2.
+
 config CHARGER_QCOM_SMBB
        tristate "Qualcomm Switch-Mode Battery Charger and Boost"
        depends on MFD_SPMI_PMIC || COMPILE_TEST
@@ -669,6 +680,7 @@ config CHARGER_BQ256XX
 config CHARGER_SMB347
        tristate "Summit Microelectronics SMB3XX Battery Charger"
        depends on I2C
+       depends on REGULATOR
        select REGMAP_I2C
        help
          Say Y to include support for Summit Microelectronics SMB345,
@@ -736,6 +748,16 @@ config CHARGER_CROS_USBPD
          what is connected to USB PD ports from the EC and converts
          that into power_supply properties.
 
+config CHARGER_CROS_PCHG
+       tristate "ChromeOS EC based peripheral charger"
+       depends on MFD_CROS_EC_DEV
+       default MFD_CROS_EC_DEV
+       help
+         Say Y here to enable the ChromeOS EC based peripheral charger driver.
+         This driver gets various information about the devices connected to
+         the peripheral charge ports from the EC and converts that into
+         power_supply properties.
+
 config CHARGER_SC2731
        tristate "Spreadtrum SC2731 charger driver"
        depends on MFD_SC27XX_PMIC || COMPILE_TEST
@@ -782,6 +804,8 @@ config CHARGER_WILCO
 config RN5T618_POWER
        tristate "RN5T618 charger/fuel gauge support"
        depends on MFD_RN5T618
+       depends on RN5T618_ADC
+       depends on IIO
        help
          Say Y here to have support for RN5T618 PMIC family fuel gauge and charger.
          This driver can also be built as a module. If so, the module will be
index 33059a91f60c6baa8d273da7f3b7c5eaf4ea6244..4e55a11aab79f124d46aa9f9fdf2e086c44badfc 100644 (file)
@@ -60,7 +60,7 @@ obj-$(CONFIG_BATTERY_TWL4030_MADC)    += twl4030_madc_battery.o
 obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
 obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
 obj-$(CONFIG_BATTERY_RX51)     += rx51_battery.o
-obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
+obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o ab8500_chargalg.o
 obj-$(CONFIG_CHARGER_CPCAP)    += cpcap-charger.o
 obj-$(CONFIG_CHARGER_ISP1704)  += isp1704_charger.o
 obj-$(CONFIG_CHARGER_MAX8903)  += max8903_charger.o
@@ -78,6 +78,7 @@ obj-$(CONFIG_CHARGER_MAX77693)        += max77693_charger.o
 obj-$(CONFIG_CHARGER_MAX8997)  += max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)  += max8998_charger.o
 obj-$(CONFIG_CHARGER_MP2629)   += mp2629_charger.o
+obj-$(CONFIG_CHARGER_MT6360)   += mt6360_charger.o
 obj-$(CONFIG_CHARGER_QCOM_SMBB)        += qcom_smbb.o
 obj-$(CONFIG_CHARGER_BQ2415X)  += bq2415x_charger.o
 obj-$(CONFIG_CHARGER_BQ24190)  += bq24190_charger.o
@@ -93,6 +94,7 @@ obj-$(CONFIG_CHARGER_TPS65217)        += tps65217_charger.o
 obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
 obj-$(CONFIG_AXP288_CHARGER)   += axp288_charger.o
 obj-$(CONFIG_CHARGER_CROS_USBPD)       += cros_usbpd-charger.o
+obj-$(CONFIG_CHARGER_CROS_PCHG)        += cros_peripheral_charger.o
 obj-$(CONFIG_CHARGER_SC2731)   += sc2731_charger.o
 obj-$(CONFIG_FUEL_GAUGE_SC27XX)        += sc27xx_fuel_gauge.o
 obj-$(CONFIG_CHARGER_UCS1002)  += ucs1002_power.o
index 0c940571e5b0f655e4967a9ba765135a21af7ca2..d11405b7ee1aad7f319c565b48c8cb8a21db44a0 100644 (file)
@@ -269,43 +269,43 @@ enum bup_vch_sel {
 
 /*
  * ADC for the battery thermistor.
- * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
+ * When using the AB8500_ADC_THERM_BATCTRL the battery ID resistor is combined
  * with a NTC resistor to both identify the battery and to measure its
 * temperature. Different phone manufacturers use different techniques to both
  * identify the battery and to read its temperature.
  */
-enum abx500_adc_therm {
-       ABx500_ADC_THERM_BATCTRL,
-       ABx500_ADC_THERM_BATTEMP,
+enum ab8500_adc_therm {
+       AB8500_ADC_THERM_BATCTRL,
+       AB8500_ADC_THERM_BATTEMP,
 };
 
 /**
- * struct abx500_res_to_temp - defines one point in a temp to res curve. To
+ * struct ab8500_res_to_temp - defines one point in a temp to res curve. To
 * be used in battery packs that combine the identification resistor with a
  * NTC resistor.
  * @temp:                      battery pack temperature in Celsius
  * @resist:                    NTC resistor net total resistance
  */
-struct abx500_res_to_temp {
+struct ab8500_res_to_temp {
        int temp;
        int resist;
 };
 
 /**
- * struct abx500_v_to_cap - Table for translating voltage to capacity
+ * struct ab8500_v_to_cap - Table for translating voltage to capacity
  * @voltage:           Voltage in mV
  * @capacity:          Capacity in percent
  */
-struct abx500_v_to_cap {
+struct ab8500_v_to_cap {
        int voltage;
        int capacity;
 };
 
 /* Forward declaration */
-struct abx500_fg;
+struct ab8500_fg;
 
 /**
- * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
  * if not specified
  * @recovery_sleep_timer:      Time between measurements while recovering
  * @recovery_total_time:       Total recovery time
@@ -333,7 +333,7 @@ struct abx500_fg;
  * @pcut_max_restart:          Max number of restarts
  * @pcut_debounce_time:                Sets battery debounce time
  */
-struct abx500_fg_parameters {
+struct ab8500_fg_parameters {
        int recovery_sleep_timer;
        int recovery_total_time;
        int init_timer;
@@ -357,13 +357,13 @@ struct abx500_fg_parameters {
 };
 
 /**
- * struct abx500_charger_maximization - struct used by the board config.
+ * struct ab8500_charger_maximization - struct used by the board config.
  * @use_maxi:          Enable maximization for this battery type
  * @maxi_chg_curr:     Maximum charger current allowed
  * @maxi_wait_cycles:  cycles to wait before setting charger current
 * @charger_curr_step:  delta between two charger current settings (mA)
  */
-struct abx500_maxim_parameters {
+struct ab8500_maxim_parameters {
        bool ena_maxi;
        int chg_curr;
        int wait_cycles;
@@ -371,7 +371,7 @@ struct abx500_maxim_parameters {
 };
 
 /**
- * struct abx500_battery_type - different batteries supported
+ * struct ab8500_battery_type - different batteries supported
  * @name:                      battery technology
  * @resis_high:                        battery upper resistance limit
  * @resis_low:                 battery lower resistance limit
@@ -400,7 +400,7 @@ struct abx500_maxim_parameters {
  * @n_batres_tbl_elements      number of elements in the batres_tbl
  * @batres_tbl                 battery internal resistance vs temperature table
  */
-struct abx500_battery_type {
+struct ab8500_battery_type {
        int name;
        int resis_high;
        int resis_low;
@@ -421,22 +421,22 @@ struct abx500_battery_type {
        int low_high_vol_lvl;
        int battery_resistance;
        int n_temp_tbl_elements;
-       const struct abx500_res_to_temp *r_to_t_tbl;
+       const struct ab8500_res_to_temp *r_to_t_tbl;
        int n_v_cap_tbl_elements;
-       const struct abx500_v_to_cap *v_to_cap_tbl;
+       const struct ab8500_v_to_cap *v_to_cap_tbl;
        int n_batres_tbl_elements;
        const struct batres_vs_temp *batres_tbl;
 };
 
 /**
- * struct abx500_bm_capacity_levels - abx500 capacity level data
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
  * @critical:          critical capacity level in percent
  * @low:               low capacity level in percent
  * @normal:            normal capacity level in percent
  * @high:              high capacity level in percent
  * @full:              full capacity level in percent
  */
-struct abx500_bm_capacity_levels {
+struct ab8500_bm_capacity_levels {
        int critical;
        int low;
        int normal;
@@ -445,13 +445,13 @@ struct abx500_bm_capacity_levels {
 };
 
 /**
- * struct abx500_bm_charger_parameters - Charger specific parameters
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
  * @usb_volt_max:      maximum allowed USB charger voltage in mV
  * @usb_curr_max:      maximum allowed USB charger current in mA
  * @ac_volt_max:       maximum allowed AC charger voltage in mV
  * @ac_curr_max:       maximum allowed AC charger current in mA
  */
-struct abx500_bm_charger_parameters {
+struct ab8500_bm_charger_parameters {
        int usb_volt_max;
        int usb_curr_max;
        int ac_volt_max;
@@ -459,7 +459,7 @@ struct abx500_bm_charger_parameters {
 };
 
 /**
- * struct abx500_bm_data - abx500 battery management data
+ * struct ab8500_bm_data - ab8500 battery management data
  * @temp_under         under this temp, charging is stopped
  * @temp_low           between this temp and temp_under charging is reduced
  * @temp_high          between this temp and temp_over charging is reduced
@@ -473,7 +473,7 @@ struct abx500_bm_charger_parameters {
  * @bkup_bat_i         current which we charge the backup battery with
  * @no_maintenance     indicates that maintenance charging is disabled
  * @capacity_scaling    indicates whether capacity scaling is to be used
- * @abx500_adc_therm   placement of thermistor, batctrl or battemp adc
+ * @ab8500_adc_therm   placement of thermistor, batctrl or battemp adc
  * @chg_unknown_bat    flag to enable charging of unknown batteries
  * @enable_overshoot   flag to enable VBAT overshoot control
  * @auto_trig          flag to enable auto adc trigger
@@ -494,7 +494,7 @@ struct abx500_bm_charger_parameters {
  * @chg_params         charger parameters
  * @fg_params          fuel gauge parameters
  */
-struct abx500_bm_data {
+struct ab8500_bm_data {
        int temp_under;
        int temp_low;
        int temp_high;
@@ -511,7 +511,7 @@ struct abx500_bm_data {
        bool chg_unknown_bat;
        bool enable_overshoot;
        bool auto_trig;
-       enum abx500_adc_therm adc_therm;
+       enum ab8500_adc_therm adc_therm;
        int fg_res;
        int n_btypes;
        int batt_id;
@@ -523,11 +523,11 @@ struct abx500_bm_data {
        int n_chg_in_curr;
        int *chg_output_curr;
        int *chg_input_curr;
-       const struct abx500_maxim_parameters *maxi;
-       const struct abx500_bm_capacity_levels *cap_levels;
-       struct abx500_battery_type *bat_type;
-       const struct abx500_bm_charger_parameters *chg_params;
-       const struct abx500_fg_parameters *fg_params;
+       const struct ab8500_maxim_parameters *maxi;
+       const struct ab8500_bm_capacity_levels *cap_levels;
+       struct ab8500_battery_type *bat_type;
+       const struct ab8500_bm_charger_parameters *chg_params;
+       const struct ab8500_fg_parameters *fg_params;
 };
 
 enum {
@@ -561,160 +561,7 @@ struct batres_vs_temp {
 /* Forward declaration */
 struct ab8500_fg;
 
-/**
- * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer:      Time between measurements while recovering
- * @recovery_total_time:       Total recovery time
- * @init_timer:                        Measurement interval during startup
- * @init_discard_time:         Time we discard voltage measurement at startup
- * @init_total_time:           Total init time during startup
- * @high_curr_time:            Time current has to be high to go to recovery
- * @accu_charging:             FG accumulation time while charging
- * @accu_high_curr:            FG accumulation time in high current mode
- * @high_curr_threshold:       High current threshold, in mA
- * @lowbat_threshold:          Low battery threshold, in mV
- * @battok_falling_th_sel0     Threshold in mV for battOk signal sel0
- *                             Resolution in 50 mV step.
- * @battok_raising_th_sel1     Threshold in mV for battOk signal sel1
- *                             Resolution in 50 mV step.
- * @user_cap_limit             Capacity reported from user must be within this
- *                             limit to be considered as sane, in percentage
- *                             points.
- * @maint_thres                        This is the threshold where we stop reporting
- *                             battery full while in maintenance, in per cent
- * @pcut_enable:                       Enable power cut feature in ab8505
- * @pcut_max_time:             Max time threshold
- * @pcut_flag_time:            Flagtime threshold
- * @pcut_max_restart:          Max number of restarts
- * @pcut_debunce_time: Sets battery debounce time
- */
-struct ab8500_fg_parameters {
-       int recovery_sleep_timer;
-       int recovery_total_time;
-       int init_timer;
-       int init_discard_time;
-       int init_total_time;
-       int high_curr_time;
-       int accu_charging;
-       int accu_high_curr;
-       int high_curr_threshold;
-       int lowbat_threshold;
-       int battok_falling_th_sel0;
-       int battok_raising_th_sel1;
-       int user_cap_limit;
-       int maint_thres;
-       bool pcut_enable;
-       u8 pcut_max_time;
-       u8 pcut_flag_time;
-       u8 pcut_max_restart;
-       u8 pcut_debunce_time;
-};
-
-/**
- * struct ab8500_charger_maximization - struct used by the board config.
- * @use_maxi:          Enable maximization for this battery type
- * @maxi_chg_curr:     Maximum charger current allowed
- * @maxi_wait_cycles:  cycles to wait before setting charger current
- * @charger_curr_step  delta between two charger current settings (mA)
- */
-struct ab8500_maxim_parameters {
-       bool ena_maxi;
-       int chg_curr;
-       int wait_cycles;
-       int charger_curr_step;
-};
-
-/**
- * struct ab8500_bm_capacity_levels - ab8500 capacity level data
- * @critical:          critical capacity level in percent
- * @low:               low capacity level in percent
- * @normal:            normal capacity level in percent
- * @high:              high capacity level in percent
- * @full:              full capacity level in percent
- */
-struct ab8500_bm_capacity_levels {
-       int critical;
-       int low;
-       int normal;
-       int high;
-       int full;
-};
-
-/**
- * struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max:      maximum allowed USB charger voltage in mV
- * @usb_curr_max:      maximum allowed USB charger current in mA
- * @ac_volt_max:       maximum allowed AC charger voltage in mV
- * @ac_curr_max:       maximum allowed AC charger current in mA
- */
-struct ab8500_bm_charger_parameters {
-       int usb_volt_max;
-       int usb_curr_max;
-       int ac_volt_max;
-       int ac_curr_max;
-};
-
-/**
- * struct ab8500_bm_data - ab8500 battery management data
- * @temp_under         under this temp, charging is stopped
- * @temp_low           between this temp and temp_under charging is reduced
- * @temp_high          between this temp and temp_over charging is reduced
- * @temp_over          over this temp, charging is stopped
- * @temp_interval_chg  temperature measurement interval in s when charging
- * @temp_interval_nochg        temperature measurement interval in s when not charging
- * @main_safety_tmr_h  safety timer for main charger
- * @usb_safety_tmr_h   safety timer for usb charger
- * @bkup_bat_v         voltage which we charge the backup battery with
- * @bkup_bat_i         current which we charge the backup battery with
- * @no_maintenance     indicates that maintenance charging is disabled
- * @capacity_scaling    indicates whether capacity scaling is to be used
- * @adc_therm          placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat    flag to enable charging of unknown batteries
- * @enable_overshoot   flag to enable VBAT overshoot control
- * @fg_res             resistance of FG resistor in 0.1mOhm
- * @n_btypes           number of elements in array bat_type
- * @batt_id            index of the identified battery in array bat_type
- * @interval_charging  charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis    temperature hysteresis
- * @gnd_lift_resistance        Battery ground to phone ground resistance (mOhm)
- * @maxi:              maximization parameters
- * @cap_levels         capacity in percent for the different capacity levels
- * @bat_type           table of supported battery types
- * @chg_params         charger parameters
- * @fg_params          fuel gauge parameters
- */
-struct ab8500_bm_data {
-       int temp_under;
-       int temp_low;
-       int temp_high;
-       int temp_over;
-       int temp_interval_chg;
-       int temp_interval_nochg;
-       int main_safety_tmr_h;
-       int usb_safety_tmr_h;
-       int bkup_bat_v;
-       int bkup_bat_i;
-       bool no_maintenance;
-       bool capacity_scaling;
-       bool chg_unknown_bat;
-       bool enable_overshoot;
-       enum abx500_adc_therm adc_therm;
-       int fg_res;
-       int n_btypes;
-       int batt_id;
-       int interval_charging;
-       int interval_not_charging;
-       int temp_hysteresis;
-       int gnd_lift_resistance;
-       const struct ab8500_maxim_parameters *maxi;
-       const struct ab8500_bm_capacity_levels *cap_levels;
-       const struct ab8500_bm_charger_parameters *chg_params;
-       const struct ab8500_fg_parameters *fg_params;
-};
-
-extern struct abx500_bm_data ab8500_bm_data;
+extern struct ab8500_bm_data ab8500_bm_data;
 
 void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
 struct ab8500_fg *ab8500_fg_get(void);
@@ -725,10 +572,10 @@ int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
 int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
 int ab8500_bm_of_probe(struct device *dev,
                       struct device_node *np,
-                      struct abx500_bm_data *bm);
+                      struct ab8500_bm_data *bm);
 
 extern struct platform_driver ab8500_fg_driver;
 extern struct platform_driver ab8500_btemp_driver;
-extern struct platform_driver abx500_chargalg_driver;
+extern struct platform_driver ab8500_chargalg_driver;
 
 #endif /* _AB8500_CHARGER_H_ */
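
The net effect in this header: the abx500_* battery-management types become ab8500_*, which lets the parallel block of ab8500_* definitions deleted above — a near-verbatim duplicate, down to the pcut_debunce_time spelling — go away entirely.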
index c2b8c0bb77e275c67671f4c224f41b2c400b5bc3..6f5fb794042ce56b3d23887f6da5745467a5b3e7 100644 (file)
@@ -2,8 +2,6 @@
 #include <linux/export.h>
 #include <linux/power_supply.h>
 #include <linux/of.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
 
 #include "ab8500-bm.h"
 
@@ -13,7 +11,7 @@
  * Note that the res_to_temp table must be strictly sorted by falling resistance
  * values to work.
  */
-const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
+const struct ab8500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
        {-5, 53407},
        { 0, 48594},
        { 5, 43804},
@@ -35,7 +33,7 @@ EXPORT_SYMBOL(ab8500_temp_tbl_a_thermistor);
 const int ab8500_temp_tbl_a_size = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor);
 EXPORT_SYMBOL(ab8500_temp_tbl_a_size);
 
-const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
+const struct ab8500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
        {-5, 200000},
        { 0, 159024},
        { 5, 151921},
@@ -57,7 +55,7 @@ EXPORT_SYMBOL(ab8500_temp_tbl_b_thermistor);
 const int ab8500_temp_tbl_b_size = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor);
 EXPORT_SYMBOL(ab8500_temp_tbl_b_size);
 
-static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = {
+static const struct ab8500_v_to_cap cap_tbl_a_thermistor[] = {
        {4171,  100},
        {4114,   95},
        {4009,   83},
@@ -80,7 +78,7 @@ static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = {
        {3247,    0},
 };
 
-static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = {
+static const struct ab8500_v_to_cap cap_tbl_b_thermistor[] = {
        {4161,  100},
        {4124,   98},
        {4044,   90},
@@ -103,7 +101,7 @@ static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = {
        {3250,    0},
 };
 
-static const struct abx500_v_to_cap cap_tbl[] = {
+static const struct ab8500_v_to_cap cap_tbl[] = {
        {4186,  100},
        {4163,   99},
        {4114,   95},
@@ -134,7 +132,7 @@ static const struct abx500_v_to_cap cap_tbl[] = {
  * Note that the res_to_temp table must be strictly sorted by falling
  * resistance values to work.
  */
-static const struct abx500_res_to_temp temp_tbl[] = {
+static const struct ab8500_res_to_temp temp_tbl[] = {
        {-5, 214834},
        { 0, 162943},
        { 5, 124820},
@@ -191,7 +189,7 @@ static const struct batres_vs_temp temp_to_batres_tbl_9100[] = {
        {-20, 180},
 };
 
-static struct abx500_battery_type bat_type_thermistor[] = {
+static struct ab8500_battery_type bat_type_thermistor[] = {
        [BATTERY_UNKNOWN] = {
                /* First element always represent the UNKNOWN battery */
                .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
@@ -277,7 +275,7 @@ static struct abx500_battery_type bat_type_thermistor[] = {
        },
 };
 
-static struct abx500_battery_type bat_type_ext_thermistor[] = {
+static struct ab8500_battery_type bat_type_ext_thermistor[] = {
        [BATTERY_UNKNOWN] = {
                /* First element always represent the UNKNOWN battery */
                .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
@@ -394,7 +392,7 @@ static struct abx500_battery_type bat_type_ext_thermistor[] = {
        },
 };
 
-static const struct abx500_bm_capacity_levels cap_levels = {
+static const struct ab8500_bm_capacity_levels cap_levels = {
        .critical       = 2,
        .low            = 10,
        .normal         = 70,
@@ -402,7 +400,7 @@ static const struct abx500_bm_capacity_levels cap_levels = {
        .full           = 100,
 };
 
-static const struct abx500_fg_parameters fg = {
+static const struct ab8500_fg_parameters fg = {
        .recovery_sleep_timer = 10,
        .recovery_total_time = 100,
        .init_timer = 1,
@@ -424,14 +422,14 @@ static const struct abx500_fg_parameters fg = {
        .pcut_debounce_time = 2,
 };
 
-static const struct abx500_maxim_parameters ab8500_maxi_params = {
+static const struct ab8500_maxim_parameters ab8500_maxi_params = {
        .ena_maxi = true,
        .chg_curr = 910,
        .wait_cycles = 10,
        .charger_curr_step = 100,
 };
 
-static const struct abx500_bm_charger_parameters chg = {
+static const struct ab8500_bm_charger_parameters chg = {
        .usb_volt_max           = 5500,
        .usb_curr_max           = 1500,
        .ac_volt_max            = 7500,
@@ -456,7 +454,7 @@ static int ab8500_charge_input_curr_map[] = {
         700,    800,    900,    1000,   1100,   1300,   1400,   1500,
 };
 
-struct abx500_bm_data ab8500_bm_data = {
+struct ab8500_bm_data ab8500_bm_data = {
        .temp_under             = 3,
        .temp_low               = 8,
        .temp_high              = 43,
@@ -469,7 +467,7 @@ struct abx500_bm_data ab8500_bm_data = {
        .bkup_bat_i             = BUP_ICH_SEL_150UA,
        .no_maintenance         = false,
        .capacity_scaling       = false,
-       .adc_therm              = ABx500_ADC_THERM_BATCTRL,
+       .adc_therm              = AB8500_ADC_THERM_BATCTRL,
        .chg_unknown_bat        = false,
        .enable_overshoot       = false,
        .fg_res                 = 100,
@@ -492,7 +490,7 @@ struct abx500_bm_data ab8500_bm_data = {
 
 int ab8500_bm_of_probe(struct device *dev,
                       struct device_node *np,
-                      struct abx500_bm_data *bm)
+                      struct ab8500_bm_data *bm)
 {
        const struct batres_vs_temp *tmp_batres_tbl;
        struct device_node *battery_node;
@@ -531,7 +529,7 @@ int ab8500_bm_of_probe(struct device *dev,
        } else {
                bm->n_btypes   = 4;
                bm->bat_type   = bat_type_ext_thermistor;
-               bm->adc_therm  = ABx500_ADC_THERM_BATTEMP;
+               bm->adc_therm  = AB8500_ADC_THERM_BATTEMP;
                tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor;
        }
 
index dbdcff32f35399eebc93ff7bac7d1bf4e633c9f5..b6c9111d77d7d11829f4f7cf5a56aabdde53008f 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/iio/consumer.h>
+#include <linux/fixp-arith.h>
 
 #include "ab8500-bm.h"
 
@@ -102,7 +103,7 @@ struct ab8500_btemp {
        struct iio_channel *btemp_ball;
        struct iio_channel *bat_ctrl;
        struct ab8500_fg *fg;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct power_supply *btemp_psy;
        struct ab8500_btemp_events events;
        struct ab8500_btemp_ranges btemp_ranges;
@@ -144,7 +145,7 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
                return (450000 * (v_batctrl)) / (1800 - v_batctrl);
        }
 
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL) {
                /*
                 * If the battery has internal NTC, we use the current
                 * source to calculate the resistance.
@@ -206,7 +207,7 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                return 0;
 
        /* Only do this for batteries with internal NTC */
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL && enable) {
 
                if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
                        curr = BAT_CTRL_7U_ENA;
@@ -239,7 +240,7 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                                __func__);
                        goto disable_curr_source;
                }
-       } else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+       } else if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL && !enable) {
                dev_dbg(di->dev, "Disable BATCTRL curr source\n");
 
                /* Write 0 to the curr bits */
@@ -417,7 +418,7 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
  * based on the NTC resistance.
  */
 static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
-       const struct abx500_res_to_temp *tbl, int tbl_size, int res)
+       const struct ab8500_res_to_temp *tbl, int tbl_size, int res)
 {
        int i;
        /*
@@ -437,8 +438,9 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
                        i++;
        }
 
-       return tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
-               (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+       return fixp_linear_interpolate(tbl[i].resist, tbl[i].temp,
+                                      tbl[i + 1].resist, tbl[i + 1].temp,
+                                      res);
 }
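
The hunk above swaps the driver's open-coded two-point interpolation for fixp_linear_interpolate() from <linux/fixp-arith.h>, which is why that include was added earlier in this file. A minimal sketch of the same arithmetic, assuming the helper's (x0, y0, x1, y1, x) argument order; the sample resistance below is made up for illustration:

	/* Sketch of the linear interpolation the helper performs; the
	 * strictly falling resistance sort required by the tables above
	 * guarantees x1 != x0, so the division is safe. */
	static int interpolate(int x0, int y0, int x1, int y1, int x)
	{
		/* y(x) on the straight line through (x0, y0) and (x1, y1) */
		return y0 + (y1 - y0) * (x - x0) / (x1 - x0);
	}

	/*
	 * Example: res = 48000 ohm lies between {0, 48594} and {5, 43804}
	 * in ab8500_temp_tbl_a_thermistor, so the interpolated temperature
	 * is 0 + 5 * (48000 - 48594) / (43804 - 48594), roughly 0.6 degrees,
	 * which truncates to 0 in integer arithmetic.
	 */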
 
 /**
@@ -456,7 +458,7 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
 
        id = di->bm->batt_id;
 
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
                        id != BATTERY_UNKNOWN) {
 
                rbat = ab8500_btemp_get_batctrl_res(di);
@@ -525,7 +527,7 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
                        dev_dbg(di->dev, "Battery detected on %s"
                                " low %d < res %d < high: %d"
                                " index: %d\n",
-                               di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+                               di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL ?
                                "BATCTRL" : "BATTEMP",
                                di->bm->bat_type[i].resis_low, res,
                                di->bm->bat_type[i].resis_high, i);
@@ -545,7 +547,7 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
         * We only have to change current source if the
         * detected type is Type 1.
         */
-       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+       if (di->bm->adc_therm == AB8500_ADC_THERM_BATCTRL &&
            di->bm->batt_id == 1) {
                dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
                di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
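
For context, ab8500_btemp_id() identifies the pack by scanning bat_type[] for an entry whose resistance window brackets the measured BATCTRL/BATTEMP resistance, as the debug print above shows. A hedged sketch of that scan, using only the field names visible in these hunks (element 0 is the mandatory UNKNOWN entry per the table comments):

	/* Sketch only: scan bat_type[] until the measured resistance res
	 * falls inside an entry's (resis_low, resis_high) window. */
	int i, id = BATTERY_UNKNOWN;

	for (i = BATTERY_UNKNOWN + 1; i < di->bm->n_btypes; i++) {
		if (di->bm->bat_type[i].resis_low < res &&
		    res < di->bm->bat_type[i].resis_high) {
			id = i;		/* remember index into bat_type[] */
			break;
		}
	}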
similarity index 74%
rename from drivers/power/supply/abx500_chargalg.c
rename to drivers/power/supply/ab8500_chargalg.c
index b72826cf6794ba21450a807222cbc8992201a49b..ff4b26b1cecae2b17221ae602c33a0ebe298172d 100644 (file)
@@ -3,7 +3,7 @@
  * Copyright (C) ST-Ericsson SA 2012
  * Copyright (c) 2012 Sony Mobile Communications AB
  *
- * Charging algorithm driver for abx500 variants
+ * Charging algorithm driver for AB8500
  *
  * Authors:
  *     Johan Palsson <johan.palsson@stericsson.com>
 #define CHARGALG_CURR_STEP_LOW         0
 #define CHARGALG_CURR_STEP_HIGH        100
 
-enum abx500_chargers {
+enum ab8500_chargers {
        NO_CHG,
        AC_CHG,
        USB_CHG,
 };
 
-struct abx500_chargalg_charger_info {
-       enum abx500_chargers conn_chg;
-       enum abx500_chargers prev_conn_chg;
-       enum abx500_chargers online_chg;
-       enum abx500_chargers prev_online_chg;
-       enum abx500_chargers charger_type;
+struct ab8500_chargalg_charger_info {
+       enum ab8500_chargers conn_chg;
+       enum ab8500_chargers prev_conn_chg;
+       enum ab8500_chargers online_chg;
+       enum ab8500_chargers prev_online_chg;
+       enum ab8500_chargers charger_type;
        bool usb_chg_ok;
        bool ac_chg_ok;
        int usb_volt;
@@ -73,18 +73,18 @@ struct abx500_chargalg_charger_info {
        int ac_iset;
 };
 
-struct abx500_chargalg_suspension_status {
+struct ab8500_chargalg_suspension_status {
        bool suspended_change;
        bool ac_suspended;
        bool usb_suspended;
 };
 
-struct abx500_chargalg_current_step_status {
+struct ab8500_chargalg_current_step_status {
        bool curr_step_change;
        int curr_step;
 };
 
-struct abx500_chargalg_battery_data {
+struct ab8500_chargalg_battery_data {
        int temp;
        int volt;
        int avg_curr;
@@ -92,7 +92,7 @@ struct abx500_chargalg_battery_data {
        int percent;
 };
 
-enum abx500_chargalg_states {
+enum ab8500_chargalg_states {
        STATE_HANDHELD_INIT,
        STATE_HANDHELD,
        STATE_CHG_NOT_OK_INIT,
@@ -123,7 +123,7 @@ enum abx500_chargalg_states {
        STATE_WD_EXPIRED,
 };
 
-static const char *states[] = {
+static const char * const states[] = {
        "HANDHELD_INIT",
        "HANDHELD",
        "CHG_NOT_OK_INIT",
@@ -154,7 +154,7 @@ static const char *states[] = {
        "WD_EXPIRED",
 };
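
The states[] change above adds a second const, so both the array of pointers and the strings they reference are immutable, letting the compiler place the whole table in read-only data. A small illustration of the difference:

	static const char *writable_slots[]      = { "A", "B" };
	static const char * const frozen_slots[] = { "A", "B" };

	static void const_demo(void)
	{
		writable_slots[0] = "C";	/* fine: only the chars are const */
		/* frozen_slots[0] = "C"; */	/* error: the pointers are const too */
		(void)frozen_slots;
	}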
 
-struct abx500_chargalg_events {
+struct ab8500_chargalg_events {
        bool batt_unknown;
        bool mainextchnotok;
        bool batt_ovv;
@@ -176,7 +176,7 @@ struct abx500_chargalg_events {
 };
 
 /**
- * struct abx500_charge_curr_maximization - Charger maximization parameters
+ * struct ab8500_charge_curr_maximization - Charger maximization parameters
  * @original_iset:     the non-optimized/maximized charger current
  * @current_iset:      the charging current used at this moment
  * @test_delta_i:      the delta between the current we want to charge and the
@@ -190,7 +190,7 @@ struct abx500_chargalg_events {
  * @level:             tells in how many steps the charging current has been
  *                     increased
  */
-struct abx500_charge_curr_maximization {
+struct ab8500_charge_curr_maximization {
        int original_iset;
        int current_iset;
        int test_delta_i;
@@ -207,7 +207,7 @@ enum maxim_ret {
 };
 
 /**
- * struct abx500_chargalg - abx500 Charging algorithm device information
+ * struct ab8500_chargalg - ab8500 Charging algorithm device information
  * @dev:               pointer to the structure device
  * @charge_status:     battery operating status
  * @eoc_cnt:           counter used to determine end-of-charge
@@ -223,7 +223,7 @@ enum maxim_ret {
  * @susp_status:       current charger suspension status
  * @bm:                Platform specific battery management information
  * @curr_status:       Current step status for over-current protection
- * @parent:            pointer to the struct abx500
+ * @parent:            pointer to the struct ab8500
  * @chargalg_psy:      structure that holds the battery properties exposed by
  *                     the charging algorithm
  * @events:            structure for information about events triggered
@@ -235,25 +235,25 @@ enum maxim_ret {
  * @maintenance_timer:         maintenance charging timer
  * @chargalg_kobject:          structure of type kobject
  */
-struct abx500_chargalg {
+struct ab8500_chargalg {
        struct device *dev;
        int charge_status;
        int eoc_cnt;
        bool maintenance_chg;
        int t_hyst_norm;
        int t_hyst_lowhigh;
-       enum abx500_chargalg_states charge_state;
-       struct abx500_charge_curr_maximization ccm;
-       struct abx500_chargalg_charger_info chg_info;
-       struct abx500_chargalg_battery_data batt_data;
-       struct abx500_chargalg_suspension_status susp_status;
+       enum ab8500_chargalg_states charge_state;
+       struct ab8500_charge_curr_maximization ccm;
+       struct ab8500_chargalg_charger_info chg_info;
+       struct ab8500_chargalg_battery_data batt_data;
+       struct ab8500_chargalg_suspension_status susp_status;
        struct ab8500 *parent;
-       struct abx500_chargalg_current_step_status curr_status;
-       struct abx500_bm_data *bm;
+       struct ab8500_chargalg_current_step_status curr_status;
+       struct ab8500_bm_data *bm;
        struct power_supply *chargalg_psy;
        struct ux500_charger *ac_chg;
        struct ux500_charger *usb_chg;
-       struct abx500_chargalg_events events;
+       struct ab8500_chargalg_events events;
        struct workqueue_struct *chargalg_wq;
        struct delayed_work chargalg_periodic_work;
        struct delayed_work chargalg_wd_work;
@@ -267,28 +267,28 @@ struct abx500_chargalg {
 BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
 
 /* Main battery properties */
-static enum power_supply_property abx500_chargalg_props[] = {
+static enum power_supply_property ab8500_chargalg_props[] = {
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_HEALTH,
 };
 
-struct abx500_chargalg_sysfs_entry {
+struct ab8500_chargalg_sysfs_entry {
        struct attribute attr;
-       ssize_t (*show)(struct abx500_chargalg *, char *);
-       ssize_t (*store)(struct abx500_chargalg *, const char *, size_t);
+       ssize_t (*show)(struct ab8500_chargalg *di, char *buf);
+       ssize_t (*store)(struct ab8500_chargalg *di, const char *buf, size_t length);
 };
 
 /**
- * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
+ * ab8500_chargalg_safety_timer_expired() - Expiration of the safety timer
  * @timer:     pointer to the hrtimer structure
  *
  * This function gets called when the safety timer for the charger
  * expires
  */
 static enum hrtimer_restart
-abx500_chargalg_safety_timer_expired(struct hrtimer *timer)
+ab8500_chargalg_safety_timer_expired(struct hrtimer *timer)
 {
-       struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
+       struct ab8500_chargalg *di = container_of(timer, struct ab8500_chargalg,
                                                  safety_timer);
        dev_err(di->dev, "Safety timer expired\n");
        di->events.safety_timer_expired = true;
@@ -300,7 +300,7 @@ abx500_chargalg_safety_timer_expired(struct hrtimer *timer)
 }
 
 /**
- * abx500_chargalg_maintenance_timer_expired() - Expiration of
+ * ab8500_chargalg_maintenance_timer_expired() - Expiration of
  * the maintenance timer
  * @timer:     pointer to the timer structure
  *
@@ -308,10 +308,10 @@ abx500_chargalg_safety_timer_expired(struct hrtimer *timer)
  * expires
  */
 static enum hrtimer_restart
-abx500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
+ab8500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
 {
 
-       struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
+       struct ab8500_chargalg *di = container_of(timer, struct ab8500_chargalg,
                                                  maintenance_timer);
 
        dev_dbg(di->dev, "Maintenance timer expired\n");
@@ -324,13 +324,13 @@ abx500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
 }
 
 /**
- * abx500_chargalg_state_to() - Change charge state
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_state_to() - Change charge state
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * This function gets called when a charge state change should occur
  */
-static void abx500_chargalg_state_to(struct abx500_chargalg *di,
-       enum abx500_chargalg_states state)
+static void ab8500_chargalg_state_to(struct ab8500_chargalg *di,
+       enum ab8500_chargalg_states state)
 {
        dev_dbg(di->dev,
                "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
@@ -343,7 +343,7 @@ static void abx500_chargalg_state_to(struct abx500_chargalg *di,
        di->charge_state = state;
 }
 
-static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
+static int ab8500_chargalg_check_charger_enable(struct ab8500_chargalg *di)
 {
        switch (di->charge_state) {
        case STATE_NORMAL:
@@ -368,13 +368,13 @@ static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_check_charger_connection() - Check charger connection change
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_check_charger_connection() - Check charger connection change
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * This function will check if there is a change in the charger connection
  * and change charge state accordingly. AC has precedence over USB.
  */
-static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
+static int ab8500_chargalg_check_charger_connection(struct ab8500_chargalg *di)
 {
        if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
                di->susp_status.suspended_change) {
@@ -387,23 +387,23 @@ static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
                        dev_dbg(di->dev, "Charging source is AC\n");
                        if (di->chg_info.charger_type != AC_CHG) {
                                di->chg_info.charger_type = AC_CHG;
-                               abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                               ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                        }
                } else if ((di->chg_info.conn_chg & USB_CHG) &&
                        !di->susp_status.usb_suspended) {
                        dev_dbg(di->dev, "Charging source is USB\n");
                        di->chg_info.charger_type = USB_CHG;
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                } else if (di->chg_info.conn_chg &&
                        (di->susp_status.ac_suspended ||
                        di->susp_status.usb_suspended)) {
                        dev_dbg(di->dev, "Charging is suspended\n");
                        di->chg_info.charger_type = NO_CHG;
-                       abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
+                       ab8500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
                } else {
                        dev_dbg(di->dev, "Charging source is OFF\n");
                        di->chg_info.charger_type = NO_CHG;
-                       abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+                       ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
                }
                di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
                di->susp_status.suspended_change = false;
@@ -412,29 +412,29 @@ static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_check_current_step_status() - Check charging current
+ * ab8500_chargalg_check_current_step_status() - Check charging current
  * step status.
- * @di:                pointer to the abx500_chargalg structure
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * This function will check if there is a change in the charging current step
  * and change charge state accordingly.
  */
-static void abx500_chargalg_check_current_step_status
-       (struct abx500_chargalg *di)
+static void ab8500_chargalg_check_current_step_status
+       (struct ab8500_chargalg *di)
 {
        if (di->curr_status.curr_step_change)
-               abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+               ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
        di->curr_status.curr_step_change = false;
 }
 
 /**
- * abx500_chargalg_start_safety_timer() - Start charging safety timer
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_start_safety_timer() - Start charging safety timer
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * The safety timer is used to avoid overcharging of old or bad batteries.
  * There are different timers for AC and USB
  */
-static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
+static void ab8500_chargalg_start_safety_timer(struct ab8500_chargalg *di)
 {
        /* Charger-dependent expiration time in hours */
        int timer_expiration = 0;
@@ -461,27 +461,27 @@ static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_stop_safety_timer() - Stop charging safety timer
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * The safety timer is stopped whenever the NORMAL state is exited
  */
-static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
+static void ab8500_chargalg_stop_safety_timer(struct ab8500_chargalg *di)
 {
        if (hrtimer_try_to_cancel(&di->safety_timer) >= 0)
                di->events.safety_timer_expired = false;
 }
 
 /**
- * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_start_maintenance_timer() - Start charging maintenance timer
+ * @di:                pointer to the ab8500_chargalg structure
  * @duration:  duration of the maintenance timer in hours
  *
  * The maintenance timer is used to maintain the charge in the battery once
  * the battery is considered full. These timers are chosen to match the
  * discharge curve of the battery
  */
-static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
+static void ab8500_chargalg_start_maintenance_timer(struct ab8500_chargalg *di,
        int duration)
 {
        hrtimer_set_expires_range(&di->maintenance_timer,
@@ -492,26 +492,26 @@ static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
 }
 
 /**
- * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_stop_maintenance_timer() - Stop maintenance timer
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * The maintenance timer is stopped whenever maintenance ends or when another
  * state is entered
  */
-static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
+static void ab8500_chargalg_stop_maintenance_timer(struct ab8500_chargalg *di)
 {
        if (hrtimer_try_to_cancel(&di->maintenance_timer) >= 0)
                di->events.maintenance_timer_expired = false;
 }
 
 /**
- * abx500_chargalg_kick_watchdog() - Kick charger watchdog
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_kick_watchdog() - Kick charger watchdog
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * The charger watchdog has to be kicked periodically whenever the charger is
  * on, else the ABB will reset the system
  */
-static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
+static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
 {
        /* Check if charger exists and kick watchdog if charging */
        if (di->ac_chg && di->ac_chg->ops.kick_wd &&
@@ -526,8 +526,7 @@ static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
                        di->usb_chg->ops.kick_wd(di->usb_chg);
 
                return di->ac_chg->ops.kick_wd(di->ac_chg);
-       }
-       else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
+       } else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
                        di->chg_info.online_chg & USB_CHG)
                return di->usb_chg->ops.kick_wd(di->usb_chg);
 
@@ -535,8 +534,8 @@ static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
 }
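
ab8500_chargalg_kick_watchdog() is driven from the chargalg_wd_work delayed work declared in the device struct above. A sketch of the usual self-rescheduling pattern such a work function follows, not necessarily the driver's exact body; the CHG_WD_INTERVAL constant is assumed for illustration and is not shown in these hunks:

	/* Illustrative only: kick the charger watchdog, then requeue
	 * ourselves so the ABB is kicked again before it can reset. */
	static void ab8500_chargalg_wd_work(struct work_struct *work)
	{
		struct ab8500_chargalg *di = container_of(work,
			struct ab8500_chargalg, chargalg_wd_work.work);

		if (ab8500_chargalg_kick_watchdog(di) < 0)
			dev_err(di->dev, "failed to kick watchdog\n");

		queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work,
				   CHG_WD_INTERVAL);	/* assumed period */
	}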
 
 /**
- * abx500_chargalg_ac_en() - Turn on/off the AC charger
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_ac_en() - Turn on/off the AC charger
+ * @di:                pointer to the ab8500_chargalg structure
  * @enable:    charger on/off
  * @vset:      requested charger output voltage
  * @iset:      requested charger output current
@@ -544,10 +543,10 @@ static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
  * The AC charger will be turned on/off with the requested charge voltage and
  * current
  */
-static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
+static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
        int vset, int iset)
 {
-       static int abx500_chargalg_ex_ac_enable_toggle;
+       static int ab8500_chargalg_ex_ac_enable_toggle;
 
        if (!di->ac_chg || !di->ac_chg->ops.enable)
                return -ENXIO;
@@ -563,18 +562,18 @@ static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
 
        /* Enable external charger */
        if (enable && di->ac_chg->external &&
-           !abx500_chargalg_ex_ac_enable_toggle) {
+           !ab8500_chargalg_ex_ac_enable_toggle) {
                blocking_notifier_call_chain(&charger_notifier_list,
                                             0, di->dev);
-               abx500_chargalg_ex_ac_enable_toggle++;
+               ab8500_chargalg_ex_ac_enable_toggle++;
        }
 
        return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
 }
 
 /**
- * abx500_chargalg_usb_en() - Turn on/off the USB charger
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_usb_en() - Turn on/off the USB charger
+ * @di:                pointer to the ab8500_chargalg structure
  * @enable:    charger on/off
  * @vset:      requested charger output voltage
  * @iset:      requested charger output current
@@ -582,7 +581,7 @@ static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
  * The USB charger will be turned on/off with the requested charge voltage and
  * current
  */
-static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
+static int ab8500_chargalg_usb_en(struct ab8500_chargalg *di, int enable,
        int vset, int iset)
 {
        if (!di->usb_chg || !di->usb_chg->ops.enable)
@@ -601,14 +600,14 @@ static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
 }
 
 /**
- * abx500_chargalg_update_chg_curr() - Update charger current
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_update_chg_curr() - Update charger current
+ * @di:                pointer to the ab8500_chargalg structure
  * @iset:      requested charger output current
  *
  * The charger output current will be updated for the charger
  * that is currently in use
  */
-static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
+static int ab8500_chargalg_update_chg_curr(struct ab8500_chargalg *di,
                int iset)
 {
        /* Check if charger exists and update current if charging */
@@ -642,19 +641,19 @@ static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
 }
 
 /**
- * abx500_chargalg_stop_charging() - Stop charging
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_stop_charging() - Stop charging
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * This function is called from any state where charging should be stopped.
  * All charging is disabled and all status parameters and timers are changed
  * accordingly
  */
-static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
+static void ab8500_chargalg_stop_charging(struct ab8500_chargalg *di)
 {
-       abx500_chargalg_ac_en(di, false, 0, 0);
-       abx500_chargalg_usb_en(di, false, 0, 0);
-       abx500_chargalg_stop_safety_timer(di);
-       abx500_chargalg_stop_maintenance_timer(di);
+       ab8500_chargalg_ac_en(di, false, 0, 0);
+       ab8500_chargalg_usb_en(di, false, 0, 0);
+       ab8500_chargalg_stop_safety_timer(di);
+       ab8500_chargalg_stop_maintenance_timer(di);
        di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
        di->maintenance_chg = false;
        cancel_delayed_work(&di->chargalg_wd_work);
@@ -662,19 +661,19 @@ static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_hold_charging() - Pauses charging
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_hold_charging() - Pauses charging
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * This function is called in the case where maintenance charging has been
  * disabled and instead a battery voltage mode is entered to check when the
  * battery voltage has reached a certain recharge voltage
  */
-static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
+static void ab8500_chargalg_hold_charging(struct ab8500_chargalg *di)
 {
-       abx500_chargalg_ac_en(di, false, 0, 0);
-       abx500_chargalg_usb_en(di, false, 0, 0);
-       abx500_chargalg_stop_safety_timer(di);
-       abx500_chargalg_stop_maintenance_timer(di);
+       ab8500_chargalg_ac_en(di, false, 0, 0);
+       ab8500_chargalg_usb_en(di, false, 0, 0);
+       ab8500_chargalg_stop_safety_timer(di);
+       ab8500_chargalg_stop_maintenance_timer(di);
        di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
        di->maintenance_chg = false;
        cancel_delayed_work(&di->chargalg_wd_work);
@@ -682,30 +681,30 @@ static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_start_charging() - Start the charger
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_start_charging() - Start the charger
+ * @di:                pointer to the ab8500_chargalg structure
  * @vset:      requested charger output voltage
  * @iset:      requested charger output current
  *
  * A charger will be enabled depending on the requested charger type that was
  * detected previously.
  */
-static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
+static void ab8500_chargalg_start_charging(struct ab8500_chargalg *di,
        int vset, int iset)
 {
        switch (di->chg_info.charger_type) {
        case AC_CHG:
                dev_dbg(di->dev,
                        "AC parameters: Vset %d, Ich %d\n", vset, iset);
-               abx500_chargalg_usb_en(di, false, 0, 0);
-               abx500_chargalg_ac_en(di, true, vset, iset);
+               ab8500_chargalg_usb_en(di, false, 0, 0);
+               ab8500_chargalg_ac_en(di, true, vset, iset);
                break;
 
        case USB_CHG:
                dev_dbg(di->dev,
                        "USB parameters: Vset %d, Ich %d\n", vset, iset);
-               abx500_chargalg_ac_en(di, false, 0, 0);
-               abx500_chargalg_usb_en(di, true, vset, iset);
+               ab8500_chargalg_ac_en(di, false, 0, 0);
+               ab8500_chargalg_usb_en(di, true, vset, iset);
                break;
 
        default:
@@ -715,13 +714,13 @@ static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
 }
 
 /**
- * abx500_chargalg_check_temp() - Check battery temperature ranges
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_check_temp() - Check battery temperature ranges
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * The battery temperature is checked against the predefined limits and the
  * charge state is changed accordingly
  */
-static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
+static void ab8500_chargalg_check_temp(struct ab8500_chargalg *di)
 {
        if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
                di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
@@ -750,8 +749,8 @@ static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
                        di->t_hyst_norm = 0;
                        di->t_hyst_lowhigh = di->bm->temp_hysteresis;
                } else {
-               /* Within hysteresis */
-               dev_dbg(di->dev, "Within hysteresis limit temp: %d "
+                       /* Within hysteresis */
+                       dev_dbg(di->dev, "Within hysteresis limit temp: %d "
                                "hyst_lowhigh %d, hyst normal %d\n",
                                di->batt_data.temp, di->t_hyst_lowhigh,
                                di->t_hyst_norm);
@@ -760,12 +759,12 @@ static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_check_charger_voltage() - Check charger voltage
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_check_charger_voltage() - Check charger voltage
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * Charger voltage is checked against maximum limit
  */
-static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
+static void ab8500_chargalg_check_charger_voltage(struct ab8500_chargalg *di)
 {
        if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
                di->chg_info.usb_chg_ok = false;
@@ -780,14 +779,14 @@ static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_end_of_charge() - Check if end-of-charge criteria are fulfilled
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * End-of-charge criteria are fulfilled when the battery voltage is above a
  * certain limit and the battery current is below a certain limit for a
  * predefined number of consecutive seconds. If so, the battery is full
  */
-static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
+static void ab8500_chargalg_end_of_charge(struct ab8500_chargalg *di)
 {
        if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
                di->charge_state == STATE_NORMAL &&
@@ -815,7 +814,7 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
        }
 }
 
-static void init_maxim_chg_curr(struct abx500_chargalg *di)
+static void init_maxim_chg_curr(struct ab8500_chargalg *di)
 {
        di->ccm.original_iset =
                di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
@@ -828,15 +827,15 @@ static void init_maxim_chg_curr(struct abx500_chargalg *di)
 }
 
 /**
- * abx500_chargalg_chg_curr_maxim - increases the charger current to
+ * ab8500_chargalg_chg_curr_maxim() - increases the charger current to
  *                     compensate for the system load
- * @di         pointer to the abx500_chargalg structure
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * This maximization function is used to raise the charger current to get the
  * battery current as close to the optimal value as possible. The battery
  * current during charging is affected by the system load
  */
-static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
+static enum maxim_ret ab8500_chargalg_chg_curr_maxim(struct ab8500_chargalg *di)
 {
        int delta_i;
 
@@ -867,7 +866,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
 
        di->ccm.wait_cnt = 0;
 
-       if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
+       if (di->batt_data.inst_curr > di->ccm.original_iset) {
                dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
                        " (limit %dmA) (current iset: %dmA)!\n",
                        di->batt_data.inst_curr, di->ccm.original_iset,
@@ -908,21 +907,21 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
        }
 }
 
-static void handle_maxim_chg_curr(struct abx500_chargalg *di)
+static void handle_maxim_chg_curr(struct ab8500_chargalg *di)
 {
        enum maxim_ret ret;
        int result;
 
-       ret = abx500_chargalg_chg_curr_maxim(di);
+       ret = ab8500_chargalg_chg_curr_maxim(di);
        switch (ret) {
        case MAXIM_RET_CHANGE:
-               result = abx500_chargalg_update_chg_curr(di,
+               result = ab8500_chargalg_update_chg_curr(di,
                        di->ccm.current_iset);
                if (result)
                        dev_err(di->dev, "failed to set chg curr\n");
                break;
        case MAXIM_RET_IBAT_TOO_HIGH:
-               result = abx500_chargalg_update_chg_curr(di,
+               result = ab8500_chargalg_update_chg_curr(di,
                        di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
                if (result)
                        dev_err(di->dev, "failed to set chg curr\n");
@@ -935,12 +934,12 @@ static void handle_maxim_chg_curr(struct abx500_chargalg *di)
        }
 }
 
-static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
+static int ab8500_chargalg_get_ext_psy_data(struct device *dev, void *data)
 {
        struct power_supply *psy;
        struct power_supply *ext = dev_get_drvdata(dev);
        const char **supplicants = (const char **)ext->supplied_to;
-       struct abx500_chargalg *di;
+       struct ab8500_chargalg *di;
        union power_supply_propval ret;
        int j;
        bool capacity_updated = false;
@@ -1260,7 +1259,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
 }
 
 /**
- * abx500_chargalg_external_power_changed() - callback for power supply changes
+ * ab8500_chargalg_external_power_changed() - callback for power supply changes
  * @psy:       pointer to the structure power_supply
  *
  * This function is the entry point of the pointer external_power_changed
@@ -1268,26 +1267,27 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
  * This function gets executed when there is a change in any external power
  * supply that this driver needs to be notified of.
  */
-static void abx500_chargalg_external_power_changed(struct power_supply *psy)
+static void ab8500_chargalg_external_power_changed(struct power_supply *psy)
 {
-       struct abx500_chargalg *di = power_supply_get_drvdata(psy);
+       struct ab8500_chargalg *di = power_supply_get_drvdata(psy);
 
        /*
         * Trigger execution of the algorithm instantly and read
         * all power_supply properties there instead
         */
-       queue_work(di->chargalg_wq, &di->chargalg_work);
+       if (di->chargalg_wq)
+               queue_work(di->chargalg_wq, &di->chargalg_work);
 }
 
 /**
- * abx500_chargalg_algorithm() - Main function for the algorithm
- * @di:                pointer to the abx500_chargalg structure
+ * ab8500_chargalg_algorithm() - Main function for the algorithm
+ * @di:                pointer to the ab8500_chargalg structure
  *
  * This is the main control function for the charging algorithm.
  * It is called periodically or when something happens that will
  * trigger a state change
  */
-static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
+static void ab8500_chargalg_algorithm(struct ab8500_chargalg *di)
 {
        int charger_status;
        int ret;
@@ -1295,17 +1295,17 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 
        /* Collect data from all power_supply class devices */
        class_for_each_device(power_supply_class, NULL,
-               di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
+               di->chargalg_psy, ab8500_chargalg_get_ext_psy_data);
 
-       abx500_chargalg_end_of_charge(di);
-       abx500_chargalg_check_temp(di);
-       abx500_chargalg_check_charger_voltage(di);
+       ab8500_chargalg_end_of_charge(di);
+       ab8500_chargalg_check_temp(di);
+       ab8500_chargalg_check_charger_voltage(di);
 
-       charger_status = abx500_chargalg_check_charger_connection(di);
-       abx500_chargalg_check_current_step_status(di);
+       charger_status = ab8500_chargalg_check_charger_connection(di);
+       ab8500_chargalg_check_current_step_status(di);
 
        if (is_ab8500(di->parent)) {
-               ret = abx500_chargalg_check_charger_enable(di);
+               ret = ab8500_chargalg_check_charger_enable(di);
                if (ret < 0)
                        dev_err(di->dev, "Checking charger is enabled error"
                                        ": Returned Value %d\n", ret);
@@ -1320,7 +1320,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
                (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
                if (di->charge_state != STATE_HANDHELD) {
                        di->events.safety_timer_expired = false;
-                       abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
+                       ab8500_chargalg_state_to(di, STATE_HANDHELD_INIT);
                }
        }
 
@@ -1333,7 +1333,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
        /* Safety timer expiration */
        else if (di->events.safety_timer_expired) {
                if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
-                       abx500_chargalg_state_to(di,
+                       ab8500_chargalg_state_to(di,
                                STATE_SAFETY_TIMER_EXPIRED_INIT);
        }
        /*
@@ -1344,7 +1344,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
        /* Battery removed */
        else if (di->events.batt_rem) {
                if (di->charge_state != STATE_BATT_REMOVED)
-                       abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
+                       ab8500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
        }
        /* Main or USB charger not ok. */
        else if (di->events.mainextchnotok || di->events.usbchargernotok) {
@@ -1354,7 +1354,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
                 */
                if (di->charge_state != STATE_CHG_NOT_OK &&
                                !di->events.vbus_collapsed)
-                       abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
+                       ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
        }
        /* VBUS, Main or VBAT OVV. */
        else if (di->events.vbus_ovv ||
@@ -1363,31 +1363,31 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
                        !di->chg_info.usb_chg_ok ||
                        !di->chg_info.ac_chg_ok) {
                if (di->charge_state != STATE_OVV_PROTECT)
-                       abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
+                       ab8500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
        }
        /* USB Thermal, stop charging */
        else if (di->events.main_thermal_prot ||
                di->events.usb_thermal_prot) {
                if (di->charge_state != STATE_HW_TEMP_PROTECT)
-                       abx500_chargalg_state_to(di,
+                       ab8500_chargalg_state_to(di,
                                STATE_HW_TEMP_PROTECT_INIT);
        }
        /* Battery temp over/under */
        else if (di->events.btemp_underover) {
                if (di->charge_state != STATE_TEMP_UNDEROVER)
-                       abx500_chargalg_state_to(di,
+                       ab8500_chargalg_state_to(di,
                                STATE_TEMP_UNDEROVER_INIT);
        }
        /* Watchdog expired */
        else if (di->events.ac_wd_expired ||
                di->events.usb_wd_expired) {
                if (di->charge_state != STATE_WD_EXPIRED)
-                       abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
+                       ab8500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
        }
        /* Battery temp high/low */
        else if (di->events.btemp_lowhigh) {
                if (di->charge_state != STATE_TEMP_LOWHIGH)
-                       abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
+                       ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
        }
 
        dev_dbg(di->dev,
@@ -1419,9 +1419,9 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 
        switch (di->charge_state) {
        case STATE_HANDHELD_INIT:
-               abx500_chargalg_stop_charging(di);
+               ab8500_chargalg_stop_charging(di);
                di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
-               abx500_chargalg_state_to(di, STATE_HANDHELD);
+               ab8500_chargalg_state_to(di, STATE_HANDHELD);
                fallthrough;
 
        case STATE_HANDHELD:
@@ -1429,14 +1429,14 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 
        case STATE_SUSPENDED_INIT:
                if (di->susp_status.ac_suspended)
-                       abx500_chargalg_ac_en(di, false, 0, 0);
+                       ab8500_chargalg_ac_en(di, false, 0, 0);
                if (di->susp_status.usb_suspended)
-                       abx500_chargalg_usb_en(di, false, 0, 0);
-               abx500_chargalg_stop_safety_timer(di);
-               abx500_chargalg_stop_maintenance_timer(di);
+                       ab8500_chargalg_usb_en(di, false, 0, 0);
+               ab8500_chargalg_stop_safety_timer(di);
+               ab8500_chargalg_stop_maintenance_timer(di);
                di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
                di->maintenance_chg = false;
-               abx500_chargalg_state_to(di, STATE_SUSPENDED);
+               ab8500_chargalg_state_to(di, STATE_SUSPENDED);
                power_supply_changed(di->chargalg_psy);
                fallthrough;
 
@@ -1445,29 +1445,29 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
                break;
 
        case STATE_BATT_REMOVED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_BATT_REMOVED);
                fallthrough;
 
        case STATE_BATT_REMOVED:
                if (!di->events.batt_rem)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_HW_TEMP_PROTECT_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
                fallthrough;
 
        case STATE_HW_TEMP_PROTECT:
                if (!di->events.main_thermal_prot &&
                                !di->events.usb_thermal_prot)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_OVV_PROTECT_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_OVV_PROTECT);
                fallthrough;
 
        case STATE_OVV_PROTECT:
@@ -1476,23 +1476,23 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
                                !di->events.batt_ovv &&
                                di->chg_info.usb_chg_ok &&
                                di->chg_info.ac_chg_ok)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_CHG_NOT_OK_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_CHG_NOT_OK);
                fallthrough;
 
        case STATE_CHG_NOT_OK:
                if (!di->events.mainextchnotok &&
                                !di->events.usbchargernotok)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_SAFETY_TIMER_EXPIRED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
                fallthrough;
 
        case STATE_SAFETY_TIMER_EXPIRED:
@@ -1501,20 +1501,20 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 
        case STATE_NORMAL_INIT:
                if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
-                       abx500_chargalg_stop_charging(di);
+                       ab8500_chargalg_stop_charging(di);
                else {
                        curr_step_lvl = di->bm->bat_type[
                                di->bm->batt_id].normal_cur_lvl
                                * di->curr_status.curr_step
                                / CHARGALG_CURR_STEP_HIGH;
-                       abx500_chargalg_start_charging(di,
+                       ab8500_chargalg_start_charging(di,
                                di->bm->bat_type[di->bm->batt_id]
                                .normal_vol_lvl, curr_step_lvl);
                }
 
-               abx500_chargalg_state_to(di, STATE_NORMAL);
-               abx500_chargalg_start_safety_timer(di);
-               abx500_chargalg_stop_maintenance_timer(di);
+               ab8500_chargalg_state_to(di, STATE_NORMAL);
+               ab8500_chargalg_start_safety_timer(di);
+               ab8500_chargalg_stop_maintenance_timer(di);
                init_maxim_chg_curr(di);
                di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
                di->eoc_cnt = 0;
@@ -1528,104 +1528,103 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
                if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
                        di->maintenance_chg) {
                        if (di->bm->no_maintenance)
-                               abx500_chargalg_state_to(di,
+                               ab8500_chargalg_state_to(di,
                                        STATE_WAIT_FOR_RECHARGE_INIT);
                        else
-                               abx500_chargalg_state_to(di,
+                               ab8500_chargalg_state_to(di,
                                        STATE_MAINTENANCE_A_INIT);
                }
                break;
 
        /* This state will be used when the maintenance state is disabled */
        case STATE_WAIT_FOR_RECHARGE_INIT:
-               abx500_chargalg_hold_charging(di);
-               abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
+               ab8500_chargalg_hold_charging(di);
+               ab8500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
                fallthrough;
 
        case STATE_WAIT_FOR_RECHARGE:
                if (di->batt_data.percent <=
-                   di->bm->bat_type[di->bm->batt_id].
-                   recharge_cap)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                   di->bm->bat_type[di->bm->batt_id].recharge_cap)
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_MAINTENANCE_A_INIT:
-               abx500_chargalg_stop_safety_timer(di);
-               abx500_chargalg_start_maintenance_timer(di,
+               ab8500_chargalg_stop_safety_timer(di);
+               ab8500_chargalg_start_maintenance_timer(di,
                        di->bm->bat_type[
                                di->bm->batt_id].maint_a_chg_timer_h);
-               abx500_chargalg_start_charging(di,
+               ab8500_chargalg_start_charging(di,
                        di->bm->bat_type[
                                di->bm->batt_id].maint_a_vol_lvl,
                        di->bm->bat_type[
                                di->bm->batt_id].maint_a_cur_lvl);
-               abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
+               ab8500_chargalg_state_to(di, STATE_MAINTENANCE_A);
                power_supply_changed(di->chargalg_psy);
                fallthrough;
 
        case STATE_MAINTENANCE_A:
                if (di->events.maintenance_timer_expired) {
-                       abx500_chargalg_stop_maintenance_timer(di);
-                       abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
+                       ab8500_chargalg_stop_maintenance_timer(di);
+                       ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
                }
                break;
 
        case STATE_MAINTENANCE_B_INIT:
-               abx500_chargalg_start_maintenance_timer(di,
+               ab8500_chargalg_start_maintenance_timer(di,
                        di->bm->bat_type[
                                di->bm->batt_id].maint_b_chg_timer_h);
-               abx500_chargalg_start_charging(di,
+               ab8500_chargalg_start_charging(di,
                        di->bm->bat_type[
                                di->bm->batt_id].maint_b_vol_lvl,
                        di->bm->bat_type[
                                di->bm->batt_id].maint_b_cur_lvl);
-               abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
+               ab8500_chargalg_state_to(di, STATE_MAINTENANCE_B);
                power_supply_changed(di->chargalg_psy);
                fallthrough;
 
        case STATE_MAINTENANCE_B:
                if (di->events.maintenance_timer_expired) {
-                       abx500_chargalg_stop_maintenance_timer(di);
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_stop_maintenance_timer(di);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                }
                break;
 
        case STATE_TEMP_LOWHIGH_INIT:
-               abx500_chargalg_start_charging(di,
+               ab8500_chargalg_start_charging(di,
                        di->bm->bat_type[
                                di->bm->batt_id].low_high_vol_lvl,
                        di->bm->bat_type[
                                di->bm->batt_id].low_high_cur_lvl);
-               abx500_chargalg_stop_maintenance_timer(di);
+               ab8500_chargalg_stop_maintenance_timer(di);
                di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
-               abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
+               ab8500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
                power_supply_changed(di->chargalg_psy);
                fallthrough;
 
        case STATE_TEMP_LOWHIGH:
                if (!di->events.btemp_lowhigh)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_WD_EXPIRED_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_WD_EXPIRED);
                fallthrough;
 
        case STATE_WD_EXPIRED:
                if (!di->events.ac_wd_expired &&
                                !di->events.usb_wd_expired)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_TEMP_UNDEROVER_INIT:
-               abx500_chargalg_stop_charging(di);
-               abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
+               ab8500_chargalg_stop_charging(di);
+               ab8500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
                fallthrough;
 
        case STATE_TEMP_UNDEROVER:
                if (!di->events.btemp_underover)
-                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+                       ab8500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
        }
 
@@ -1637,17 +1636,17 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 }
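
The state machine above pairs every steady state with a one-shot *_INIT state: the INIT case performs the entry actions (start/stop timers, reprogram the charger), records the new state, and then falls through so the same pass immediately evaluates the steady-state case. A minimal sketch of the idiom, with placeholder names:

	switch (state) {
	case STATE_FOO_INIT:
		do_entry_actions();	/* one-shot work on entry */
		state = STATE_FOO;
		fallthrough;		/* same pass handles the steady state */
	case STATE_FOO:
		if (leave_condition)
			state = STATE_BAR_INIT;	/* next pass runs BAR's entry actions */
		break;
	}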
 
 /**
- * abx500_chargalg_periodic_work() - Periodic work for the algorithm
+ * ab8500_chargalg_periodic_work() - Periodic work for the algorithm
  * @work:      pointer to the work_struct structure
  *
  * Work queue function for the charging algorithm
  */
-static void abx500_chargalg_periodic_work(struct work_struct *work)
+static void ab8500_chargalg_periodic_work(struct work_struct *work)
 {
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_periodic_work.work);
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_periodic_work.work);
 
-       abx500_chargalg_algorithm(di);
+       ab8500_chargalg_algorithm(di);
 
        /*
         * If a charger is connected then the battery has to be monitored
@@ -1664,20 +1663,18 @@ static void abx500_chargalg_periodic_work(struct work_struct *work)
 }
 
 /**
- * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
+ * ab8500_chargalg_wd_work() - periodic work to kick the charger watchdog
  * @work:      pointer to the work_struct structure
  *
  * Work queue function for kicking the charger watchdog
  */
-static void abx500_chargalg_wd_work(struct work_struct *work)
+static void ab8500_chargalg_wd_work(struct work_struct *work)
 {
        int ret;
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_wd_work.work);
-
-       dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_wd_work.work);
 
-       ret = abx500_chargalg_kick_watchdog(di);
+       ret = ab8500_chargalg_kick_watchdog(di);
        if (ret < 0)
                dev_err(di->dev, "failed to kick watchdog\n");
 
@@ -1686,21 +1683,21 @@ static void abx500_chargalg_wd_work(struct work_struct *work)
 }
 
 /**
- * abx500_chargalg_work() - Work to run the charging algorithm instantly
+ * ab8500_chargalg_work() - Work to run the charging algorithm instantly
  * @work:      pointer to the work_struct structure
  *
  * Work queue function for calling the charging algorithm
  */
-static void abx500_chargalg_work(struct work_struct *work)
+static void ab8500_chargalg_work(struct work_struct *work)
 {
-       struct abx500_chargalg *di = container_of(work,
-               struct abx500_chargalg, chargalg_work);
+       struct ab8500_chargalg *di = container_of(work,
+               struct ab8500_chargalg, chargalg_work);
 
-       abx500_chargalg_algorithm(di);
+       ab8500_chargalg_algorithm(di);
 }
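
All three work functions recover the driver instance from the embedded work item with container_of(); for the delayed variants the work_struct sits inside a delayed_work, hence the ".work" member in the expression. A self-contained sketch of the pattern, with placeholder names:

	#include <linux/workqueue.h>

	struct my_drv {
		struct workqueue_struct *wq;
		struct delayed_work poll_work;
	};

	static void my_poll_work(struct work_struct *work)
	{
		struct my_drv *d = container_of(work, struct my_drv,
						poll_work.work);

		/* ... do the periodic job, then re-arm: */
		queue_delayed_work(d->wq, &d->poll_work, HZ);
	}

	/* setup, as in bind()/probe():
	 *   d->wq = alloc_ordered_workqueue("my_wq", WQ_MEM_RECLAIM);
	 *   INIT_DELAYED_WORK(&d->poll_work, my_poll_work);
	 */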
 
 /**
- * abx500_chargalg_get_property() - get the chargalg properties
+ * ab8500_chargalg_get_property() - get the chargalg properties
  * @psy:       pointer to the power_supply structure
  * @psp:       pointer to the power_supply_property structure
  * @val:       pointer to the power_supply_propval union
@@ -1711,11 +1708,11 @@ static void abx500_chargalg_work(struct work_struct *work)
  * health:     health of the battery
  * Returns error code in case of failure else 0 on success
  */
-static int abx500_chargalg_get_property(struct power_supply *psy,
+static int ab8500_chargalg_get_property(struct power_supply *psy,
        enum power_supply_property psp,
        union power_supply_propval *val)
 {
-       struct abx500_chargalg *di = power_supply_get_drvdata(psy);
+       struct ab8500_chargalg *di = power_supply_get_drvdata(psy);
 
        switch (psp) {
        case POWER_SUPPLY_PROP_STATUS:
@@ -1744,16 +1741,16 @@ static int abx500_chargalg_get_property(struct power_supply *psy,
 
 /* Exposure to the sysfs interface */
 
-static ssize_t abx500_chargalg_curr_step_show(struct abx500_chargalg *di,
+static ssize_t ab8500_chargalg_curr_step_show(struct ab8500_chargalg *di,
                                              char *buf)
 {
        return sprintf(buf, "%d\n", di->curr_status.curr_step);
 }
 
-static ssize_t abx500_chargalg_curr_step_store(struct abx500_chargalg *di,
+static ssize_t ab8500_chargalg_curr_step_store(struct ab8500_chargalg *di,
                                               const char *buf, size_t length)
 {
-       long int param;
+       long param;
        int ret;
 
        ret = kstrtol(buf, 10, &param);
@@ -1775,7 +1772,7 @@ static ssize_t abx500_chargalg_curr_step_store(struct abx500_chargalg *di,
 }
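
The store hooks parse the user buffer with kstrtol() (note that "long int" is also normalized to plain "long" in this patch). A minimal sketch of the parse-validate-apply shape such a handler takes, with placeholder bounds:

	static ssize_t my_store(struct my_drv *d, const char *buf, size_t length)
	{
		long param;
		int ret;

		ret = kstrtol(buf, 10, &param);	/* strict base-10 parse */
		if (ret < 0)
			return ret;
		if (param < MY_MIN || param > MY_MAX)	/* placeholder limits */
			return -EINVAL;
		d->setting = param;
		return length;	/* consume the whole write */
	}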
 
 
-static ssize_t abx500_chargalg_en_show(struct abx500_chargalg *di,
+static ssize_t ab8500_chargalg_en_show(struct ab8500_chargalg *di,
                                       char *buf)
 {
        return sprintf(buf, "%d\n",
@@ -1783,10 +1780,10 @@ static ssize_t abx500_chargalg_en_show(struct abx500_chargalg *di,
                       di->susp_status.usb_suspended);
 }
 
-static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
+static ssize_t ab8500_chargalg_en_store(struct ab8500_chargalg *di,
        const char *buf, size_t length)
 {
-       long int param;
+       long param;
        int ac_usb;
        int ret;
 
@@ -1830,22 +1827,22 @@ static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
        return strlen(buf);
 }
 
-static struct abx500_chargalg_sysfs_entry abx500_chargalg_en_charger =
-       __ATTR(chargalg, 0644, abx500_chargalg_en_show,
-                               abx500_chargalg_en_store);
+static struct ab8500_chargalg_sysfs_entry ab8500_chargalg_en_charger =
+       __ATTR(chargalg, 0644, ab8500_chargalg_en_show,
+                               ab8500_chargalg_en_store);
 
-static struct abx500_chargalg_sysfs_entry abx500_chargalg_curr_step =
-       __ATTR(chargalg_curr_step, 0644, abx500_chargalg_curr_step_show,
-                                       abx500_chargalg_curr_step_store);
+static struct ab8500_chargalg_sysfs_entry ab8500_chargalg_curr_step =
+       __ATTR(chargalg_curr_step, 0644, ab8500_chargalg_curr_step_show,
+                                       ab8500_chargalg_curr_step_store);
 
-static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
+static ssize_t ab8500_chargalg_sysfs_show(struct kobject *kobj,
        struct attribute *attr, char *buf)
 {
-       struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
-               struct abx500_chargalg_sysfs_entry, attr);
+       struct ab8500_chargalg_sysfs_entry *entry = container_of(attr,
+               struct ab8500_chargalg_sysfs_entry, attr);
 
-       struct abx500_chargalg *di = container_of(kobj,
-               struct abx500_chargalg, chargalg_kobject);
+       struct ab8500_chargalg *di = container_of(kobj,
+               struct ab8500_chargalg, chargalg_kobject);
 
        if (!entry->show)
                return -EIO;
@@ -1853,14 +1850,14 @@ static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
        return entry->show(di, buf);
 }
 
-static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
+static ssize_t ab8500_chargalg_sysfs_charger(struct kobject *kobj,
        struct attribute *attr, const char *buf, size_t length)
 {
-       struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
-               struct abx500_chargalg_sysfs_entry, attr);
+       struct ab8500_chargalg_sysfs_entry *entry = container_of(attr,
+               struct ab8500_chargalg_sysfs_entry, attr);
 
-       struct abx500_chargalg *di = container_of(kobj,
-               struct abx500_chargalg, chargalg_kobject);
+       struct ab8500_chargalg *di = container_of(kobj,
+               struct ab8500_chargalg, chargalg_kobject);
 
        if (!entry->store)
                return -EIO;
@@ -1868,47 +1865,47 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
        return entry->store(di, buf, length);
 }
 
-static struct attribute *abx500_chargalg_chg[] = {
-       &abx500_chargalg_en_charger.attr,
-       &abx500_chargalg_curr_step.attr,
+static struct attribute *ab8500_chargalg_chg[] = {
+       &ab8500_chargalg_en_charger.attr,
+       &ab8500_chargalg_curr_step.attr,
        NULL,
 };
 
-static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
-       .show = abx500_chargalg_sysfs_show,
-       .store = abx500_chargalg_sysfs_charger,
+static const struct sysfs_ops ab8500_chargalg_sysfs_ops = {
+       .show = ab8500_chargalg_sysfs_show,
+       .store = ab8500_chargalg_sysfs_charger,
 };
 
-static struct kobj_type abx500_chargalg_ktype = {
-       .sysfs_ops = &abx500_chargalg_sysfs_ops,
-       .default_attrs = abx500_chargalg_chg,
+static struct kobj_type ab8500_chargalg_ktype = {
+       .sysfs_ops = &ab8500_chargalg_sysfs_ops,
+       .default_attrs = ab8500_chargalg_chg,
 };
 
 /**
- * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
- * @di:                pointer to the struct abx500_chargalg
+ * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * @di:                pointer to the struct ab8500_chargalg
  *
  * This function removes the entry in sysfs.
  */
-static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
+static void ab8500_chargalg_sysfs_exit(struct ab8500_chargalg *di)
 {
        kobject_del(&di->chargalg_kobject);
 }
 
 /**
- * abx500_chargalg_sysfs_init() - init of sysfs entry
- * @di:                pointer to the struct abx500_chargalg
+ * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * @di:                pointer to the struct ab8500_chargalg
  *
  * This function adds an entry in sysfs.
  * Returns error code in case of failure else 0 on success
  */
-static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
+static int ab8500_chargalg_sysfs_init(struct ab8500_chargalg *di)
 {
        int ret = 0;
 
        ret = kobject_init_and_add(&di->chargalg_kobject,
-               &abx500_chargalg_ktype,
-               NULL, "abx500_chargalg");
+               &ab8500_chargalg_ktype,
+               NULL, "ab8500_chargalg");
        if (ret < 0)
                dev_err(di->dev, "failed to create sysfs entry\n");
 
@@ -1916,9 +1913,9 @@ static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
 }
 /* Exposure to the sysfs interface <<END>> */
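
One property of kobject_init_and_add() worth keeping in mind when reading the init path above: it takes a reference even when it fails, so the canonical error handling drops that reference with kobject_put(). A sketch of the idiom (not something this patch changes):

	ret = kobject_init_and_add(&di->chargalg_kobject,
				   &ab8500_chargalg_ktype,
				   NULL, "ab8500_chargalg");
	if (ret < 0) {
		dev_err(di->dev, "failed to create sysfs entry\n");
		kobject_put(&di->chargalg_kobject);	/* drop the ref taken above */
	}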
 
-static int __maybe_unused abx500_chargalg_resume(struct device *dev)
+static int __maybe_unused ab8500_chargalg_resume(struct device *dev)
 {
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
 
        /* Kick charger watchdog if charging (any charger online) */
        if (di->chg_info.online_chg)
@@ -1933,9 +1930,9 @@ static int __maybe_unused abx500_chargalg_resume(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused abx500_chargalg_suspend(struct device *dev)
+static int __maybe_unused ab8500_chargalg_suspend(struct device *dev)
 {
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
 
        if (di->chg_info.online_chg)
                cancel_delayed_work_sync(&di->chargalg_wd_work);
@@ -1949,22 +1946,22 @@ static char *supply_interface[] = {
        "ab8500_fg",
 };
 
-static const struct power_supply_desc abx500_chargalg_desc = {
-       .name                   = "abx500_chargalg",
+static const struct power_supply_desc ab8500_chargalg_desc = {
+       .name                   = "ab8500_chargalg",
        .type                   = POWER_SUPPLY_TYPE_BATTERY,
-       .properties             = abx500_chargalg_props,
-       .num_properties         = ARRAY_SIZE(abx500_chargalg_props),
-       .get_property           = abx500_chargalg_get_property,
-       .external_power_changed = abx500_chargalg_external_power_changed,
+       .properties             = ab8500_chargalg_props,
+       .num_properties         = ARRAY_SIZE(ab8500_chargalg_props),
+       .get_property           = ab8500_chargalg_get_property,
+       .external_power_changed = ab8500_chargalg_external_power_changed,
 };
 
-static int abx500_chargalg_bind(struct device *dev, struct device *master,
+static int ab8500_chargalg_bind(struct device *dev, struct device *master,
                                void *data)
 {
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
 
        /* Create a work queue for the chargalg */
-       di->chargalg_wq = alloc_ordered_workqueue("abx500_chargalg_wq",
+       di->chargalg_wq = alloc_ordered_workqueue("ab8500_chargalg_wq",
                                                  WQ_MEM_RECLAIM);
        if (di->chargalg_wq == NULL) {
                dev_err(di->dev, "failed to create work queue\n");
@@ -1977,10 +1974,10 @@ static int abx500_chargalg_bind(struct device *dev, struct device *master,
        return 0;
 }
 
-static void abx500_chargalg_unbind(struct device *dev, struct device *master,
+static void ab8500_chargalg_unbind(struct device *dev, struct device *master,
                                   void *data)
 {
-       struct abx500_chargalg *di = dev_get_drvdata(dev);
+       struct ab8500_chargalg *di = dev_get_drvdata(dev);
 
        /* Stop all timers and work */
        hrtimer_cancel(&di->safety_timer);
@@ -1995,16 +1992,16 @@ static void abx500_chargalg_unbind(struct device *dev, struct device *master,
        flush_scheduled_work();
 }
 
-static const struct component_ops abx500_chargalg_component_ops = {
-       .bind = abx500_chargalg_bind,
-       .unbind = abx500_chargalg_unbind,
+static const struct component_ops ab8500_chargalg_component_ops = {
+       .bind = ab8500_chargalg_bind,
+       .unbind = ab8500_chargalg_unbind,
 };
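
These component_ops register the chargalg as one component of a larger aggregate; the master side (the ab8500 charger, seen in a later hunk adding &ab8500_chargalg_driver to its component driver list) collects the children and binds them in one step. A minimal sketch of master-side registration, assuming the stock component API:

	/* master: describe which children must appear, then wait for them */
	struct component_match *match = NULL;

	component_match_add(dev, &match, my_compare_dev, my_match_data);
	ret = component_master_add_with_match(dev, &my_master_ops, match);
	if (ret)
		return ret;
	/* each child's probe then calls component_add(child_dev, &child_ops) */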
 
-static int abx500_chargalg_probe(struct platform_device *pdev)
+static int ab8500_chargalg_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct power_supply_config psy_cfg = {};
-       struct abx500_chargalg *di;
+       struct ab8500_chargalg *di;
        int ret = 0;
 
        di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
@@ -2023,28 +2020,28 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
 
        /* Initialize safety timer */
        hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-       di->safety_timer.function = abx500_chargalg_safety_timer_expired;
+       di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
 
        /* Initialize maintenance timer */
        hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        di->maintenance_timer.function =
-               abx500_chargalg_maintenance_timer_expired;
+               ab8500_chargalg_maintenance_timer_expired;
 
        /* Init work for chargalg */
        INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
-               abx500_chargalg_periodic_work);
+               ab8500_chargalg_periodic_work);
        INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
-               abx500_chargalg_wd_work);
+               ab8500_chargalg_wd_work);
 
        /* Init work for chargalg */
-       INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
+       INIT_WORK(&di->chargalg_work, ab8500_chargalg_work);
 
        /* To detect charger at startup */
        di->chg_info.prev_conn_chg = -1;
 
        /* Register chargalg power supply class */
        di->chargalg_psy = devm_power_supply_register(di->dev,
-                                                &abx500_chargalg_desc,
+                                                &ab8500_chargalg_desc,
                                                 &psy_cfg);
        if (IS_ERR(di->chargalg_psy)) {
                dev_err(di->dev, "failed to register chargalg psy\n");
@@ -2054,7 +2051,7 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, di);
 
        /* sysfs interface to enable/disable charging from user space */
-       ret = abx500_chargalg_sysfs_init(di);
+       ret = ab8500_chargalg_sysfs_init(di);
        if (ret) {
                dev_err(di->dev, "failed to create sysfs entry\n");
                return ret;
@@ -2062,38 +2059,38 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
        di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
 
        dev_info(di->dev, "probe success\n");
-       return component_add(dev, &abx500_chargalg_component_ops);
+       return component_add(dev, &ab8500_chargalg_component_ops);
 }
 
-static int abx500_chargalg_remove(struct platform_device *pdev)
+static int ab8500_chargalg_remove(struct platform_device *pdev)
 {
-       struct abx500_chargalg *di = platform_get_drvdata(pdev);
+       struct ab8500_chargalg *di = platform_get_drvdata(pdev);
 
-       component_del(&pdev->dev, &abx500_chargalg_component_ops);
+       component_del(&pdev->dev, &ab8500_chargalg_component_ops);
 
        /* sysfs interface to enable/disable charging from user space */
-       abx500_chargalg_sysfs_exit(di);
+       ab8500_chargalg_sysfs_exit(di);
 
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(abx500_chargalg_pm_ops, abx500_chargalg_suspend, abx500_chargalg_resume);
+static SIMPLE_DEV_PM_OPS(ab8500_chargalg_pm_ops, ab8500_chargalg_suspend, ab8500_chargalg_resume);
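
SIMPLE_DEV_PM_OPS() folds the two callbacks into a dev_pm_ops. Under CONFIG_PM_SLEEP it expands to roughly the table below; without it the table is empty, which is why both callbacks are tagged __maybe_unused:

	static const struct dev_pm_ops ab8500_chargalg_pm_ops = {
		.suspend  = ab8500_chargalg_suspend,
		.resume   = ab8500_chargalg_resume,
		.freeze   = ab8500_chargalg_suspend,
		.thaw     = ab8500_chargalg_resume,
		.poweroff = ab8500_chargalg_suspend,
		.restore  = ab8500_chargalg_resume,
	};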
 
 static const struct of_device_id ab8500_chargalg_match[] = {
        { .compatible = "stericsson,ab8500-chargalg", },
        { },
 };
 
-struct platform_driver abx500_chargalg_driver = {
-       .probe = abx500_chargalg_probe,
-       .remove = abx500_chargalg_remove,
+struct platform_driver ab8500_chargalg_driver = {
+       .probe = ab8500_chargalg_probe,
+       .remove = ab8500_chargalg_remove,
        .driver = {
-               .name = "ab8500-chargalg",
+               .name = "ab8500_chargalg",
                .of_match_table = ab8500_chargalg_match,
-               .pm = &abx500_chargalg_pm_ops,
+               .pm = &ab8500_chargalg_pm_ops,
        },
 };
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
-MODULE_ALIAS("platform:abx500-chargalg");
-MODULE_DESCRIPTION("abx500 battery charging algorithm");
+MODULE_ALIAS("platform:ab8500-chargalg");
+MODULE_DESCRIPTION("ab8500 battery charging algorithm");
index fa49e12e5a602db4e9e5d4651cd1e2ab9544106a..15eadaf46f144247f60badfa0d3435fcfdb188c3 100644
@@ -292,7 +292,7 @@ struct ab8500_charger {
        struct iio_channel *adc_main_charger_c;
        struct iio_channel *adc_vbus_v;
        struct iio_channel *adc_usb_charger_c;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct ab8500_charger_event_flags flags;
        struct ab8500_charger_usb_state usb_state;
        struct ab8500_charger_max_usb_in_curr max_usb_in_curr;
@@ -3388,7 +3388,7 @@ static const struct component_master_ops ab8500_charger_comp_ops = {
 static struct platform_driver *const ab8500_charger_component_drivers[] = {
        &ab8500_fg_driver,
        &ab8500_btemp_driver,
-       &abx500_chargalg_driver,
+       &ab8500_chargalg_driver,
 };
 
 static int ab8500_charger_compare_dev(struct device *dev, void *data)
index a6ebdb269fdd6ac58991bc26d4d00ca1a14bdbca..05fe9724ba508318cdb3bf10e5ffb165e83440b5 100644
@@ -34,6 +34,7 @@
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/iio/consumer.h>
 #include <linux/kernel.h>
+#include <linux/fixp-arith.h>
 
 #include "ab8500-bm.h"
 
@@ -56,9 +57,6 @@
 /* FG constants */
 #define BATT_OVV                       0x01
 
-#define interpolate(x, x1, y1, x2, y2) \
-       ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))));
-
 /**
  * struct ab8500_fg_interrupts - ab8500 fg interrupts
  * @name:      name of the interrupt
@@ -227,7 +225,7 @@ struct ab8500_fg {
        struct ab8500_fg_avg_cap avg_cap;
        struct ab8500 *parent;
        struct iio_channel *main_bat_v;
-       struct abx500_bm_data *bm;
+       struct ab8500_bm_data *bm;
        struct power_supply *fg_psy;
        struct workqueue_struct *fg_wq;
        struct delayed_work fg_periodic_work;
@@ -856,7 +854,7 @@ static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
 static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
 {
        int i, tbl_size;
-       const struct abx500_v_to_cap *tbl;
+       const struct ab8500_v_to_cap *tbl;
        int cap = 0;
 
        tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl;
@@ -868,11 +866,12 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
        }
 
        if ((i > 0) && (i < tbl_size)) {
-               cap = interpolate(voltage,
+               cap = fixp_linear_interpolate(
                        tbl[i].voltage,
                        tbl[i].capacity * 10,
                        tbl[i-1].voltage,
-                       tbl[i-1].capacity * 10);
+                       tbl[i-1].capacity * 10,
+                       voltage);
        } else if (i == 0) {
                cap = 1000;
        } else {
@@ -920,11 +919,12 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
        }
 
        if ((i > 0) && (i < tbl_size)) {
-               resist = interpolate(di->bat_temp / 10,
+               resist = fixp_linear_interpolate(
                        tbl[i].temp,
                        tbl[i].resist,
                        tbl[i-1].temp,
-                       tbl[i-1].resist);
+                       tbl[i-1].resist,
+                       di->bat_temp / 10);
        } else if (i == 0) {
                resist = tbl[0].resist;
        } else {
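
Both interpolation sites now use the shared helper from <linux/fixp-arith.h> instead of the driver-local interpolate() macro removed above (which also carried a stray trailing semicolon). The helper is, in essence:

	static inline int fixp_linear_interpolate(int x0, int y0,
						  int x1, int y1, int x)
	{
		if (y0 == y1 || x == x0)
			return y0;
		if (x1 == x0 || x == x1)
			return y1;

		return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
	}

Note the argument order: the two known points come first and the sample point last, whereas the removed macro took the sample point first; hence "voltage" and "di->bat_temp / 10" moving to the final argument at both call sites.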
@@ -2235,7 +2235,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                        case POWER_SUPPLY_TYPE_BATTERY:
                                if (!di->flags.batt_id_received &&
                                    di->bm->batt_id != BATTERY_UNKNOWN) {
-                                       const struct abx500_battery_type *b;
+                                       const struct ab8500_battery_type *b;
 
                                        b = &(di->bm->bat_type[di->bm->batt_id]);
 
index a4df1ea923864d46b1fa4892e34a80e9c1592782..b9553be9bed56d06ceaa5aafc13d5f716814972f 100644
@@ -813,7 +813,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
        if (val == 0)
                return -ENODEV;
 
-       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -823,7 +823,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
 
        info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
        if (info->cable.edev == NULL) {
-               dev_dbg(&pdev->dev, "%s is not ready, probe deferred\n",
+               dev_dbg(dev, "%s is not ready, probe deferred\n",
                        AXP288_EXTCON_DEV_NAME);
                return -EPROBE_DEFER;
        }
@@ -834,8 +834,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
                        dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n");
                        return -EPROBE_DEFER;
                }
-               dev_info(&pdev->dev,
-                        "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
+               dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n");
        }
 
        platform_set_drvdata(pdev, info);
@@ -874,7 +873,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
        INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
        info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
        if (info->otg.cable) {
-               ret = devm_extcon_register_notifier(&pdev->dev, info->otg.cable,
+               ret = devm_extcon_register_notifier(dev, info->otg.cable,
                                        EXTCON_USB_HOST, &info->otg.id_nb);
                if (ret) {
                        dev_err(dev, "failed to register EXTCON_USB_HOST notifier\n");
@@ -899,7 +898,7 @@ static int axp288_charger_probe(struct platform_device *pdev)
                                        NULL, axp288_charger_irq_thread_handler,
                                        IRQF_ONESHOT, info->pdev->name, info);
                if (ret) {
-                       dev_err(&pdev->dev, "failed to request interrupt=%d\n",
+                       dev_err(dev, "failed to request interrupt=%d\n",
                                                                info->irq[i]);
                        return ret;
                }
index 2ba2d8d6b8e635b1014cc5ddb194e232eb99eb2a..c1da217fdb0e2bb4cf7bc4b7c461958a9f3724dd 100644
@@ -2,7 +2,8 @@
 /*
  * axp288_fuel_gauge.c - Xpower AXP288 PMIC Fuel Gauge Driver
  *
- * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2020-2021 Andrejus Basovas <xxx@yyy.tld>
+ * Copyright (C) 2016-2021 Hans de Goede <hdegoede@redhat.com>
  * Copyright (C) 2014 Intel Corporation
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/iio/consumer.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
 #include <asm/unaligned.h>
+#include <asm/iosf_mbi.h>
 
-#define PS_STAT_VBUS_TRIGGER           (1 << 0)
-#define PS_STAT_BAT_CHRG_DIR           (1 << 2)
-#define PS_STAT_VBAT_ABOVE_VHOLD       (1 << 3)
-#define PS_STAT_VBUS_VALID             (1 << 4)
-#define PS_STAT_VBUS_PRESENT           (1 << 5)
+#define PS_STAT_VBUS_TRIGGER                   (1 << 0)
+#define PS_STAT_BAT_CHRG_DIR                   (1 << 2)
+#define PS_STAT_VBAT_ABOVE_VHOLD               (1 << 3)
+#define PS_STAT_VBUS_VALID                     (1 << 4)
+#define PS_STAT_VBUS_PRESENT                   (1 << 5)
 
-#define CHRG_STAT_BAT_SAFE_MODE                (1 << 3)
+#define CHRG_STAT_BAT_SAFE_MODE                        (1 << 3)
 #define CHRG_STAT_BAT_VALID                    (1 << 4)
-#define CHRG_STAT_BAT_PRESENT          (1 << 5)
+#define CHRG_STAT_BAT_PRESENT                  (1 << 5)
 #define CHRG_STAT_CHARGING                     (1 << 6)
 #define CHRG_STAT_PMIC_OTP                     (1 << 7)
 
 #define CHRG_CCCV_CC_MASK                      0xf     /* 4 bits */
-#define CHRG_CCCV_CC_BIT_POS           0
+#define CHRG_CCCV_CC_BIT_POS                   0
 #define CHRG_CCCV_CC_OFFSET                    200     /* 200mA */
-#define CHRG_CCCV_CC_LSB_RES           200     /* 200mA */
+#define CHRG_CCCV_CC_LSB_RES                   200     /* 200mA */
 #define CHRG_CCCV_ITERM_20P                    (1 << 4)    /* 20% of CC */
 #define CHRG_CCCV_CV_MASK                      0x60        /* 2 bits */
-#define CHRG_CCCV_CV_BIT_POS           5
+#define CHRG_CCCV_CV_BIT_POS                   5
 #define CHRG_CCCV_CV_4100MV                    0x0     /* 4.10V */
 #define CHRG_CCCV_CV_4150MV                    0x1     /* 4.15V */
 #define CHRG_CCCV_CV_4200MV                    0x2     /* 4.20V */
 #define CHRG_CCCV_CV_4350MV                    0x3     /* 4.35V */
 #define CHRG_CCCV_CHG_EN                       (1 << 7)
 
-#define FG_CNTL_OCV_ADJ_STAT           (1 << 2)
+#define FG_CNTL_OCV_ADJ_STAT                   (1 << 2)
 #define FG_CNTL_OCV_ADJ_EN                     (1 << 3)
-#define FG_CNTL_CAP_ADJ_STAT           (1 << 4)
+#define FG_CNTL_CAP_ADJ_STAT                   (1 << 4)
 #define FG_CNTL_CAP_ADJ_EN                     (1 << 5)
 #define FG_CNTL_CC_EN                          (1 << 6)
 #define FG_CNTL_GAUGE_EN                       (1 << 7)
 #define FG_CC_CAP_VALID                                (1 << 7)
 #define FG_CC_CAP_VAL_MASK                     0x7F
 
-#define FG_LOW_CAP_THR1_MASK           0xf0    /* 5% tp 20% */
+#define FG_LOW_CAP_THR1_MASK                   0xf0    /* 5% to 20% */
 #define FG_LOW_CAP_THR1_VAL                    0xa0    /* 15 perc */
-#define FG_LOW_CAP_THR2_MASK           0x0f    /* 0% to 15% */
+#define FG_LOW_CAP_THR2_MASK                   0x0f    /* 0% to 15% */
 #define FG_LOW_CAP_WARN_THR                    14  /* 14 perc */
 #define FG_LOW_CAP_CRIT_THR                    4   /* 4 perc */
 #define FG_LOW_CAP_SHDN_THR                    0   /* 0 perc */
 
-#define NR_RETRY_CNT    3
-#define DEV_NAME       "axp288_fuel_gauge"
+#define DEV_NAME                               "axp288_fuel_gauge"
 
 /* 1.1mV per LSB expressed in uV */
 #define VOLTAGE_FROM_ADC(a)                    ((a * 11) / 10)
 /* properties converted to uV, uA */
-#define PROP_VOLT(a)           ((a) * 1000)
-#define PROP_CURR(a)           ((a) * 1000)
+#define PROP_VOLT(a)                           ((a) * 1000)
+#define PROP_CURR(a)                           ((a) * 1000)
 
-#define AXP288_FG_INTR_NUM     6
+#define AXP288_REG_UPDATE_INTERVAL             (60 * HZ)
+#define AXP288_FG_INTR_NUM                     6
 enum {
        QWBTU_IRQ = 0,
        WBTU_IRQ,
@@ -98,9 +98,6 @@ enum {
 };
 
 enum {
-       BAT_TEMP = 0,
-       PMIC_TEMP,
-       SYSTEM_TEMP,
        BAT_CHRG_CURR,
        BAT_D_CURR,
        BAT_VOLT,
@@ -108,7 +105,7 @@ enum {
 };
 
 struct axp288_fg_info {
-       struct platform_device *pdev;
+       struct device *dev;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
        int irq[AXP288_FG_INTR_NUM];
@@ -117,7 +114,21 @@ struct axp288_fg_info {
        struct mutex lock;
        int status;
        int max_volt;
+       int pwr_op;
+       int low_cap;
        struct dentry *debug_file;
+
+       char valid;                 /* zero until following fields are valid */
+       unsigned long last_updated; /* in jiffies */
+
+       int pwr_stat;
+       int fg_res;
+       int bat_volt;
+       int d_curr;
+       int c_curr;
+       int ocv;
+       int fg_cc_mtr1;
+       int fg_des_cap1;
 };
 
 static enum power_supply_property fuel_gauge_props[] = {
@@ -137,17 +148,12 @@ static enum power_supply_property fuel_gauge_props[] = {
 
 static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
 {
-       int ret, i;
        unsigned int val;
+       int ret;
 
-       for (i = 0; i < NR_RETRY_CNT; i++) {
-               ret = regmap_read(info->regmap, reg, &val);
-               if (ret != -EBUSY)
-                       break;
-       }
-
+       ret = regmap_read(info->regmap, reg, &val);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
@@ -161,7 +167,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
        ret = regmap_write(info->regmap, reg, (unsigned int)val);
 
        if (ret < 0)
-               dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
+               dev_err(info->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
 
        return ret;
 }
@@ -173,15 +179,13 @@ static int fuel_gauge_read_15bit_word(struct axp288_fg_info *info, int reg)
 
        ret = regmap_bulk_read(info->regmap, reg, buf, 2);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
-                       reg, ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
        ret = get_unaligned_be16(buf);
        if (!(ret & FG_15BIT_WORD_VALID)) {
-               dev_err(&info->pdev->dev, "Error reg 0x%02x contents not valid\n",
-                       reg);
+               dev_err(info->dev, "Error reg 0x%02x contents not valid\n", reg);
                return -ENXIO;
        }
 
@@ -195,8 +199,7 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
 
        ret = regmap_bulk_read(info->regmap, reg, buf, 2);
        if (ret < 0) {
-               dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n",
-                       reg, ret);
+               dev_err(info->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
                return ret;
        }
 
@@ -204,139 +207,78 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
        return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
 }
 
-#ifdef CONFIG_DEBUG_FS
-static int fuel_gauge_debug_show(struct seq_file *s, void *data)
+static int fuel_gauge_update_registers(struct axp288_fg_info *info)
 {
-       struct axp288_fg_info *info = s->private;
-       int raw_val, ret;
-
-       seq_printf(s, " PWR_STATUS[%02x] : %02x\n",
-               AXP20X_PWR_INPUT_STATUS,
-               fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS));
-       seq_printf(s, "PWR_OP_MODE[%02x] : %02x\n",
-               AXP20X_PWR_OP_MODE,
-               fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE));
-       seq_printf(s, " CHRG_CTRL1[%02x] : %02x\n",
-               AXP20X_CHRG_CTRL1,
-               fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1));
-       seq_printf(s, "       VLTF[%02x] : %02x\n",
-               AXP20X_V_LTF_DISCHRG,
-               fuel_gauge_reg_readb(info, AXP20X_V_LTF_DISCHRG));
-       seq_printf(s, "       VHTF[%02x] : %02x\n",
-               AXP20X_V_HTF_DISCHRG,
-               fuel_gauge_reg_readb(info, AXP20X_V_HTF_DISCHRG));
-       seq_printf(s, "    CC_CTRL[%02x] : %02x\n",
-               AXP20X_CC_CTRL,
-               fuel_gauge_reg_readb(info, AXP20X_CC_CTRL));
-       seq_printf(s, "BATTERY CAP[%02x] : %02x\n",
-               AXP20X_FG_RES,
-               fuel_gauge_reg_readb(info, AXP20X_FG_RES));
-       seq_printf(s, "    FG_RDC1[%02x] : %02x\n",
-               AXP288_FG_RDC1_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_RDC1_REG));
-       seq_printf(s, "    FG_RDC0[%02x] : %02x\n",
-               AXP288_FG_RDC0_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_RDC0_REG));
-       seq_printf(s, "     FG_OCV[%02x] : %04x\n",
-               AXP288_FG_OCVH_REG,
-               fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG));
-       seq_printf(s, " FG_DES_CAP[%02x] : %04x\n",
-               AXP288_FG_DES_CAP1_REG,
-               fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG));
-       seq_printf(s, "  FG_CC_MTR[%02x] : %04x\n",
-               AXP288_FG_CC_MTR1_REG,
-               fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG));
-       seq_printf(s, " FG_OCV_CAP[%02x] : %02x\n",
-               AXP288_FG_OCV_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_OCV_CAP_REG));
-       seq_printf(s, "  FG_CC_CAP[%02x] : %02x\n",
-               AXP288_FG_CC_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_CC_CAP_REG));
-       seq_printf(s, " FG_LOW_CAP[%02x] : %02x\n",
-               AXP288_FG_LOW_CAP_REG,
-               fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG));
-       seq_printf(s, "TUNING_CTL0[%02x] : %02x\n",
-               AXP288_FG_TUNE0,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE0));
-       seq_printf(s, "TUNING_CTL1[%02x] : %02x\n",
-               AXP288_FG_TUNE1,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE1));
-       seq_printf(s, "TUNING_CTL2[%02x] : %02x\n",
-               AXP288_FG_TUNE2,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE2));
-       seq_printf(s, "TUNING_CTL3[%02x] : %02x\n",
-               AXP288_FG_TUNE3,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE3));
-       seq_printf(s, "TUNING_CTL4[%02x] : %02x\n",
-               AXP288_FG_TUNE4,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE4));
-       seq_printf(s, "TUNING_CTL5[%02x] : %02x\n",
-               AXP288_FG_TUNE5,
-               fuel_gauge_reg_readb(info, AXP288_FG_TUNE5));
-
-       ret = iio_read_channel_raw(info->iio_channel[BAT_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-batttemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[PMIC_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-pmictemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[SYSTEM_TEMP], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-systtemp : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-chrgcurr : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-dchrgcur : %d\n", raw_val);
-       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
-       if (ret >= 0)
-               seq_printf(s, "axp288-battvolt : %d\n", raw_val);
+       int ret;
 
-       return 0;
-}
+       if (info->valid && time_before(jiffies, info->last_updated + AXP288_REG_UPDATE_INTERVAL))
+               return 0;
 
-DEFINE_SHOW_ATTRIBUTE(fuel_gauge_debug);
+       dev_dbg(info->dev, "Fuel Gauge updating register values...\n");
 
-static void fuel_gauge_create_debugfs(struct axp288_fg_info *info)
-{
-       info->debug_file = debugfs_create_file("fuelgauge", 0666, NULL,
-               info, &fuel_gauge_debug_fops);
-}
+       ret = iosf_mbi_block_punit_i2c_access();
+       if (ret < 0)
+               return ret;
 
-static void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
-{
-       debugfs_remove(info->debug_file);
-}
-#else
-static inline void fuel_gauge_create_debugfs(struct axp288_fg_info *info)
-{
-}
-static inline void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
-{
+       ret = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
+       if (ret < 0)
+               goto out;
+       info->pwr_stat = ret;
+
+       ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
+       if (ret < 0)
+               goto out;
+       info->fg_res = ret;
+
+       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &info->bat_volt);
+       if (ret < 0)
+               goto out;
+
+       if (info->pwr_stat & PS_STAT_BAT_CHRG_DIR) {
+               info->d_curr = 0;
+               ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &info->c_curr);
+               if (ret < 0)
+                       goto out;
+       } else {
+               info->c_curr = 0;
+               ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &info->d_curr);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
+       if (ret < 0)
+               goto out;
+       info->ocv = ret;
+
+       ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
+       if (ret < 0)
+               goto out;
+       info->fg_cc_mtr1 = ret;
+
+       ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
+       if (ret < 0)
+               goto out;
+       info->fg_des_cap1 = ret;
+
+       info->last_updated = jiffies;
+       info->valid = 1;
+       ret = 0;
+out:
+       iosf_mbi_unblock_punit_i2c_access();
+       return ret;
 }
-#endif
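
fuel_gauge_update_registers() snapshots every register the property callbacks need in a single pass. The snapshot is cached for AXP288_REG_UPDATE_INTERVAL (60 seconds) via the new valid/last_updated fields, and the whole burst is bracketed by iosf_mbi_block_punit_i2c_access()/iosf_mbi_unblock_punit_i2c_access(), since on these platforms the PMIC's I2C bus is shared with the P-Unit and must be arbitrated. The bracketing pattern in isolation:

	ret = iosf_mbi_block_punit_i2c_access();
	if (ret < 0)
		return ret;

	/* ... all PMIC register reads go here ... */

	iosf_mbi_unblock_punit_i2c_access();	/* always paired, even on error */

Interrupts and external power changes clear info->valid, forcing a fresh snapshot on the next property read.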
 
 static void fuel_gauge_get_status(struct axp288_fg_info *info)
 {
-       int pwr_stat, fg_res, curr, ret;
-
-       pwr_stat = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
-       if (pwr_stat < 0) {
-               dev_err(&info->pdev->dev,
-                       "PWR STAT read failed:%d\n", pwr_stat);
-               return;
-       }
+       int pwr_stat = info->pwr_stat;
+       int fg_res = info->fg_res;
+       int curr = info->d_curr;
 
        /* Report full if Vbus is valid and the reported capacity is 100% */
        if (!(pwr_stat & PS_STAT_VBUS_VALID))
                goto not_full;
 
-       fg_res = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
-       if (fg_res < 0) {
-               dev_err(&info->pdev->dev, "FG RES read failed: %d\n", fg_res);
-               return;
-       }
        if (!(fg_res & FG_REP_CAP_VALID))
                goto not_full;
 
@@ -354,11 +296,6 @@ static void fuel_gauge_get_status(struct axp288_fg_info *info)
        if (fg_res < 90 || (pwr_stat & PS_STAT_BAT_CHRG_DIR))
                goto not_full;
 
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &curr);
-       if (ret < 0) {
-               dev_err(&info->pdev->dev, "FG get current failed: %d\n", ret);
-               return;
-       }
        if (curr == 0) {
                info->status = POWER_SUPPLY_STATUS_FULL;
                return;
@@ -371,61 +308,16 @@ not_full:
                info->status = POWER_SUPPLY_STATUS_DISCHARGING;
 }
 
-static int fuel_gauge_get_vbatt(struct axp288_fg_info *info, int *vbatt)
-{
-       int ret = 0, raw_val;
-
-       ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
-       if (ret < 0)
-               goto vbatt_read_fail;
-
-       *vbatt = VOLTAGE_FROM_ADC(raw_val);
-vbatt_read_fail:
-       return ret;
-}
-
-static int fuel_gauge_get_current(struct axp288_fg_info *info, int *cur)
-{
-       int ret, discharge;
-
-       /* First check discharge current, so that we do only 1 read on bat. */
-       ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &discharge);
-       if (ret < 0)
-               return ret;
-
-       if (discharge > 0) {
-               *cur = -1 * discharge;
-               return 0;
-       }
-
-       return iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], cur);
-}
-
-static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
-{
-       int ret;
-
-       ret = fuel_gauge_read_12bit_word(info, AXP288_FG_OCVH_REG);
-       if (ret >= 0)
-               *vocv = VOLTAGE_FROM_ADC(ret);
-
-       return ret;
-}
-
 static int fuel_gauge_battery_health(struct axp288_fg_info *info)
 {
-       int ret, vocv, health = POWER_SUPPLY_HEALTH_UNKNOWN;
-
-       ret = fuel_gauge_get_vocv(info, &vocv);
-       if (ret < 0)
-               goto health_read_fail;
+       int vocv = VOLTAGE_FROM_ADC(info->ocv);
+       int health = POWER_SUPPLY_HEALTH_UNKNOWN;
 
        if (vocv > info->max_volt)
                health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
        else
                health = POWER_SUPPLY_HEALTH_GOOD;
 
-health_read_fail:
        return health;
 }
 
@@ -434,9 +326,14 @@ static int fuel_gauge_get_property(struct power_supply *ps,
                union power_supply_propval *val)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(ps);
-       int ret = 0, value;
+       int ret, value;
 
        mutex_lock(&info->lock);
+
+       ret = fuel_gauge_update_registers(info);
+       if (ret < 0)
+               goto out;
+
        switch (prop) {
        case POWER_SUPPLY_PROP_STATUS:
                fuel_gauge_get_status(info);
@@ -446,78 +343,52 @@ static int fuel_gauge_get_property(struct power_supply *ps,
                val->intval = fuel_gauge_battery_health(info);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               ret = fuel_gauge_get_vbatt(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               value = VOLTAGE_FROM_ADC(info->bat_volt);
                val->intval = PROP_VOLT(value);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_OCV:
-               ret = fuel_gauge_get_vocv(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               value = VOLTAGE_FROM_ADC(info->ocv);
                val->intval = PROP_VOLT(value);
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = fuel_gauge_get_current(info, &value);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
+               if (info->d_curr > 0)
+                       value = -1 * info->d_curr;
+               else
+                       value = info->c_curr;
+
                val->intval = PROP_CURR(value);
                break;
        case POWER_SUPPLY_PROP_PRESENT:
-               ret = fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               if (ret & CHRG_STAT_BAT_PRESENT)
+               if (info->pwr_op & CHRG_STAT_BAT_PRESENT)
                        val->intval = 1;
                else
                        val->intval = 0;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
-               ret = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               if (!(ret & FG_REP_CAP_VALID))
-                       dev_err(&info->pdev->dev,
-                               "capacity measurement not valid\n");
-               val->intval = (ret & FG_REP_CAP_VAL_MASK);
+               if (!(info->fg_res & FG_REP_CAP_VALID))
+                       dev_err(info->dev, "capacity measurement not valid\n");
+               val->intval = (info->fg_res & FG_REP_CAP_VAL_MASK);
                break;
        case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-               val->intval = (ret & 0x0f);
+               val->intval = (info->low_cap & 0x0f);
                break;
        case POWER_SUPPLY_PROP_TECHNOLOGY:
                val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
-               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_CC_MTR1_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               val->intval = ret * FG_DES_CAP_RES_LSB;
+               val->intval = info->fg_cc_mtr1 * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_CHARGE_FULL:
-               ret = fuel_gauge_read_15bit_word(info, AXP288_FG_DES_CAP1_REG);
-               if (ret < 0)
-                       goto fuel_gauge_read_err;
-
-               val->intval = ret * FG_DES_CAP_RES_LSB;
+               val->intval = info->fg_des_cap1 * FG_DES_CAP_RES_LSB;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
                val->intval = PROP_VOLT(info->max_volt);
                break;
        default:
-               mutex_unlock(&info->lock);
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       mutex_unlock(&info->lock);
-       return 0;
-
-fuel_gauge_read_err:
+out:
        mutex_unlock(&info->lock);
        return ret;
 }
@@ -527,7 +398,7 @@ static int fuel_gauge_set_property(struct power_supply *ps,
                const union power_supply_propval *val)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(ps);
-       int ret = 0;
+       int new_low_cap, ret = 0;
 
        mutex_lock(&info->lock);
        switch (prop) {
@@ -536,12 +407,12 @@ static int fuel_gauge_set_property(struct power_supply *ps,
                        ret = -EINVAL;
                        break;
                }
-               ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
-               if (ret < 0)
-                       break;
-               ret &= 0xf0;
-               ret |= (val->intval & 0xf);
-               ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, ret);
+               new_low_cap = info->low_cap;
+               new_low_cap &= 0xf0;
+               new_low_cap |= (val->intval & 0xf);
+               ret = fuel_gauge_reg_writeb(info, AXP288_FG_LOW_CAP_REG, new_low_cap);
+               if (ret == 0)
+                       info->low_cap = new_low_cap;
                break;
        default:
                ret = -EINVAL;
@@ -579,37 +450,35 @@ static irqreturn_t fuel_gauge_thread_handler(int irq, void *dev)
        }
 
        if (i >= AXP288_FG_INTR_NUM) {
-               dev_warn(&info->pdev->dev, "spurious interrupt!!\n");
+               dev_warn(info->dev, "spurious interrupt!!\n");
                return IRQ_NONE;
        }
 
        switch (i) {
        case QWBTU_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Quit Battery under temperature in work mode IRQ (QWBTU)\n");
+               dev_info(info->dev, "Quit Battery under temperature in work mode IRQ (QWBTU)\n");
                break;
        case WBTU_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Battery under temperature in work mode IRQ (WBTU)\n");
+               dev_info(info->dev, "Battery under temperature in work mode IRQ (WBTU)\n");
                break;
        case QWBTO_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Quit Battery over temperature in work mode IRQ (QWBTO)\n");
+               dev_info(info->dev, "Quit Battery over temperature in work mode IRQ (QWBTO)\n");
                break;
        case WBTO_IRQ:
-               dev_info(&info->pdev->dev,
-                       "Battery over temperature in work mode IRQ (WBTO)\n");
+               dev_info(info->dev, "Battery over temperature in work mode IRQ (WBTO)\n");
                break;
        case WL2_IRQ:
-               dev_info(&info->pdev->dev, "Low Batt Warning(2) INTR\n");
+               dev_info(info->dev, "Low Batt Warning(2) INTR\n");
                break;
        case WL1_IRQ:
-               dev_info(&info->pdev->dev, "Low Batt Warning(1) INTR\n");
+               dev_info(info->dev, "Low Batt Warning(1) INTR\n");
                break;
        default:
-               dev_warn(&info->pdev->dev, "Spurious Interrupt!!!\n");
+               dev_warn(info->dev, "Spurious Interrupt!!!\n");
        }
 
+       info->valid = 0; /* Force updating of the cached registers */
+
        power_supply_changed(info->bat);
        return IRQ_HANDLED;
 }
@@ -618,6 +487,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
 {
        struct axp288_fg_info *info = power_supply_get_drvdata(psy);
 
+       info->valid = 0; /* Force updating of the cached registers */
        power_supply_changed(info->bat);
 }
 
@@ -632,16 +502,15 @@ static const struct power_supply_desc fuel_gauge_desc = {
        .external_power_changed = fuel_gauge_external_power_changed,
 };
 
-static void fuel_gauge_init_irq(struct axp288_fg_info *info)
+static void fuel_gauge_init_irq(struct axp288_fg_info *info, struct platform_device *pdev)
 {
        int ret, i, pirq;
 
        for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
-               pirq = platform_get_irq(info->pdev, i);
+               pirq = platform_get_irq(pdev, i);
                info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
                if (info->irq[i] < 0) {
-                       dev_warn(&info->pdev->dev,
-                               "regmap_irq get virq failed for IRQ %d: %d\n",
+                       dev_warn(info->dev, "regmap_irq get virq failed for IRQ %d: %d\n",
                                pirq, info->irq[i]);
                        info->irq[i] = -1;
                        goto intr_failed;
@@ -650,14 +519,10 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info)
                                NULL, fuel_gauge_thread_handler,
                                IRQF_ONESHOT, DEV_NAME, info);
                if (ret) {
-                       dev_warn(&info->pdev->dev,
-                               "request irq failed for IRQ %d: %d\n",
+                       dev_warn(info->dev, "request irq failed for IRQ %d: %d\n",
                                pirq, info->irq[i]);
                        info->irq[i] = -1;
                        goto intr_failed;
-               } else {
-                       dev_info(&info->pdev->dev, "HW IRQ %d -> VIRQ %d\n",
-                               pirq, info->irq[i]);
                }
        }
        return;
@@ -753,9 +618,6 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
        struct power_supply_config psy_cfg = {};
        static const char * const iio_chan_name[] = {
-               [BAT_TEMP] = "axp288-batt-temp",
-               [PMIC_TEMP] = "axp288-pmic-temp",
-               [SYSTEM_TEMP] = "axp288-system-temp",
                [BAT_CHRG_CURR] = "axp288-chrg-curr",
                [BAT_D_CURR] = "axp288-chrg-d-curr",
                [BAT_VOLT] = "axp288-batt-volt",
@@ -765,24 +627,15 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
        if (dmi_check_system(axp288_no_battery_list))
                return -ENODEV;
 
-       /*
-        * On some devices the fuelgauge and charger parts of the axp288 are
-        * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
-        */
-       ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
-       if (ret < 0)
-               return ret;
-       if (val == 0)
-               return -ENODEV;
-
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
-       info->pdev = pdev;
+       info->dev = &pdev->dev;
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
        info->status = POWER_SUPPLY_STATUS_UNKNOWN;
+       info->valid = 0;
 
        platform_set_drvdata(pdev, info);
 
@@ -808,19 +661,35 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                }
        }
 
-       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+       ret = iosf_mbi_block_punit_i2c_access();
        if (ret < 0)
                goto out_free_iio_chan;
 
+       /*
+        * On some devices the fuelgauge and charger parts of the axp288 are
+        * not used, check that the fuelgauge is enabled (CC_CTRL != 0).
+        */
+       ret = regmap_read(axp20x->regmap, AXP20X_CC_CTRL, &val);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       if (val == 0) {
+               ret = -ENODEV;
+               goto unblock_punit_i2c_access;
+       }
+
+       ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+
        if (!(ret & FG_DES_CAP1_VALID)) {
                dev_err(&pdev->dev, "axp288 not configured by firmware\n");
                ret = -ENODEV;
-               goto out_free_iio_chan;
+               goto unblock_punit_i2c_access;
        }
 
        ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
        if (ret < 0)
-               goto out_free_iio_chan;
+               goto unblock_punit_i2c_access;
        switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
        case CHRG_CCCV_CV_4100MV:
                info->max_volt = 4100;
@@ -836,6 +705,22 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                break;
        }
 
+       ret = fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       info->pwr_op = ret;
+
+       ret = fuel_gauge_reg_readb(info, AXP288_FG_LOW_CAP_REG);
+       if (ret < 0)
+               goto unblock_punit_i2c_access;
+       info->low_cap = ret;
+
+unblock_punit_i2c_access:
+       iosf_mbi_unblock_punit_i2c_access();
+       /* In case we arrive here by goto because of a register access error */
+       if (ret < 0)
+               goto out_free_iio_chan;
+
        psy_cfg.drv_data = info;
        info->bat = power_supply_register(&pdev->dev, &fuel_gauge_desc, &psy_cfg);
        if (IS_ERR(info->bat)) {
@@ -844,8 +729,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
                goto out_free_iio_chan;
        }
 
-       fuel_gauge_create_debugfs(info);
-       fuel_gauge_init_irq(info);
+       fuel_gauge_init_irq(info, pdev);
 
        return 0;
 
@@ -869,7 +753,6 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
        int i;
 
        power_supply_unregister(info->bat);
-       fuel_gauge_remove_debugfs(info);
 
        for (i = 0; i < AXP288_FG_INTR_NUM; i++)
                if (info->irq[i] >= 0)
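The probe hunk above now takes the P-Unit semaphore once and batches every fuel-gauge register access under it, instead of letting each transfer contend for the shared I2C bus. A minimal sketch of that pattern, reusing the driver's own fuel_gauge_reg_readb(); the helper name fg_read_setup() is illustrative, not part of the patch:

static int fg_read_setup(struct axp288_fg_info *info)
{
        int ret;

        ret = iosf_mbi_block_punit_i2c_access();
        if (ret < 0)
                return ret;

        /* Batch all register reads under the single semaphore hold. */
        ret = fuel_gauge_reg_readb(info, AXP20X_PWR_OP_MODE);
        if (ret >= 0)
                info->pwr_op = ret;

        iosf_mbi_unblock_punit_i2c_access();
        return ret < 0 ? ret : 0;
}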
index b5d619db79f6be6ccb5978c682c07358d5c16e82..3ce36d09c017cf1fb6ff118f36d46723431d0cae 100644 (file)
@@ -31,9 +31,8 @@
 
 #include <linux/power/bq24735-charger.h>
 
-#define BQ24735_CHG_OPT                        0x12
-#define BQ24735_CHG_OPT_CHARGE_DISABLE (1 << 0)
-#define BQ24735_CHG_OPT_AC_PRESENT     (1 << 4)
+/* BQ24735 available commands and their respective masks */
+#define BQ24735_CHARGE_OPT             0x12
 #define BQ24735_CHARGE_CURRENT         0x14
 #define BQ24735_CHARGE_CURRENT_MASK    0x1fc0
 #define BQ24735_CHARGE_VOLTAGE         0x15
 #define BQ24735_MANUFACTURER_ID                0xfe
 #define BQ24735_DEVICE_ID              0xff
 
+/* ChargeOptions bits of interest */
+#define BQ24735_CHARGE_OPT_CHG_DISABLE (1 << 0)
+#define BQ24735_CHARGE_OPT_AC_PRESENT  (1 << 4)
+
 struct bq24735 {
        struct power_supply             *charger;
        struct power_supply_desc        charger_desc;
@@ -167,8 +170,8 @@ static inline int bq24735_enable_charging(struct bq24735 *charger)
        if (ret)
                return ret;
 
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE, 0);
+       return bq24735_update_word(charger->client, BQ24735_CHARGE_OPT,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE, 0);
 }
 
 static inline int bq24735_disable_charging(struct bq24735 *charger)
@@ -176,9 +179,9 @@ static inline int bq24735_disable_charging(struct bq24735 *charger)
        if (charger->pdata->ext_control)
                return 0;
 
-       return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE,
-                                  BQ24735_CHG_OPT_CHARGE_DISABLE);
+       return bq24735_update_word(charger->client, BQ24735_CHARGE_OPT,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE,
+                                  BQ24735_CHARGE_OPT_CHG_DISABLE);
 }
 
 static bool bq24735_charger_is_present(struct bq24735 *charger)
@@ -188,14 +191,14 @@ static bool bq24735_charger_is_present(struct bq24735 *charger)
        } else {
                int ac = 0;
 
-               ac = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+               ac = bq24735_read_word(charger->client, BQ24735_CHARGE_OPT);
                if (ac < 0) {
                        dev_dbg(&charger->client->dev,
                                "Failed to read charger options : %d\n",
                                ac);
                        return false;
                }
-               return (ac & BQ24735_CHG_OPT_AC_PRESENT) ? true : false;
+               return (ac & BQ24735_CHARGE_OPT_AC_PRESENT) ? true : false;
        }
 
        return false;
@@ -208,11 +211,11 @@ static int bq24735_charger_is_charging(struct bq24735 *charger)
        if (!bq24735_charger_is_present(charger))
                return 0;
 
-       ret  = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+       ret  = bq24735_read_word(charger->client, BQ24735_CHARGE_OPT);
        if (ret < 0)
                return ret;
 
-       return !(ret & BQ24735_CHG_OPT_CHARGE_DISABLE);
+       return !(ret & BQ24735_CHARGE_OPT_CHG_DISABLE);
 }
 
 static void bq24735_update(struct bq24735 *charger)
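Every BQ24735_CHARGE_OPT* access in the hunks above goes through a read-modify-write of the ChargeOptions word. A sketch of that contract, assuming a helper shaped like the driver's bq24735_update_word():

static int update_word(struct i2c_client *client, u8 reg, u16 mask, u16 value)
{
        s32 word = i2c_smbus_read_word_data(client, reg);

        if (word < 0)
                return word;

        /* Replace only the bits selected by @mask with bits from @value. */
        word = ((u16)word & ~mask) | (value & mask);
        return i2c_smbus_write_word_data(client, reg, word);
}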
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
new file mode 100644 (file)
index 0000000..305f10d
--- /dev/null
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power supply driver for ChromeOS EC based Peripheral Device Charger.
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#define DRV_NAME               "cros-ec-pchg"
+#define PCHG_DIR_PREFIX                "peripheral"
+#define PCHG_DIR_NAME          PCHG_DIR_PREFIX "%d"
+#define PCHG_DIR_NAME_LENGTH \
+               sizeof(PCHG_DIR_PREFIX __stringify(EC_PCHG_MAX_PORTS))
+#define PCHG_CACHE_UPDATE_DELAY        msecs_to_jiffies(500)
+
+struct port_data {
+       int port_number;
+       char name[PCHG_DIR_NAME_LENGTH];
+       struct power_supply *psy;
+       struct power_supply_desc psy_desc;
+       int psy_status;
+       int battery_percentage;
+       int charge_type;
+       struct charger_data *charger;
+       unsigned long last_update;
+};
+
+struct charger_data {
+       struct device *dev;
+       struct cros_ec_dev *ec_dev;
+       struct cros_ec_device *ec_device;
+       int num_registered_psy;
+       struct port_data *ports[EC_PCHG_MAX_PORTS];
+       struct notifier_block notifier;
+};
+
+static enum power_supply_property cros_pchg_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_CHARGE_TYPE,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_SCOPE,
+};
+
+static int cros_pchg_ec_command(const struct charger_data *charger,
+                               unsigned int version,
+                               unsigned int command,
+                               const void *outdata,
+                               unsigned int outsize,
+                               void *indata,
+                               unsigned int insize)
+{
+       struct cros_ec_dev *ec_dev = charger->ec_dev;
+       struct cros_ec_command *msg;
+       int ret;
+
+       msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       msg->version = version;
+       msg->command = ec_dev->cmd_offset + command;
+       msg->outsize = outsize;
+       msg->insize = insize;
+
+       if (outsize)
+               memcpy(msg->data, outdata, outsize);
+
+       ret = cros_ec_cmd_xfer_status(charger->ec_device, msg);
+       if (ret >= 0 && insize)
+               memcpy(indata, msg->data, insize);
+
+       kfree(msg);
+       return ret;
+}
+
+static const unsigned int pchg_cmd_version = 1;
+
+static bool cros_pchg_cmd_ver_check(const struct charger_data *charger)
+{
+       struct ec_params_get_cmd_versions_v1 req;
+       struct ec_response_get_cmd_versions rsp;
+       int ret;
+
+       req.cmd = EC_CMD_PCHG;
+       ret = cros_pchg_ec_command(charger, 1, EC_CMD_GET_CMD_VERSIONS,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(charger->dev,
+                        "Unable to get versions of EC_CMD_PCHG (err:%d)\n",
+                        ret);
+               return false;
+       }
+
+       return !!(rsp.version_mask & BIT(pchg_cmd_version));
+}
+
+static int cros_pchg_port_count(const struct charger_data *charger)
+{
+       struct ec_response_pchg_count rsp;
+       int ret;
+
+       ret = cros_pchg_ec_command(charger, 0, EC_CMD_PCHG_COUNT,
+                                  NULL, 0, &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(charger->dev,
+                        "Unable to get number of ports (err:%d)\n", ret);
+               return ret;
+       }
+
+       return rsp.port_count;
+}
+
+static int cros_pchg_get_status(struct port_data *port)
+{
+       struct charger_data *charger = port->charger;
+       struct ec_params_pchg req;
+       struct ec_response_pchg rsp;
+       struct device *dev = charger->dev;
+       int old_status = port->psy_status;
+       int old_percentage = port->battery_percentage;
+       int ret;
+
+       req.port = port->port_number;
+       ret = cros_pchg_ec_command(charger, pchg_cmd_version, EC_CMD_PCHG,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_err(dev, "Unable to get port.%d status (err:%d)\n",
+                       port->port_number, ret);
+               return ret;
+       }
+
+       switch (rsp.state) {
+       case PCHG_STATE_RESET:
+       case PCHG_STATE_INITIALIZED:
+       case PCHG_STATE_ENABLED:
+       default:
+               port->psy_status = POWER_SUPPLY_STATUS_UNKNOWN;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       case PCHG_STATE_DETECTED:
+               port->psy_status = POWER_SUPPLY_STATUS_CHARGING;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+               break;
+       case PCHG_STATE_CHARGING:
+               port->psy_status = POWER_SUPPLY_STATUS_CHARGING;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+               break;
+       case PCHG_STATE_FULL:
+               port->psy_status = POWER_SUPPLY_STATUS_FULL;
+               port->charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       }
+
+       port->battery_percentage = rsp.battery_percentage;
+
+       if (port->psy_status != old_status ||
+                       port->battery_percentage != old_percentage)
+               power_supply_changed(port->psy);
+
+       dev_dbg(dev,
+               "Port %d: state=%d battery=%d%%\n",
+               port->port_number, rsp.state, rsp.battery_percentage);
+
+       return 0;
+}
+
+static int cros_pchg_get_port_status(struct port_data *port, bool ratelimit)
+{
+       int ret;
+
+       if (ratelimit &&
+           time_is_after_jiffies(port->last_update + PCHG_CACHE_UPDATE_DELAY))
+               return 0;
+
+       ret = cros_pchg_get_status(port);
+       if (ret < 0)
+               return ret;
+
+       port->last_update = jiffies;
+
+       return ret;
+}
+
+static int cros_pchg_get_prop(struct power_supply *psy,
+                             enum power_supply_property psp,
+                             union power_supply_propval *val)
+{
+       struct port_data *port = power_supply_get_drvdata(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+       case POWER_SUPPLY_PROP_CAPACITY:
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               cros_pchg_get_port_status(port, true);
+               break;
+       default:
+               break;
+       }
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = port->psy_status;
+               break;
+       case POWER_SUPPLY_PROP_CAPACITY:
+               val->intval = port->battery_percentage;
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               val->intval = port->charge_type;
+               break;
+       case POWER_SUPPLY_PROP_SCOPE:
+               val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cros_pchg_event(const struct charger_data *charger,
+                          unsigned long host_event)
+{
+       int i;
+
+       for (i = 0; i < charger->num_registered_psy; i++)
+               cros_pchg_get_port_status(charger->ports[i], false);
+
+       return NOTIFY_OK;
+}
+
+static u32 cros_get_device_event(const struct charger_data *charger)
+{
+       struct ec_params_device_event req;
+       struct ec_response_device_event rsp;
+       struct device *dev = charger->dev;
+       int ret;
+
+       req.param = EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS;
+       ret = cros_pchg_ec_command(charger, 0, EC_CMD_DEVICE_EVENT,
+                                  &req, sizeof(req), &rsp, sizeof(rsp));
+       if (ret < 0) {
+               dev_warn(dev, "Unable to get device events (err:%d)\n", ret);
+               return 0;
+       }
+
+       return rsp.event_mask;
+}
+
+static int cros_ec_notify(struct notifier_block *nb,
+                         unsigned long queued_during_suspend,
+                         void *data)
+{
+       struct cros_ec_device *ec_dev = (struct cros_ec_device *)data;
+       u32 host_event = cros_ec_get_host_event(ec_dev);
+       struct charger_data *charger =
+                       container_of(nb, struct charger_data, notifier);
+       u32 device_event_mask;
+
+       if (!host_event)
+               return NOTIFY_DONE;
+
+       if (!(host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_DEVICE)))
+               return NOTIFY_DONE;
+
+       /*
+        * TODO: Retrieve the device event mask in a common place
+        * (e.g. cros_ec_proto.c).
+        */
+       device_event_mask = cros_get_device_event(charger);
+       if (!(device_event_mask & EC_DEVICE_EVENT_MASK(EC_DEVICE_EVENT_WLC)))
+               return NOTIFY_DONE;
+
+       return cros_pchg_event(charger, host_event);
+}
+
+static int cros_pchg_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+       struct cros_ec_device *ec_device = ec_dev->ec_dev;
+       struct power_supply_desc *psy_desc;
+       struct charger_data *charger;
+       struct power_supply *psy;
+       struct port_data *port;
+       struct notifier_block *nb;
+       int num_ports;
+       int ret;
+       int i;
+
+       charger = devm_kzalloc(dev, sizeof(*charger), GFP_KERNEL);
+       if (!charger)
+               return -ENOMEM;
+
+       charger->dev = dev;
+       charger->ec_dev = ec_dev;
+       charger->ec_device = ec_device;
+
+       ret = cros_pchg_port_count(charger);
+       if (ret <= 0) {
+               /*
+                * This feature is enabled by the EC while the kernel driver
+                * is included by default on CrOS devices, so a zero port
+                * count can be normal. No need to be loud about it.
+                */
+               dev_info(dev, "No peripheral charge ports (err:%d)\n", ret);
+               return -ENODEV;
+       }
+
+       if (!cros_pchg_cmd_ver_check(charger)) {
+               dev_err(dev, "EC_CMD_PCHG version %d isn't available.\n",
+                       pchg_cmd_version);
+               return -EOPNOTSUPP;
+       }
+
+       num_ports = ret;
+       if (num_ports > EC_PCHG_MAX_PORTS) {
+               dev_err(dev, "Too many peripheral charge ports (%d)\n",
+                       num_ports);
+               return -ENOBUFS;
+       }
+
+       dev_info(dev, "%d peripheral charge ports found\n", num_ports);
+
+       for (i = 0; i < num_ports; i++) {
+               struct power_supply_config psy_cfg = {};
+
+               port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+               if (!port)
+                       return -ENOMEM;
+
+               port->charger = charger;
+               port->port_number = i;
+               snprintf(port->name, sizeof(port->name), PCHG_DIR_NAME, i);
+
+               psy_desc = &port->psy_desc;
+               psy_desc->name = port->name;
+               psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+               psy_desc->get_property = cros_pchg_get_prop;
+               psy_desc->external_power_changed = NULL;
+               psy_desc->properties = cros_pchg_props;
+               psy_desc->num_properties = ARRAY_SIZE(cros_pchg_props);
+               psy_cfg.drv_data = port;
+
+               psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
+               if (IS_ERR(psy))
+                       return dev_err_probe(dev, PTR_ERR(psy),
+                                       "Failed to register power supply\n");
+               port->psy = psy;
+
+               charger->ports[charger->num_registered_psy++] = port;
+       }
+
+       if (!charger->num_registered_psy)
+               return -ENODEV;
+
+       nb = &charger->notifier;
+       nb->notifier_call = cros_ec_notify;
+       ret = blocking_notifier_chain_register(&ec_dev->ec_dev->event_notifier,
+                                              nb);
+       if (ret < 0)
+               dev_err(dev, "Failed to register notifier (err:%d)\n", ret);
+
+       return 0;
+}
+
+static struct platform_driver cros_pchg_driver = {
+       .driver = {
+               .name = DRV_NAME,
+       },
+       .probe = cros_pchg_probe
+};
+
+module_platform_driver(cros_pchg_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC peripheral device charger");
+MODULE_ALIAS("platform:" DRV_NAME);
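cros_pchg_get_port_status() above caps EC traffic by caching the jiffies stamp of the last refresh and serving get_property() from the cache for PCHG_CACHE_UPDATE_DELAY. The same pattern in isolation (the helper name is illustrative):

static bool needs_refresh(unsigned long *last_update, unsigned long min_delay)
{
        /* time_is_after_jiffies(x) is true while x is still in the future. */
        if (time_is_after_jiffies(*last_update + min_delay))
                return false;           /* cache is still fresh */

        *last_update = jiffies;
        return true;                    /* caller should query the EC */
}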
index d110597746b0aadd78fa20da710eaa1127a65cfa..091868e9e9e82309e5d75b9c2813732b8632a793 100644 (file)
@@ -679,7 +679,9 @@ static int cw_bat_probe(struct i2c_client *client)
                                                    &cw2015_bat_desc,
                                                    &psy_cfg);
        if (IS_ERR(cw_bat->rk_bat)) {
-               dev_err(cw_bat->dev, "Failed to register power supply\n");
+               /* dev_err_probe() logs -EPROBE_DEFER quietly; probe is retried */
+               dev_err_probe(&client->dev, PTR_ERR(cw_bat->rk_bat),
+                       "Failed to register power supply\n");
                return PTR_ERR(cw_bat->rk_bat);
        }
 
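The cw2015 hunk above moves to dev_err_probe(), which logs genuine failures at error level, demotes -EPROBE_DEFER to a debug message (the probe will be retried anyway), and hands the error code back, so the failure path collapses into one statement. A sketch of the usual shape (register_psy() is illustrative):

static int register_psy(struct i2c_client *client, struct power_supply *psy)
{
        if (IS_ERR(psy))
                return dev_err_probe(&client->dev, PTR_ERR(psy),
                                     "Failed to register power supply\n");
        return 0;
}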
index ce2041b30a0662ec73455abda8ae764ff724c68e..8dffae76b6a31e2fa23af7ac00a54e329e8f59d6 100644 (file)
@@ -36,8 +36,6 @@
 
 /* Interrupt mask bits */
 #define CONFIG_ALRT_BIT_ENBL   (1 << 2)
-#define STATUS_INTR_SOCMIN_BIT (1 << 10)
-#define STATUS_INTR_SOCMAX_BIT (1 << 14)
 
 #define VFSOC0_LOCK            0x0000
 #define VFSOC0_UNLOCK          0x0080
@@ -285,8 +283,6 @@ static int max17042_get_property(struct power_supply *psy,
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
                if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
                        ret = regmap_read(map, MAX17042_V_empty, &data);
-               else if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
-                       ret = regmap_read(map, MAX17055_V_empty, &data);
                else
                        ret = regmap_read(map, MAX17047_V_empty, &data);
                if (ret < 0)
@@ -748,7 +744,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
        struct max17042_config_data *config = chip->pdata->config_data;
 
        max17042_override_por(map, MAX17042_TGAIN, config->tgain);
-       max17042_override_por(map, MAx17042_TOFF, config->toff);
+       max17042_override_por(map, MAX17042_TOFF, config->toff);
        max17042_override_por(map, MAX17042_CGAIN, config->cgain);
        max17042_override_por(map, MAX17042_COFF, config->coff);
 
@@ -767,36 +763,36 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
        max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg);
        max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg);
        max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg);
-       max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
 
        max17042_override_por(map, MAX17042_FullCAP, config->fullcap);
        max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom);
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
-               max17042_override_por(map, MAX17042_SOC_empty,
-                                               config->socempty);
-       max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
        max17042_override_por(map, MAX17042_dQacc, config->dqacc);
        max17042_override_por(map, MAX17042_dPacc, config->dpacc);
 
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042)
-               max17042_override_por(map, MAX17042_V_empty, config->vempty);
-       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)
-               max17042_override_por(map, MAX17055_V_empty, config->vempty);
-       else
-               max17042_override_por(map, MAX17047_V_empty, config->vempty);
-       max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
-       max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
-       max17042_override_por(map, MAX17042_FCTC, config->fctc);
        max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0);
        max17042_override_por(map, MAX17042_TempCo, config->tcompc0);
-       if (chip->chip_type &&
-           ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
+
+       if (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) {
+               max17042_override_por(map, MAX17042_MaskSOC, config->masksoc);
+               max17042_override_por(map, MAX17042_SOC_empty, config->socempty);
+               max17042_override_por(map, MAX17042_V_empty, config->vempty);
+               max17042_override_por(map, MAX17042_EmptyTempCo, config->empty_tempco);
+               max17042_override_por(map, MAX17042_K_empty0, config->kempty0);
+       }
+
+       if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17042) ||
            (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
-           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050))) {
-               max17042_override_por(map, MAX17042_EmptyTempCo,
-                                               config->empty_tempco);
-               max17042_override_por(map, MAX17042_K_empty0,
-                                               config->kempty0);
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050)) {
+               max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty);
+               max17042_override_por(map, MAX17042_TempNom, config->temp_nom);
+               max17042_override_por(map, MAX17042_TempLim, config->temp_lim);
+               max17042_override_por(map, MAX17042_FCTC, config->fctc);
+       }
+
+       if ((chip->chip_type == MAXIM_DEVICE_TYPE_MAX17047) ||
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17050) ||
+           (chip->chip_type == MAXIM_DEVICE_TYPE_MAX17055)) {
+               max17042_override_por(map, MAX17047_V_empty, config->vempty);
        }
 }
 
@@ -869,11 +865,14 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
 {
        struct max17042_chip *chip = dev;
        u32 val;
+       int ret;
 
-       regmap_read(chip->regmap, MAX17042_STATUS, &val);
-       if ((val & STATUS_INTR_SOCMIN_BIT) ||
-               (val & STATUS_INTR_SOCMAX_BIT)) {
-               dev_info(&chip->client->dev, "SOC threshold INTR\n");
+       ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
+       if (ret)
+               return IRQ_HANDLED;
+
+       if ((val & STATUS_SMN_BIT) || (val & STATUS_SMX_BIT)) {
+               dev_dbg(&chip->client->dev, "SOC threshold INTR\n");
                max17042_set_soc_threshold(chip, 1);
        }
 
@@ -1196,6 +1195,7 @@ static const struct of_device_id max17042_dt_match[] = {
        { .compatible = "maxim,max17047" },
        { .compatible = "maxim,max17050" },
        { .compatible = "maxim,max17055" },
+       { .compatible = "maxim,max77849-battery" },
        { },
 };
 MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -1206,6 +1206,7 @@ static const struct i2c_device_id max17042_id[] = {
        { "max17047", MAXIM_DEVICE_TYPE_MAX17047 },
        { "max17050", MAXIM_DEVICE_TYPE_MAX17050 },
        { "max17055", MAXIM_DEVICE_TYPE_MAX17055 },
+       { "max77849-battery", MAXIM_DEVICE_TYPE_MAX17047 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, max17042_id);
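All the max17042_override_por() calls regrouped above share one contract: a zero platform-data field means "leave the register at its power-on-reset default". A sketch of a helper with that behavior (the driver carries its own version; this one is illustrative):

static inline void override_por(struct regmap *map, u8 reg, u16 value)
{
        if (value)      /* 0 keeps the chip's POR default */
                regmap_write(map, reg, value);
}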
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
new file mode 100644 (file)
index 0000000..3abaa72
--- /dev/null
@@ -0,0 +1,867 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#include <linux/devm-helpers.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define MT6360_PMU_CHG_CTRL1   0x311
+#define MT6360_PMU_CHG_CTRL2   0x312
+#define MT6360_PMU_CHG_CTRL3   0x313
+#define MT6360_PMU_CHG_CTRL4   0x314
+#define MT6360_PMU_CHG_CTRL5   0x315
+#define MT6360_PMU_CHG_CTRL6   0x316
+#define MT6360_PMU_CHG_CTRL7   0x317
+#define MT6360_PMU_CHG_CTRL8   0x318
+#define MT6360_PMU_CHG_CTRL9   0x319
+#define MT6360_PMU_CHG_CTRL10  0x31A
+#define MT6360_PMU_DEVICE_TYPE 0x322
+#define MT6360_PMU_USB_STATUS1 0x327
+#define MT6360_PMU_CHG_STAT    0x34A
+#define MT6360_PMU_CHG_CTRL19  0x361
+#define MT6360_PMU_FOD_STAT    0x3E7
+
+/* MT6360_PMU_CHG_CTRL1 */
+#define MT6360_FSLP_SHFT       (3)
+#define MT6360_FSLP_MASK       BIT(MT6360_FSLP_SHFT)
+#define MT6360_OPA_MODE_SHFT   (0)
+#define MT6360_OPA_MODE_MASK   BIT(MT6360_OPA_MODE_SHFT)
+/* MT6360_PMU_CHG_CTRL2 */
+#define MT6360_IINLMTSEL_SHFT  (2)
+#define MT6360_IINLMTSEL_MASK  GENMASK(3, 2)
+/* MT6360_PMU_CHG_CTRL3 */
+#define MT6360_IAICR_SHFT      (2)
+#define MT6360_IAICR_MASK      GENMASK(7, 2)
+#define MT6360_ILIM_EN_MASK    BIT(0)
+/* MT6360_PMU_CHG_CTRL4 */
+#define MT6360_VOREG_SHFT      (1)
+#define MT6360_VOREG_MASK      GENMASK(7, 1)
+/* MT6360_PMU_CHG_CTRL5 */
+#define MT6360_VOBST_MASK      GENMASK(7, 2)
+/* MT6360_PMU_CHG_CTRL6 */
+#define MT6360_VMIVR_SHFT      (1)
+#define MT6360_VMIVR_MASK      GENMASK(7, 1)
+/* MT6360_PMU_CHG_CTRL7 */
+#define MT6360_ICHG_SHFT       (2)
+#define MT6360_ICHG_MASK       GENMASK(7, 2)
+/* MT6360_PMU_CHG_CTRL8 */
+#define MT6360_IPREC_SHFT      (0)
+#define MT6360_IPREC_MASK      GENMASK(3, 0)
+/* MT6360_PMU_CHG_CTRL9 */
+#define MT6360_IEOC_SHFT       (4)
+#define MT6360_IEOC_MASK       GENMASK(7, 4)
+/* MT6360_PMU_CHG_CTRL10 */
+#define MT6360_OTG_OC_MASK     GENMASK(3, 0)
+/* MT6360_PMU_DEVICE_TYPE */
+#define MT6360_USBCHGEN_MASK   BIT(7)
+/* MT6360_PMU_USB_STATUS1 */
+#define MT6360_USB_STATUS_SHFT (4)
+#define MT6360_USB_STATUS_MASK GENMASK(6, 4)
+/* MT6360_PMU_CHG_STAT */
+#define MT6360_CHG_STAT_SHFT   (6)
+#define MT6360_CHG_STAT_MASK   GENMASK(7, 6)
+#define MT6360_VBAT_LVL_MASK   BIT(5)
+/* MT6360_PMU_CHG_CTRL19 */
+#define MT6360_VINOVP_SHFT     (5)
+#define MT6360_VINOVP_MASK     GENMASK(6, 5)
+/* MT6360_PMU_FOD_STAT */
+#define MT6360_CHRDET_EXT_MASK BIT(4)
+
+/* uV */
+#define MT6360_VMIVR_MIN       3900000
+#define MT6360_VMIVR_MAX       13400000
+#define MT6360_VMIVR_STEP      100000
+/* uA */
+#define MT6360_ICHG_MIN                100000
+#define MT6360_ICHG_MAX                5000000
+#define MT6360_ICHG_STEP       100000
+/* uV */
+#define MT6360_VOREG_MIN       3900000
+#define MT6360_VOREG_MAX       4710000
+#define MT6360_VOREG_STEP      10000
+/* uA */
+#define MT6360_AICR_MIN                100000
+#define MT6360_AICR_MAX                3250000
+#define MT6360_AICR_STEP       50000
+/* uA */
+#define MT6360_IPREC_MIN       100000
+#define MT6360_IPREC_MAX       850000
+#define MT6360_IPREC_STEP      50000
+/* uA */
+#define MT6360_IEOC_MIN                100000
+#define MT6360_IEOC_MAX                850000
+#define MT6360_IEOC_STEP       50000
+
+enum {
+       MT6360_RANGE_VMIVR,
+       MT6360_RANGE_ICHG,
+       MT6360_RANGE_VOREG,
+       MT6360_RANGE_AICR,
+       MT6360_RANGE_IPREC,
+       MT6360_RANGE_IEOC,
+       MT6360_RANGE_MAX,
+};
+
+#define MT6360_LINEAR_RANGE(idx, _min, _min_sel, _max_sel, _step) \
+       [idx] = REGULATOR_LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
+static const struct linear_range mt6360_chg_range[MT6360_RANGE_MAX] = {
+       MT6360_LINEAR_RANGE(MT6360_RANGE_VMIVR, 3900000, 0, 0x5F, 100000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_ICHG, 100000, 0, 0x31, 100000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_VOREG, 3900000, 0, 0x51, 10000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_AICR, 100000, 0, 0x3F, 50000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_IPREC, 100000, 0, 0x0F, 50000),
+       MT6360_LINEAR_RANGE(MT6360_RANGE_IEOC, 100000, 0, 0x0F, 50000),
+};
+
+struct mt6360_chg_info {
+       struct device *dev;
+       struct regmap *regmap;
+       struct power_supply_desc psy_desc;
+       struct power_supply *psy;
+       struct regulator_dev *otg_rdev;
+       struct mutex chgdet_lock;
+       u32 vinovp;
+       bool pwr_rdy;
+       bool bc12_en;
+       int psy_usb_type;
+       struct work_struct chrdet_work;
+};
+
+enum mt6360_iinlmtsel {
+       MT6360_IINLMTSEL_AICR_3250 = 0,
+       MT6360_IINLMTSEL_CHG_TYPE,
+       MT6360_IINLMTSEL_AICR,
+       MT6360_IINLMTSEL_LOWER_LEVEL,
+};
+
+enum mt6360_pmu_chg_type {
+       MT6360_CHG_TYPE_NOVBUS = 0,
+       MT6360_CHG_TYPE_UNDER_GOING,
+       MT6360_CHG_TYPE_SDP,
+       MT6360_CHG_TYPE_SDPNSTD,
+       MT6360_CHG_TYPE_DCP,
+       MT6360_CHG_TYPE_CDP,
+       MT6360_CHG_TYPE_DISABLE_BC12,
+       MT6360_CHG_TYPE_MAX,
+};
+
+static enum power_supply_usb_type mt6360_charger_usb_types[] = {
+       POWER_SUPPLY_USB_TYPE_UNKNOWN,
+       POWER_SUPPLY_USB_TYPE_SDP,
+       POWER_SUPPLY_USB_TYPE_DCP,
+       POWER_SUPPLY_USB_TYPE_CDP,
+};
+
+static int mt6360_get_chrdet_ext_stat(struct mt6360_chg_info *mci,
+                                            bool *pwr_rdy)
+{
+       int ret;
+       unsigned int regval;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_FOD_STAT, &regval);
+       if (ret < 0)
+               return ret;
+       *pwr_rdy = !!(regval & MT6360_CHRDET_EXT_MASK);
+       return 0;
+}
+
+static int mt6360_charger_get_online(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       int ret;
+       bool pwr_rdy;
+
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               return ret;
+       val->intval = pwr_rdy;
+       return 0;
+}
+
+static int mt6360_charger_get_status(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       int status, ret;
+       unsigned int regval;
+       bool pwr_rdy;
+
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               return ret;
+       if (!pwr_rdy) {
+               status = POWER_SUPPLY_STATUS_DISCHARGING;
+               goto out;
+       }
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_STAT, &regval);
+       if (ret < 0)
+               return ret;
+       regval &= MT6360_CHG_STAT_MASK;
+       regval >>= MT6360_CHG_STAT_SHFT;
+       switch (regval) {
+       case 0x0:
+               status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+               break;
+       case 0x1:
+               status = POWER_SUPPLY_STATUS_CHARGING;
+               break;
+       case 0x2:
+               status = POWER_SUPPLY_STATUS_FULL;
+               break;
+       default:
+               ret = -EIO;
+       }
+out:
+       if (!ret)
+               val->intval = status;
+       return ret;
+}
+
+static int mt6360_charger_get_charge_type(struct mt6360_chg_info *mci,
+                                         union power_supply_propval *val)
+{
+       int type, ret;
+       unsigned int regval;
+       u8 chg_stat;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_STAT, &regval);
+       if (ret < 0)
+               return ret;
+
+       chg_stat = (regval & MT6360_CHG_STAT_MASK) >> MT6360_CHG_STAT_SHFT;
+       switch (chg_stat) {
+       case 0x01: /* Charge in Progress */
+               if (regval & MT6360_VBAT_LVL_MASK)
+                       type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+               else
+                       type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+               break;
+       case 0x00: /* Not Charging */
+       case 0x02: /* Charge Done */
+       case 0x03: /* Charge Fault */
+       default:
+               type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+               break;
+       }
+
+       val->intval = type;
+       return 0;
+}
+
+static int mt6360_charger_get_ichg(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL7, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_ICHG_MASK) >> MT6360_ICHG_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_ICHG], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_max_ichg(struct mt6360_chg_info *mci,
+                                      union power_supply_propval *val)
+{
+       val->intval = MT6360_ICHG_MAX;
+       return 0;
+}
+
+static int mt6360_charger_get_cv(struct mt6360_chg_info *mci,
+                                union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL4, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_VOREG_MASK) >> MT6360_VOREG_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_VOREG], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_max_cv(struct mt6360_chg_info *mci,
+                                    union power_supply_propval *val)
+{
+       val->intval = MT6360_VOREG_MAX;
+       return 0;
+}
+
+static int mt6360_charger_get_aicr(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL3, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IAICR_MASK) >> MT6360_IAICR_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_AICR], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_mivr(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL6, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_VMIVR_MASK) >> MT6360_VMIVR_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_VMIVR], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_iprechg(struct mt6360_chg_info *mci,
+                                     union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL8, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IPREC_MASK) >> MT6360_IPREC_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_IPREC], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_get_ieoc(struct mt6360_chg_info *mci,
+                                  union power_supply_propval *val)
+{
+       int ret;
+       u32 sel, value;
+
+       ret = regmap_read(mci->regmap, MT6360_PMU_CHG_CTRL9, &sel);
+       if (ret < 0)
+               return ret;
+       sel = (sel & MT6360_IEOC_MASK) >> MT6360_IEOC_SHFT;
+       ret = linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_IEOC], sel, &value);
+       if (!ret)
+               val->intval = value;
+       return ret;
+}
+
+static int mt6360_charger_set_online(struct mt6360_chg_info *mci,
+                                    const union power_supply_propval *val)
+{
+       u8 force_sleep = val->intval ? 0 : 1;
+
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL1,
+                                 MT6360_FSLP_MASK,
+                                 force_sleep << MT6360_FSLP_SHFT);
+}
+
+static int mt6360_charger_set_ichg(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_ICHG], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL7,
+                                 MT6360_ICHG_MASK,
+                                 sel << MT6360_ICHG_SHFT);
+}
+
+static int mt6360_charger_set_cv(struct mt6360_chg_info *mci,
+                                const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_VOREG], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL4,
+                                 MT6360_VOREG_MASK,
+                                 sel << MT6360_VOREG_SHFT);
+}
+
+static int mt6360_charger_set_aicr(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_AICR], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL3,
+                                 MT6360_IAICR_MASK,
+                                 sel << MT6360_IAICR_SHFT);
+}
+
+static int mt6360_charger_set_mivr(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_VMIVR], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL3,
+                                 MT6360_VMIVR_MASK,
+                                 sel << MT6360_VMIVR_SHFT);
+}
+
+static int mt6360_charger_set_iprechg(struct mt6360_chg_info *mci,
+                                     const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_IPREC], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL8,
+                                 MT6360_IPREC_MASK,
+                                 sel << MT6360_IPREC_SHFT);
+}
+
+static int mt6360_charger_set_ieoc(struct mt6360_chg_info *mci,
+                                  const union power_supply_propval *val)
+{
+       u32 sel;
+
+       linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_IEOC], val->intval, &sel);
+       return regmap_update_bits(mci->regmap,
+                                 MT6360_PMU_CHG_CTRL9,
+                                 MT6360_IEOC_MASK,
+                                 sel << MT6360_IEOC_SHFT);
+}
+
+static int mt6360_charger_get_property(struct power_supply *psy,
+                                      enum power_supply_property psp,
+                                      union power_supply_propval *val)
+{
+       struct mt6360_chg_info *mci = power_supply_get_drvdata(psy);
+       int ret = 0;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               ret = mt6360_charger_get_online(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_STATUS:
+               ret = mt6360_charger_get_status(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TYPE:
+               ret = mt6360_charger_get_charge_type(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+               ret = mt6360_charger_get_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+               ret = mt6360_charger_get_max_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+               ret = mt6360_charger_get_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+               ret = mt6360_charger_get_max_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+               ret = mt6360_charger_get_aicr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+               ret = mt6360_charger_get_mivr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               ret = mt6360_charger_get_iprechg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               ret = mt6360_charger_get_ieoc(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_USB_TYPE:
+               val->intval = mci->psy_usb_type;
+               break;
+       default:
+               ret = -ENODATA;
+       }
+       return ret;
+}
+
+static int mt6360_charger_set_property(struct power_supply *psy,
+                                      enum power_supply_property psp,
+                                      const union power_supply_propval *val)
+{
+       struct mt6360_chg_info *mci = power_supply_get_drvdata(psy);
+       int ret;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               ret = mt6360_charger_set_online(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+               ret = mt6360_charger_set_ichg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+               ret = mt6360_charger_set_cv(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+               ret = mt6360_charger_set_aicr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+               ret = mt6360_charger_set_mivr(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               ret = mt6360_charger_set_iprechg(mci, val);
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               ret = mt6360_charger_set_ieoc(mci, val);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
+static int mt6360_charger_property_is_writeable(struct power_supply *psy,
+                                              enum power_supply_property psp)
+{
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+       case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+       case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static enum power_supply_property mt6360_charger_properties[] = {
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_CHARGE_TYPE,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+       POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+       POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+       POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+       POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+       POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static const struct power_supply_desc mt6360_charger_desc = {
+       .type                   = POWER_SUPPLY_TYPE_USB,
+       .properties             = mt6360_charger_properties,
+       .num_properties         = ARRAY_SIZE(mt6360_charger_properties),
+       .get_property           = mt6360_charger_get_property,
+       .set_property           = mt6360_charger_set_property,
+       .property_is_writeable  = mt6360_charger_property_is_writeable,
+       .usb_types              = mt6360_charger_usb_types,
+       .num_usb_types          = ARRAY_SIZE(mt6360_charger_usb_types),
+};
+
+static const struct regulator_ops mt6360_chg_otg_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_desc mt6360_otg_rdesc = {
+       .of_match = "usb-otg-vbus",
+       .name = "usb-otg-vbus",
+       .ops = &mt6360_chg_otg_ops,
+       .owner = THIS_MODULE,
+       .type = REGULATOR_VOLTAGE,
+       .min_uV = 4425000,
+       .uV_step = 25000,
+       .n_voltages = 57,
+       .vsel_reg = MT6360_PMU_CHG_CTRL5,
+       .vsel_mask = MT6360_VOBST_MASK,
+       .enable_reg = MT6360_PMU_CHG_CTRL1,
+       .enable_mask = MT6360_OPA_MODE_MASK,
+};
+
+static irqreturn_t mt6360_pmu_attach_i_handler(int irq, void *data)
+{
+       struct mt6360_chg_info *mci = data;
+       int ret;
+       unsigned int usb_status;
+       int last_usb_type;
+
+       mutex_lock(&mci->chgdet_lock);
+       if (!mci->bc12_en) {
+               dev_warn(mci->dev, "Received attach interrupt, bc12 disabled, ignore irq\n");
+               goto out;
+       }
+       last_usb_type = mci->psy_usb_type;
+       /* Plug in */
+       ret = regmap_read(mci->regmap, MT6360_PMU_USB_STATUS1, &usb_status);
+       if (ret < 0)
+               goto out;
+       usb_status &= MT6360_USB_STATUS_MASK;
+       usb_status >>= MT6360_USB_STATUS_SHFT;
+       switch (usb_status) {
+       case MT6360_CHG_TYPE_NOVBUS:
+               dev_dbg(mci->dev, "Received attach interrupt, no vbus\n");
+               goto out;
+       case MT6360_CHG_TYPE_UNDER_GOING:
+               dev_dbg(mci->dev, "Received attach interrupt, detection still ongoing\n");
+               goto out;
+       case MT6360_CHG_TYPE_SDP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+               break;
+       case MT6360_CHG_TYPE_SDPNSTD:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+               break;
+       case MT6360_CHG_TYPE_CDP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+               break;
+       case MT6360_CHG_TYPE_DCP:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+               break;
+       case MT6360_CHG_TYPE_DISABLE_BC12:
+               dev_dbg(mci->dev, "Received attach interrupt, bc12 detection not enabled\n");
+               goto out;
+       default:
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+               dev_dbg(mci->dev, "Received attach interrupt, reserved address\n");
+               goto out;
+       }
+
+       dev_dbg(mci->dev, "Received attach interrupt, chg_type = %d\n", mci->psy_usb_type);
+       if (last_usb_type != mci->psy_usb_type)
+               power_supply_changed(mci->psy);
+out:
+       mutex_unlock(&mci->chgdet_lock);
+       return IRQ_HANDLED;
+}
+
+static void mt6360_handle_chrdet_ext_evt(struct mt6360_chg_info *mci)
+{
+       int ret;
+       bool pwr_rdy;
+
+       mutex_lock(&mci->chgdet_lock);
+       ret = mt6360_get_chrdet_ext_stat(mci, &pwr_rdy);
+       if (ret < 0)
+               goto out;
+       if (mci->pwr_rdy == pwr_rdy) {
+               dev_dbg(mci->dev, "Received vbus interrupt, pwr_rdy is same(%d)\n", pwr_rdy);
+               goto out;
+       }
+       mci->pwr_rdy = pwr_rdy;
+       dev_dbg(mci->dev, "Received vbus interrupt, pwr_rdy = %d\n", pwr_rdy);
+       if (!pwr_rdy) {
+               mci->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+               power_supply_changed(mci->psy);
+       }
+       ret = regmap_update_bits(mci->regmap,
+                                MT6360_PMU_DEVICE_TYPE,
+                                MT6360_USBCHGEN_MASK,
+                                pwr_rdy ? MT6360_USBCHGEN_MASK : 0);
+       if (ret < 0)
+               goto out;
+       mci->bc12_en = pwr_rdy;
+out:
+       mutex_unlock(&mci->chgdet_lock);
+}
+
+static void mt6360_chrdet_work(struct work_struct *work)
+{
+       struct mt6360_chg_info *mci = container_of(work, struct mt6360_chg_info,
+                                                  chrdet_work);
+
+       mt6360_handle_chrdet_ext_evt(mci);
+}
+
+static irqreturn_t mt6360_pmu_chrdet_ext_evt_handler(int irq, void *data)
+{
+       struct mt6360_chg_info *mci = data;
+
+       mt6360_handle_chrdet_ext_evt(mci);
+       return IRQ_HANDLED;
+}
+
+static int mt6360_chg_irq_register(struct platform_device *pdev)
+{
+       const struct {
+               const char *name;
+               irq_handler_t handler;
+       } irq_descs[] = {
+               { "attach_i", mt6360_pmu_attach_i_handler },
+               { "chrdet_ext_evt", mt6360_pmu_chrdet_ext_evt_handler }
+       };
+       int i, ret;
+
+       for (i = 0; i < ARRAY_SIZE(irq_descs); i++) {
+               ret = platform_get_irq_byname(pdev, irq_descs[i].name);
+               if (ret < 0)
+                       return ret;
+
+               ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
+                                               irq_descs[i].handler,
+                                               IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                               irq_descs[i].name,
+                                               platform_get_drvdata(pdev));
+               if (ret < 0)
+                       return dev_err_probe(&pdev->dev, ret, "Failed to request %s irq\n",
+                                            irq_descs[i].name);
+       }
+
+       return 0;
+}
+
+static u32 mt6360_vinovp_trans_to_sel(u32 val)
+{
+       u32 vinovp_tbl[] = { 5500000, 6500000, 11000000, 14500000 };
+       int i;
+
+       /* Select the largest supported value that does not exceed @val */
+       for (i = 0; i < ARRAY_SIZE(vinovp_tbl) - 1; i++) {
+               if (val < vinovp_tbl[i + 1])
+                       break;
+       }
+       return i;
+}
+
+static int mt6360_chg_init_setting(struct mt6360_chg_info *mci)
+{
+       int ret;
+       u32 sel;
+
+       sel = mt6360_vinovp_trans_to_sel(mci->vinovp);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL19,
+                                 MT6360_VINOVP_MASK, sel << MT6360_VINOVP_SHFT);
+       if (ret)
+               return dev_err_probe(mci->dev, ret, "%s: Failed to apply vinovp\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_DEVICE_TYPE,
+                                MT6360_USBCHGEN_MASK, 0);
+       if (ret)
+               return dev_err_probe(mci->dev, ret, "%s: Failed to disable bc12\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL2,
+                                MT6360_IINLMTSEL_MASK,
+                                MT6360_IINLMTSEL_AICR <<
+                                       MT6360_IINLMTSEL_SHFT);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to switch iinlmtsel to aicr\n", __func__);
+       usleep_range(5000, 6000);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL3,
+                                MT6360_ILIM_EN_MASK, 0);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to disable ilim\n", __func__);
+       ret = regmap_update_bits(mci->regmap, MT6360_PMU_CHG_CTRL10,
+                                MT6360_OTG_OC_MASK, MT6360_OTG_OC_MASK);
+       if (ret)
+               return dev_err_probe(mci->dev, ret,
+                                    "%s: Failed to config otg oc to 3A\n", __func__);
+       return 0;
+}
+
+static int mt6360_charger_probe(struct platform_device *pdev)
+{
+       struct mt6360_chg_info *mci;
+       struct power_supply_config charger_cfg = {};
+       struct regulator_config config = { };
+       int ret;
+
+       mci = devm_kzalloc(&pdev->dev, sizeof(*mci), GFP_KERNEL);
+       if (!mci)
+               return -ENOMEM;
+
+       mci->dev = &pdev->dev;
+       mci->vinovp = 6500000;
+       mutex_init(&mci->chgdet_lock);
+       platform_set_drvdata(pdev, mci);
+       devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+
+       ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp);
+       if (ret)
+               dev_warn(&pdev->dev, "Failed to parse vinovp in DT, keeping the 6.5V default\n");
+
+       mci->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!mci->regmap)
+               return dev_err_probe(&pdev->dev, -ENODEV, "Failed to get parent regmap\n");
+
+       ret = mt6360_chg_init_setting(mci);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to apply initial settings\n");
+
+       memcpy(&mci->psy_desc, &mt6360_charger_desc, sizeof(mci->psy_desc));
+       mci->psy_desc.name = dev_name(&pdev->dev);
+       charger_cfg.drv_data = mci;
+       charger_cfg.of_node = pdev->dev.of_node;
+       mci->psy = devm_power_supply_register(&pdev->dev,
+                                             &mci->psy_desc, &charger_cfg);
+       if (IS_ERR(mci->psy))
+               return dev_err_probe(&pdev->dev, PTR_ERR(mci->psy),
+                                    "Failed to register power supply dev\n");
+
+       ret = mt6360_chg_irq_register(pdev);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to register irqs\n");
+
+       config.dev = &pdev->dev;
+       config.regmap = mci->regmap;
+       mci->otg_rdev = devm_regulator_register(&pdev->dev, &mt6360_otg_rdesc,
+                                               &config);
+       if (IS_ERR(mci->otg_rdev))
+               return PTR_ERR(mci->otg_rdev);
+
+       schedule_work(&mci->chrdet_work);
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused mt6360_charger_of_id[] = {
+       { .compatible = "mediatek,mt6360-chg", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mt6360_charger_of_id);
+
+static const struct platform_device_id mt6360_charger_id[] = {
+       { "mt6360-chg", 0 },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, mt6360_charger_id);
+
+static struct platform_driver mt6360_charger_driver = {
+       .driver = {
+               .name = "mt6360-chg",
+               .of_match_table = of_match_ptr(mt6360_charger_of_id),
+       },
+       .probe = mt6360_charger_probe,
+       .id_table = mt6360_charger_id,
+};
+module_platform_driver(mt6360_charger_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 Charger Driver");
+MODULE_LICENSE("GPL");
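The mt6360 getters and setters above funnel every current and voltage field through the linear_range helpers declared at the top of the file. A sketch of the round-trip for ICHG (100 mA minimum, 100 mA steps, selectors 0..0x31 per the table), assuming the usual <linux/linear_range.h> semantics:

static void ichg_roundtrip(struct mt6360_chg_info *mci, u32 request_ua)
{
        u32 sel, value_ua;

        /* value -> selector, clamped into the supported range */
        linear_range_get_selector_within(&mt6360_chg_range[MT6360_RANGE_ICHG],
                                         request_ua, &sel);
        /* selector -> value; fails only for an out-of-range selector */
        if (!linear_range_get_value(&mt6360_chg_range[MT6360_RANGE_ICHG],
                                    sel, &value_ua))
                dev_dbg(mci->dev, "ichg: %u uA -> sel %u -> %u uA\n",
                        request_ua, sel, value_ua);
}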
index d99e2f11c18356d5464939ff9b50f9ba3d87cf6b..0c2132c7f5d40f8de11d272cb808b0b093d2ddc9 100644 (file)
@@ -571,6 +571,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
        int err, len, index;
        const __be32 *list;
 
+       info->technology                     = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
        info->energy_full_design_uwh         = -EINVAL;
        info->charge_full_design_uah         = -EINVAL;
        info->voltage_min_design_uv          = -EINVAL;
@@ -618,6 +619,24 @@ int power_supply_get_battery_info(struct power_supply *psy,
         * Documentation/power/power_supply_class.rst.
         */
 
+       if (!of_property_read_string(battery_np, "device-chemistry", &value)) {
+               if (!strcmp("nickel-cadmium", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_NiCd;
+               else if (!strcmp("nickel-metal-hydride", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_NiMH;
+               else if (!strcmp("lithium-ion", value))
+                       /* Imprecise lithium-ion type */
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LION;
+               else if (!strcmp("lithium-ion-polymer", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LIPO;
+               else if (!strcmp("lithium-ion-iron-phosphate", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LiFe;
+               else if (!strcmp("lithium-ion-manganese-oxide", value))
+                       info->technology = POWER_SUPPLY_TECHNOLOGY_LiMn;
+               else
+                       dev_warn(&psy->dev, "%s: unknown battery type\n", value);
+       }
+
        of_property_read_u32(battery_np, "energy-full-design-microwatt-hours",
                             &info->energy_full_design_uwh);
        of_property_read_u32(battery_np, "charge-full-design-microamp-hours",
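
The if/else chain above maps the devicetree "device-chemistry" string onto a POWER_SUPPLY_TECHNOLOGY_* constant. Purely as an illustration (not what the patch does), the same mapping can be written table-driven:

    /* Illustrative table-driven equivalent of the string matching above. */
    static int example_map_chemistry(const char *value)
    {
            static const struct {
                    const char *chemistry;
                    int technology;
            } map[] = {
                    { "nickel-cadmium",              POWER_SUPPLY_TECHNOLOGY_NiCd },
                    { "nickel-metal-hydride",        POWER_SUPPLY_TECHNOLOGY_NiMH },
                    { "lithium-ion",                 POWER_SUPPLY_TECHNOLOGY_LION },
                    { "lithium-ion-polymer",         POWER_SUPPLY_TECHNOLOGY_LIPO },
                    { "lithium-ion-iron-phosphate",  POWER_SUPPLY_TECHNOLOGY_LiFe },
                    { "lithium-ion-manganese-oxide", POWER_SUPPLY_TECHNOLOGY_LiMn },
            };
            int i;

            for (i = 0; i < ARRAY_SIZE(map); i++)
                    if (!strcmp(map[i].chemistry, value))
                            return map[i].technology;

            return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
    }
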
index c890e1cec7200548166f3a53dc268147076ee526..84cc9fba029d505e10c88ac32b2924a40e54e7cf 100644 (file)
@@ -929,11 +929,8 @@ static int smbb_charger_probe(struct platform_device *pdev)
                int irq;
 
                irq = platform_get_irq_byname(pdev, smbb_charger_irqs[i].name);
-               if (irq < 0) {
-                       dev_err(&pdev->dev, "failed to get irq '%s'\n",
-                               smbb_charger_irqs[i].name);
+               if (irq < 0)
                        return irq;
-               }
 
                smbb_charger_irqs[i].handler(irq, chg);
 
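
The dev_err() can go away because platform_get_irq_byname() already logs a lookup failure itself. When an IRQ is genuinely optional, the silent variant avoids the message entirely; a sketch, with "chg-done" as a made-up IRQ name:

    /* Sketch only: platform_get_irq_byname_optional() returns a negative
     * errno without logging; -ENXIO means the IRQ simply is not there.
     */
    static int example_get_optional_irq(struct platform_device *pdev)
    {
            int irq = platform_get_irq_byname_optional(pdev, "chg-done");

            if (irq == -ENXIO)
                    return 0;       /* absent, and that is fine */
            return irq;             /* valid IRQ number or a real error */
    }
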
index 819061918b2a1cd726266462e3a3194774fc377d..a5e09ac78a507a572d05a243e9cbef77f8daba43 100644 (file)
@@ -9,10 +9,12 @@
 #include <linux/device.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
+#include <linux/iio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mfd/rn5t618.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
@@ -64,6 +66,8 @@ struct rn5t618_power_info {
        struct power_supply *battery;
        struct power_supply *usb;
        struct power_supply *adp;
+       struct iio_channel *channel_vusb;
+       struct iio_channel *channel_vadp;
        int irq;
 };
 
@@ -77,6 +81,7 @@ static enum power_supply_usb_type rn5t618_usb_types[] = {
 static enum power_supply_property rn5t618_usb_props[] = {
        /* input current limit is not very accurate */
        POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_USB_TYPE,
        POWER_SUPPLY_PROP_ONLINE,
@@ -85,6 +90,7 @@ static enum power_supply_property rn5t618_usb_props[] = {
 static enum power_supply_property rn5t618_adp_props[] = {
        /* input current limit is not very accurate */
        POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_ONLINE,
 };
@@ -463,6 +469,15 @@ static int rn5t618_adp_get_property(struct power_supply *psy,
                        return ret;
 
                val->intval = FROM_CUR_REG(regval);
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               if (!info->channel_vadp)
+                       return -ENODATA;
+
+               ret = iio_read_channel_processed_scale(info->channel_vadp, &val->intval, 1000);
+               if (ret < 0)
+                       return ret;
+
                break;
        default:
                return -EINVAL;
@@ -588,6 +603,15 @@ static int rn5t618_usb_get_property(struct power_supply *psy,
 
                        val->intval = FROM_CUR_REG(regval);
                }
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               if (!info->channel_vusb)
+                       return -ENODATA;
+
+               ret = iio_read_channel_processed_scale(info->channel_vusb, &val->intval, 1000);
+               if (ret < 0)
+                       return ret;
+
                break;
        default:
                return -EINVAL;
@@ -711,6 +735,20 @@ static int rn5t618_power_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, info);
 
+       info->channel_vusb = devm_iio_channel_get(&pdev->dev, "vusb");
+       if (IS_ERR(info->channel_vusb)) {
+               if (PTR_ERR(info->channel_vusb) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(info->channel_vusb);
+       }
+
+       info->channel_vadp = devm_iio_channel_get(&pdev->dev, "vadp");
+       if (IS_ERR(info->channel_vadp)) {
+               if (PTR_ERR(info->channel_vadp) == -ENODEV)
+                       return -EPROBE_DEFER;
+               return PTR_ERR(info->channel_vadp);
+       }
+
        ret = regmap_read(info->rn5t618->regmap, RN5T618_CONTROL, &v);
        if (ret)
                return ret;
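
iio_read_channel_processed_scale() hands back the processed channel value multiplied by the given factor; the ADC reports millivolts while the power-supply class wants microvolts, hence the factor of 1000 above. The -ENODEV-to-EPROBE_DEFER mapping in probe covers the case where the IIO provider has not bound yet. A condensed consumer sketch built from the same calls:

    #include <linux/iio/consumer.h>

    /* Sketch of the consumer pattern above; "vusb" is the channel name
     * the patch itself uses.
     */
    static int example_read_vbus_uv(struct device *dev, int *uv)
    {
            struct iio_channel *chan = devm_iio_channel_get(dev, "vusb");
            int ret;

            if (IS_ERR(chan))
                    return PTR_ERR(chan) == -ENODEV ? -EPROBE_DEFER
                                                    : PTR_ERR(chan);

            ret = iio_read_channel_processed_scale(chan, uv, 1000);
            return ret < 0 ? ret : 0;       /* mV reading scaled to uV */
    }
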
index f84dbaab283a0e866d18b96eb7ee9842765e2dde..c4a95b01463aef9e0e2ca0fba6ed434fbafa5cc5 100644 (file)
@@ -31,8 +31,9 @@ enum {
        REG_CURRENT_AVG,
        REG_MAX_ERR,
        REG_CAPACITY,
-       REG_TIME_TO_EMPTY,
-       REG_TIME_TO_FULL,
+       REG_TIME_TO_EMPTY_NOW,
+       REG_TIME_TO_EMPTY_AVG,
+       REG_TIME_TO_FULL_AVG,
        REG_STATUS,
        REG_CAPACITY_LEVEL,
        REG_CYCLE_COUNT,
@@ -102,7 +103,7 @@ static const struct chip_data {
        [REG_TEMPERATURE] =
                SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
        [REG_VOLTAGE] =
-               SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
+               SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 65535),
        [REG_CURRENT_NOW] =
                SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
        [REG_CURRENT_AVG] =
@@ -119,9 +120,11 @@ static const struct chip_data {
                SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
        [REG_FULL_CHARGE_CAPACITY_CHARGE] =
                SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
-       [REG_TIME_TO_EMPTY] =
+       [REG_TIME_TO_EMPTY_NOW] =
+               SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, 0x11, 0, 65535),
+       [REG_TIME_TO_EMPTY_AVG] =
                SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535),
-       [REG_TIME_TO_FULL] =
+       [REG_TIME_TO_FULL_AVG] =
                SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
        [REG_CHARGE_CURRENT] =
                SBS_DATA(POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, 0x14, 0, 65535),
@@ -165,6 +168,7 @@ static const enum power_supply_property sbs_properties[] = {
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN,
        POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
        POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
        POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -748,6 +752,7 @@ static void  sbs_unit_adjustment(struct i2c_client *client,
                val->intval -= TEMP_KELVIN_TO_CELSIUS;
                break;
 
+       case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
        case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
                /* sbs provides time to empty and time to full in minutes.
@@ -966,6 +971,7 @@ static int sbs_get_property(struct power_supply *psy,
        case POWER_SUPPLY_PROP_CURRENT_NOW:
        case POWER_SUPPLY_PROP_CURRENT_AVG:
        case POWER_SUPPLY_PROP_TEMP:
+       case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
        case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
        case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
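
SBS register 0x11 (RunTimeToEmpty) joins the averaged registers 0x12/0x13; all three report minutes, while the power-supply class expects seconds, so sbs_unit_adjustment() scales them. A sketch of just that conversion step:

    /* Sketch of the unit adjustment; the driver's sbs_unit_adjustment()
     * also handles temperature and other properties.
     */
    static void example_time_to_seconds(enum power_supply_property psp,
                                        union power_supply_propval *val)
    {
            switch (psp) {
            case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
            case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
            case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
                    val->intval *= 60;      /* SBS minutes -> sysfs seconds */
                    break;
            default:
                    break;
            }
    }
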
index 1ae8374e1cebeea46b88f8327d715228913bddd3..ae45069bd5e1bc439863556264785605fde05af5 100644 (file)
@@ -1229,10 +1229,8 @@ static int sc27xx_fgu_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(dev, "no irq resource specified\n");
+       if (irq < 0)
                return irq;
-       }
 
        ret = devm_request_threaded_irq(data->dev, irq, NULL,
                                        sc27xx_fgu_interrupt,
index df240420f2de0e603f1bff0fcea9d4cfcb03432b..753944e774c4f3de24440941d805ba014fe933a0 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/power_supply.h>
 #include <linux/property.h>
 #include <linux/regmap.h>
+#include <linux/regulator/driver.h>
 
 #include <dt-bindings/power/summit,smb347-charger.h>
 
@@ -55,6 +56,7 @@
 #define CFG_PIN_EN_CTRL_ACTIVE_LOW             0x60
 #define CFG_PIN_EN_APSD_IRQ                    BIT(1)
 #define CFG_PIN_EN_CHARGER_ERROR               BIT(2)
+#define CFG_PIN_EN_CTRL                                BIT(4)
 #define CFG_THERM                              0x07
 #define CFG_THERM_SOFT_HOT_COMPENSATION_MASK   0x03
 #define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT  0
 #define CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT 2
 #define CFG_THERM_MONITOR_DISABLED             BIT(4)
 #define CFG_SYSOK                              0x08
+#define CFG_SYSOK_INOK_ACTIVE_HIGH             BIT(0)
 #define CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED  BIT(2)
 #define CFG_OTHER                              0x09
 #define CFG_OTHER_RID_MASK                     0xc0
 #define CFG_OTHER_RID_ENABLED_AUTO_OTG         0xc0
 #define CFG_OTG                                        0x0a
 #define CFG_OTG_TEMP_THRESHOLD_MASK            0x30
+#define CFG_OTG_CURRENT_LIMIT_250mA            BIT(2)
+#define CFG_OTG_CURRENT_LIMIT_750mA            BIT(3)
 #define CFG_OTG_TEMP_THRESHOLD_SHIFT           4
 #define CFG_OTG_CC_COMPENSATION_MASK           0xc0
 #define CFG_OTG_CC_COMPENSATION_SHIFT          6
@@ -91,6 +96,7 @@
 #define CMD_A                                  0x30
 #define CMD_A_CHG_ENABLED                      BIT(1)
 #define CMD_A_SUSPEND_ENABLED                  BIT(2)
+#define CMD_A_OTG_ENABLED                      BIT(4)
 #define CMD_A_ALLOW_WRITE                      BIT(7)
 #define CMD_B                                  0x31
 #define CMD_C                                  0x33
  * @regmap: pointer to driver regmap
  * @mains: power_supply instance for AC/DC power
  * @usb: power_supply instance for USB power
+ * @usb_rdev: USB VBUS regulator device
  * @id: SMB charger ID
  * @mains_online: is AC/DC input connected
  * @usb_online: is USB input connected
- * @charging_enabled: is charging enabled
  * @irq_unsupported: is interrupt unsupported by SMB hardware
+ * @usb_vbus_enabled: is USB VBUS powered by SMB charger
  * @max_charge_current: maximum current (in uA) the battery can be charged
  * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
  * @pre_charge_current: current (in uA) to use in pre-charging phase
  * @use_usb_otg: USB OTG output can be used (not implemented yet)
  * @enable_control: how charging enable/disable is controlled
  *                 (driver/pin controls)
+ * @inok_polarity: polarity of INOK signal which denotes presence of external
+ *                power supply
  *
  * @use_main, @use_usb, and @use_usb_otg are means to enable/disable
  * hardware support for these. This is useful when we want to have for
@@ -189,11 +198,12 @@ struct smb347_charger {
        struct regmap           *regmap;
        struct power_supply     *mains;
        struct power_supply     *usb;
+       struct regulator_dev    *usb_rdev;
        unsigned int            id;
        bool                    mains_online;
        bool                    usb_online;
-       bool                    charging_enabled;
        bool                    irq_unsupported;
+       bool                    usb_vbus_enabled;
 
        unsigned int            max_charge_current;
        unsigned int            max_charge_voltage;
@@ -214,6 +224,7 @@ struct smb347_charger {
        bool                    use_usb;
        bool                    use_usb_otg;
        unsigned int            enable_control;
+       unsigned int            inok_polarity;
 };
 
 enum smb_charger_chipid {
@@ -358,21 +369,18 @@ static int smb347_charging_status(struct smb347_charger *smb)
 
 static int smb347_charging_set(struct smb347_charger *smb, bool enable)
 {
-       int ret = 0;
-
        if (smb->enable_control != SMB3XX_CHG_ENABLE_SW) {
                dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
                return 0;
        }
 
-       if (smb->charging_enabled != enable) {
-               ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
-                                        enable ? CMD_A_CHG_ENABLED : 0);
-               if (!ret)
-                       smb->charging_enabled = enable;
+       if (enable && smb->usb_vbus_enabled) {
+               dev_dbg(smb->dev, "charging not enabled because USB is in host mode\n");
+               return 0;
        }
 
-       return ret;
+       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+                                 enable ? CMD_A_CHG_ENABLED : 0);
 }
 
 static inline int smb347_charging_enable(struct smb347_charger *smb)
@@ -671,10 +679,22 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
  *
  * Returns %0 on success and negative errno in case of failure.
  */
-static int smb347_set_writable(struct smb347_charger *smb, bool writable)
+static int smb347_set_writable(struct smb347_charger *smb, bool writable,
+                              bool irq_toggle)
 {
-       return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
-                                 writable ? CMD_A_ALLOW_WRITE : 0);
+       struct i2c_client *client = to_i2c_client(smb->dev);
+       int ret;
+
+       if (writable && irq_toggle && !smb->irq_unsupported)
+               disable_irq(client->irq);
+
+       ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+                                writable ? CMD_A_ALLOW_WRITE : 0);
+
+       if ((!writable || ret) && irq_toggle && !smb->irq_unsupported)
+               enable_irq(client->irq);
+
+       return ret;
 }
 
 static int smb347_hw_init(struct smb347_charger *smb)
@@ -682,7 +702,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
        unsigned int val;
        int ret;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, false);
        if (ret < 0)
                return ret;
 
@@ -724,6 +744,15 @@ static int smb347_hw_init(struct smb347_charger *smb)
        if (ret < 0)
                goto fail;
 
+       /* Activate pin control, making it writable. */
+       switch (smb->enable_control) {
+       case SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW:
+       case SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH:
+               ret = regmap_set_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL);
+               if (ret < 0)
+                       goto fail;
+       }
+
        /*
         * Make the charging functionality controllable by a write to the
         * command register unless pin control is specified in the platform
@@ -758,7 +787,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
        ret = smb347_start_stop_charging(smb);
 
 fail:
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, false);
        return ret;
 }
 
@@ -866,7 +895,7 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
        if (smb->irq_unsupported)
                return 0;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, true);
        if (ret < 0)
                return ret;
 
@@ -891,7 +920,7 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
        ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
                                 enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
 fail:
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, true);
        return ret;
 }
 
@@ -919,7 +948,7 @@ static int smb347_irq_init(struct smb347_charger *smb,
        if (!client->irq)
                return 0;
 
-       ret = smb347_set_writable(smb, true);
+       ret = smb347_set_writable(smb, true, false);
        if (ret < 0)
                return ret;
 
@@ -931,7 +960,7 @@ static int smb347_irq_init(struct smb347_charger *smb,
                                 CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
                                 CFG_STAT_DISABLED);
 
-       smb347_set_writable(smb, false);
+       smb347_set_writable(smb, false, false);
 
        if (ret < 0) {
                dev_warn(smb->dev, "failed to initialize IRQ: %d\n", ret);
@@ -1241,6 +1270,13 @@ static void smb347_dt_parse_dev_info(struct smb347_charger *smb)
        /* Select charging control */
        device_property_read_u32(dev, "summit,enable-charge-control",
                                 &smb->enable_control);
+
+       /*
+        * Polarity of INOK signal indicating presence of external power
+        * supply connected to the charger.
+        */
+       device_property_read_u32(dev, "summit,inok-polarity",
+                                &smb->inok_polarity);
 }
 
 static int smb347_get_battery_info(struct smb347_charger *smb)
@@ -1292,12 +1328,176 @@ static int smb347_get_battery_info(struct smb347_charger *smb)
        return 0;
 }
 
+static int smb347_usb_vbus_get_current_limit(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(smb->regmap, CFG_OTG, &val);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Without access to the datasheet it is unknown what happens if
+        * this bit is unset; assume it acts as a limit-enable.
+        */
+       if (!(val & CFG_OTG_CURRENT_LIMIT_250mA))
+               return 0;
+
+       return val & CFG_OTG_CURRENT_LIMIT_750mA ? 750000 : 250000;
+}
+
+static int smb347_usb_vbus_set_new_current_limit(struct smb347_charger *smb,
+                                                int max_uA)
+{
+       const unsigned int mask = CFG_OTG_CURRENT_LIMIT_750mA |
+                                 CFG_OTG_CURRENT_LIMIT_250mA;
+       unsigned int val = CFG_OTG_CURRENT_LIMIT_250mA;
+       int ret;
+
+       if (max_uA >= 750000)
+               val |= CFG_OTG_CURRENT_LIMIT_750mA;
+
+       ret = regmap_update_bits(smb->regmap, CFG_OTG, mask, val);
+       if (ret < 0)
+               dev_err(smb->dev, "failed to change USB current limit\n");
+
+       return ret;
+}
+
+static int smb347_usb_vbus_set_current_limit(struct regulator_dev *rdev,
+                                            int min_uA, int max_uA)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, max_uA);
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
+static int smb347_usb_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret, max_uA;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       smb347_charging_disable(smb);
+
+       if (device_property_read_bool(&rdev->dev, "summit,needs-inok-toggle")) {
+               unsigned int sysok = 0;
+
+               if (smb->inok_polarity == SMB3XX_SYSOK_INOK_ACTIVE_LOW)
+                       sysok = CFG_SYSOK_INOK_ACTIVE_HIGH;
+
+               /*
+                * VBUS won't be powered if INOK is active, so we need to
+                * manually disable INOK on some platforms.
+                */
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                        CFG_SYSOK_INOK_ACTIVE_HIGH, sysok);
+               if (ret < 0) {
+                       dev_err(smb->dev, "failed to disable INOK\n");
+                       goto done;
+               }
+       }
+
+       ret = smb347_usb_vbus_get_current_limit(rdev);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to get USB VBUS current limit\n");
+               goto done;
+       }
+
+       max_uA = ret;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, 250000);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to preset USB VBUS current limit\n");
+               goto done;
+       }
+
+       ret = regmap_set_bits(smb->regmap, CMD_A, CMD_A_OTG_ENABLED);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to enable USB VBUS\n");
+               goto done;
+       }
+
+       smb->usb_vbus_enabled = true;
+
+       ret = smb347_usb_vbus_set_new_current_limit(smb, max_uA);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to restore USB VBUS current limit\n");
+               goto done;
+       }
+done:
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
+static int smb347_usb_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+       struct smb347_charger *smb = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = smb347_set_writable(smb, true, true);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_clear_bits(smb->regmap, CMD_A, CMD_A_OTG_ENABLED);
+       if (ret < 0) {
+               dev_err(smb->dev, "failed to disable USB VBUS\n");
+               goto done;
+       }
+
+       smb->usb_vbus_enabled = false;
+
+       if (device_property_read_bool(&rdev->dev, "summit,needs-inok-toggle")) {
+               unsigned int sysok = 0;
+
+               if (smb->inok_polarity == SMB3XX_SYSOK_INOK_ACTIVE_HIGH)
+                       sysok = CFG_SYSOK_INOK_ACTIVE_HIGH;
+
+               ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+                                        CFG_SYSOK_INOK_ACTIVE_HIGH, sysok);
+               if (ret < 0) {
+                       dev_err(smb->dev, "failed to enable INOK\n");
+                       goto done;
+               }
+       }
+
+       smb347_start_stop_charging(smb);
+done:
+       smb347_set_writable(smb, false, true);
+
+       return ret;
+}
+
 static const struct regmap_config smb347_regmap = {
        .reg_bits       = 8,
        .val_bits       = 8,
        .max_register   = SMB347_MAX_REGISTER,
        .volatile_reg   = smb347_volatile_reg,
        .readable_reg   = smb347_readable_reg,
+       .cache_type     = REGCACHE_FLAT,
+       .num_reg_defaults_raw = SMB347_MAX_REGISTER,
+};
+
+static const struct regulator_ops smb347_usb_vbus_regulator_ops = {
+       .is_enabled     = regulator_is_enabled_regmap,
+       .enable         = smb347_usb_vbus_regulator_enable,
+       .disable        = smb347_usb_vbus_regulator_disable,
+       .get_current_limit = smb347_usb_vbus_get_current_limit,
+       .set_current_limit = smb347_usb_vbus_set_current_limit,
 };
 
 static const struct power_supply_desc smb347_mains_desc = {
@@ -1316,10 +1516,24 @@ static const struct power_supply_desc smb347_usb_desc = {
        .num_properties = ARRAY_SIZE(smb347_properties),
 };
 
+static const struct regulator_desc smb347_usb_vbus_regulator_desc = {
+       .name           = "smb347-usb-vbus",
+       .of_match       = of_match_ptr("usb-vbus"),
+       .ops            = &smb347_usb_vbus_regulator_ops,
+       .type           = REGULATOR_VOLTAGE,
+       .owner          = THIS_MODULE,
+       .enable_reg     = CMD_A,
+       .enable_mask    = CMD_A_OTG_ENABLED,
+       .enable_val     = CMD_A_OTG_ENABLED,
+       .fixed_uV       = 5000000,
+       .n_voltages     = 1,
+};
+
 static int smb347_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct power_supply_config mains_usb_cfg = {};
+       struct regulator_config usb_rdev_cfg = {};
        struct device *dev = &client->dev;
        struct smb347_charger *smb;
        int ret;
@@ -1367,6 +1581,18 @@ static int smb347_probe(struct i2c_client *client,
        if (ret)
                return ret;
 
+       usb_rdev_cfg.dev = dev;
+       usb_rdev_cfg.driver_data = smb;
+       usb_rdev_cfg.regmap = smb->regmap;
+
+       smb->usb_rdev = devm_regulator_register(dev,
+                                               &smb347_usb_vbus_regulator_desc,
+                                               &usb_rdev_cfg);
+       if (IS_ERR(smb->usb_rdev)) {
+               smb347_irq_disable(smb);
+               return PTR_ERR(smb->usb_rdev);
+       }
+
        return 0;
 }
 
@@ -1374,11 +1600,17 @@ static int smb347_remove(struct i2c_client *client)
 {
        struct smb347_charger *smb = i2c_get_clientdata(client);
 
+       smb347_usb_vbus_regulator_disable(smb->usb_rdev);
        smb347_irq_disable(smb);
 
        return 0;
 }
 
+static void smb347_shutdown(struct i2c_client *client)
+{
+       smb347_remove(client);
+}
+
 static const struct i2c_device_id smb347_id[] = {
        { "smb345", SMB345 },
        { "smb347", SMB347 },
@@ -1402,6 +1634,7 @@ static struct i2c_driver smb347_driver = {
        },
        .probe = smb347_probe,
        .remove = smb347_remove,
+       .shutdown = smb347_shutdown,
        .id_table = smb347_id,
 };
 module_i2c_driver(smb347_driver);
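
With the "smb347-usb-vbus" regulator registered, a USB controller driver can switch OTG VBUS through the normal regulator consumer API rather than poking charger registers. A consumer-side sketch; the "vbus" supply name is an assumption for illustration, not something this patch defines:

    #include <linux/regulator/consumer.h>

    static int example_enable_vbus(struct device *dev)
    {
            /* "vbus" is a hypothetical supply name wired up via DT. */
            struct regulator *vbus = devm_regulator_get(dev, "vbus");

            if (IS_ERR(vbus))
                    return PTR_ERR(vbus);

            /* Invokes smb347_usb_vbus_regulator_enable(): charging is
             * paused, INOK toggled where needed, and the OTG current
             * limit stepped from 250 mA up to the configured maximum.
             */
            return regulator_enable(vbus);
    }
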
index 8c20e524e9ad4d7dfdb0e977c76a66b5264c844e..e085c255da0c1a0eaaea3f197087194966971ec4 100644 (file)
@@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86_32 || COMPILE_TEST
-       depends on HAS_IOMEM && NET
+       depends on HAS_IOMEM && PCI
+       depends on NET
        imply PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index b3d96b74729256852840fb2ae455b68f254f6e4b..41b92dc2f011a3541c59e5a8248cf6f05eae4aa8 100644 (file)
@@ -154,7 +154,7 @@ static int unregister_vclock(struct device *dev, void *data)
        struct ptp_clock *ptp = dev_get_drvdata(dev);
        struct ptp_clock_info *info = ptp->info;
        struct ptp_vclock *vclock;
-       u8 *num = data;
+       u32 *num = data;
 
        vclock = info_to_vclock(info);
        dev_info(dev->parent, "delete virtual clock ptp%d\n",
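
The callback receives its argument through device_for_each_child()'s void pointer, so the pointer it recovers must match the caller's variable exactly; with the counter now a u32, reading it through a u8 pointer would pick up only a single byte, and the wrong one on big-endian machines. A sketch of that contract:

    /* Sketch: data passed through device_for_each_child() must be cast
     * back to the caller's exact type on the callback side.
     */
    static int example_cb(struct device *dev, void *data)
    {
            u32 *num = data;        /* matches the caller's u32 below */

            (*num)++;
            return 0;
    }

    static u32 example_count_children(struct device *parent)
    {
            u32 num = 0;

            device_for_each_child(parent, &num, example_cb);
            return num;
    }
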
index 24ce9a17ab4fe4710dc9007ff3642b8587010a09..4fd13b06231f20c4f1b2ced8d4e0a0966bfbacb0 100644 (file)
@@ -1044,7 +1044,7 @@ config REGULATOR_RT6160
        help
          This adds support for voltage regulator in Richtek RT6160.
          This device automatically change voltage output mode from
-         Buck or Boost. The mode transistion depend on the input source voltage.
+         Buck or Boost. The mode transition depends on the input source voltage.
          The wide output range is from 2025mV to 5200mV and can be used on most
          common application scenario.
 
@@ -1053,10 +1053,21 @@ config REGULATOR_RT6245
        depends on I2C
        select REGMAP_I2C
        help
-         This adds supprot for Richtek RT6245 voltage regulator.
+         This adds support for Richtek RT6245 voltage regulator.
          It can support up to 14A output current and adjustable output voltage
          from 0.4375V to 1.3875V, per step 12.5mV.
 
+config REGULATOR_RTQ2134
+       tristate "Richtek RTQ2134 SubPMIC Regulator"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         This driver adds support for RTQ2134 SubPMIC regulators.
+         The RTQ2134 is a multi-phase, programmable power management IC
+         that integrates four highly efficient, synchronous step-down
+         converter cores. It features a wide output voltage range and the
+         capability to configure the corresponding power stages.
+
 config REGULATOR_RTMV20
        tristate "Richtek RTMV20 Laser Diode Regulator"
        depends on I2C
@@ -1066,6 +1077,15 @@ config REGULATOR_RTMV20
          the Richtek RTMV20. It can support the load current up to 6A and
          integrate strobe/vsync/fsin signal to synchronize the IR camera.
 
+config REGULATOR_RTQ6752
+       tristate "Richtek RTQ6752 TFT LCD voltage regulator"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         This driver adds support for Richtek RTQ6752. The RTQ6752 includes
+         two synchronous boost converters for PAVDD and one synchronous
+         NAVDD buck-boost converter. This device is suitable for automotive
+         TFT-LCD panels.
+
 config REGULATOR_S2MPA01
        tristate "Samsung S2MPA01 voltage regulator"
        depends on MFD_SEC_CORE || COMPILE_TEST
index 8c2f82206b94c019ce5d30787bcd35c15b38c236..9e382b50a5ef0c4267dac01884dccac9ecfdea37 100644 (file)
@@ -128,6 +128,8 @@ obj-$(CONFIG_REGULATOR_RT5033)      += rt5033-regulator.o
 obj-$(CONFIG_REGULATOR_RT6160) += rt6160-regulator.o
 obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
 obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
+obj-$(CONFIG_REGULATOR_RTQ2134) += rtq2134-regulator.o
+obj-$(CONFIG_REGULATOR_RTQ6752)        += rtq6752-regulator.o
 obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
index b1eb46961993040ca2032860e291b20fdd50bee3..d60fccedb250d4bb3d9fe092d034b5f20c1942e0 100644 (file)
@@ -55,7 +55,8 @@
 #define BD718XX_HWOPNAME(swopname) swopname##_hwcontrol
 
 #define BD718XX_OPS(name, _list_voltage, _map_voltage, _set_voltage_sel, \
-                  _get_voltage_sel, _set_voltage_time_sel, _set_ramp_delay) \
+                  _get_voltage_sel, _set_voltage_time_sel, _set_ramp_delay, \
+                  _set_uvp, _set_ovp)                          \
 static const struct regulator_ops name = {                     \
        .enable = regulator_enable_regmap,                      \
        .disable = regulator_disable_regmap,                    \
@@ -66,6 +67,8 @@ static const struct regulator_ops name = {                    \
        .get_voltage_sel = (_get_voltage_sel),                  \
        .set_voltage_time_sel = (_set_voltage_time_sel),        \
        .set_ramp_delay = (_set_ramp_delay),                    \
+       .set_under_voltage_protection = (_set_uvp),             \
+       .set_over_voltage_protection = (_set_ovp),              \
 };                                                             \
                                                                \
 static const struct regulator_ops BD718XX_HWOPNAME(name) = {   \
@@ -76,6 +79,8 @@ static const struct regulator_ops BD718XX_HWOPNAME(name) = {  \
        .get_voltage_sel = (_get_voltage_sel),                  \
        .set_voltage_time_sel = (_set_voltage_time_sel),        \
        .set_ramp_delay = (_set_ramp_delay),                    \
+       .set_under_voltage_protection = (_set_uvp),             \
+       .set_over_voltage_protection = (_set_ovp),              \
 }                                                              \
 
 /*
@@ -154,17 +159,9 @@ static void voltage_change_done(struct regulator_dev *rdev, unsigned int sel,
                 * exceed it due to the scheduling.
                 */
                msleep(1);
-               /*
-                * Note for next hacker. The PWRGOOD should not be masked on
-                * BD71847 so we will just unconditionally enable detection
-                * when voltage is set.
-                * If someone want's to disable PWRGOOD he must implement
-                * caching and restoring the old value here. I am not
-                * aware of such use-cases so for the sake of the simplicity
-                * we just always enable PWRGOOD here.
-                */
-               ret = regmap_update_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
-                                        *mask, 0);
+
+               ret = regmap_clear_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                        *mask);
                if (ret)
                        dev_err(&rdev->dev,
                                "Failed to re-enable voltage monitoring (%d)\n",
@@ -208,12 +205,27 @@ static int voltage_change_prepare(struct regulator_dev *rdev, unsigned int sel,
                 * time configurable.
                 */
                if (new > now) {
+                       int tmp;
+                       int prot_bit;
                        int ldo_offset = rdev->desc->id - BD718XX_LDO1;
 
-                       *mask = BD718XX_LDO1_VRMON80 << ldo_offset;
-                       ret = regmap_update_bits(rdev->regmap,
-                                                BD718XX_REG_MVRFLTMASK2,
-                                                *mask, *mask);
+                       prot_bit = BD718XX_LDO1_VRMON80 << ldo_offset;
+                       ret = regmap_read(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                         &tmp);
+                       if (ret) {
+                               dev_err(&rdev->dev,
+                                       "Failed to read voltage monitoring state\n");
+                               return ret;
+                       }
+
+                       if (!(tmp & prot_bit)) {
+                               /* We disable protection if it was enabled... */
+                               ret = regmap_set_bits(rdev->regmap,
+                                                     BD718XX_REG_MVRFLTMASK2,
+                                                     prot_bit);
+                               /* ...and we also want to re-enable it */
+                               *mask = prot_bit;
+                       }
                        if (ret) {
                                dev_err(&rdev->dev,
                                        "Failed to stop voltage monitoring\n");
@@ -266,99 +278,6 @@ static int bd71837_set_voltage_sel_pickable_restricted(
        return regulator_set_voltage_sel_pickable_regmap(rdev, sel);
 }
 
-/*
- * OPS common for BD71847 and BD71850
- */
-BD718XX_OPS(bd718xx_pickable_range_ldo_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd718xx_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
-
-/* BD71847 and BD71850 LDO 5 is by default OFF at RUN state */
-static const struct regulator_ops bd718xx_ldo5_ops_hwstate = {
-       .is_enabled = never_enabled_by_hwstate,
-       .list_voltage = regulator_list_voltage_pickable_linear_range,
-       .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
-       .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
-};
-
-BD718XX_OPS(bd718xx_pickable_range_buck_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           regulator_set_voltage_sel_pickable_regmap,
-           regulator_get_voltage_sel_pickable_regmap,
-           regulator_set_voltage_time_sel, NULL);
-
-BD718XX_OPS(bd718xx_ldo_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd718xx_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd718xx_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
-           NULL, bd718xx_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd718xx_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-BD718XX_OPS(bd718xx_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-/*
- * OPS for BD71837
- */
-BD718XX_OPS(bd71837_pickable_range_ldo_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd71837_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_pickable_range_buck_ops,
-           regulator_list_voltage_pickable_linear_range, NULL,
-           bd71837_set_voltage_sel_pickable_restricted,
-           regulator_get_voltage_sel_pickable_regmap,
-           regulator_set_voltage_time_sel, NULL);
-
-BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, NULL, NULL);
-
-BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-
-BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           NULL);
-/*
- * BD71837 bucks 3 and 4 support defining their enable/disable state also
- * when buck enable state is under HW state machine control. In that case the
- * bit [2] in CTRL register is used to indicate if regulator should be ON.
- */
-static const struct regulator_ops bd71837_buck34_ops_hwctrl = {
-       .is_enabled = bd71837_get_buck34_enable_hwctrl,
-       .list_voltage = regulator_list_voltage_linear_range,
-       .set_voltage_sel = regulator_set_voltage_sel_regmap,
-       .get_voltage_sel = regulator_get_voltage_sel_regmap,
-       .set_voltage_time_sel = regulator_set_voltage_time_sel,
-       .set_ramp_delay = regulator_set_ramp_delay_regmap,
-};
-
-/*
- * OPS for all of the ICs - BD718(37/47/50)
- */
-BD718XX_OPS(bd718xx_dvs_buck_regulator_ops, regulator_list_voltage_linear_range,
-           NULL, regulator_set_voltage_sel_regmap,
-           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
-           /* bd718xx_buck1234_set_ramp_delay */ regulator_set_ramp_delay_regmap);
-
 /*
  * BD71837 BUCK1/2/3/4
  * BD71847 BUCK1/2
@@ -536,6 +455,238 @@ struct bd718xx_regulator_data {
        int additional_init_amnt;
 };
 
+static int bd718x7_xvp_sanity_check(struct regulator_dev *rdev, int lim_uV,
+                                   int severity)
+{
+       /*
+        * BD71837/47/50 ... (ICs supported by this driver) do not provide
+        * warnings, only protection
+        */
+       if (severity != REGULATOR_SEVERITY_PROT) {
+               dev_err(&rdev->dev,
+                       "Unsupported Under Voltage protection level\n");
+               return -EINVAL;
+       }
+
+       /*
+        * The protection limit is not changeable; it can only be enabled
+        * or disabled.
+        */
+       if (lim_uV)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int bd718x7_set_ldo_uvp(struct regulator_dev *rdev, int lim_uV,
+                              int severity, bool enable)
+{
+       int ldo_offset = rdev->desc->id - BD718XX_LDO1;
+       int prot_bit, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       prot_bit = BD718XX_LDO1_VRMON80 << ldo_offset;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                                        prot_bit);
+
+       return regmap_set_bits(rdev->regmap, BD718XX_REG_MVRFLTMASK2,
+                              prot_bit);
+}
+
+static int bd718x7_get_buck_prot_reg(int id, int *reg)
+{
+       if (WARN_ON(id > BD718XX_BUCK8))
+               return -EINVAL;
+
+       if (id > BD718XX_BUCK4)
+               *reg = BD718XX_REG_MVRFLTMASK0;
+       else
+               *reg = BD718XX_REG_MVRFLTMASK1;
+
+       return 0;
+}
+
+static int bd718x7_get_buck_ovp_info(int id, int *reg, int *bit)
+{
+       int ret;
+
+       ret = bd718x7_get_buck_prot_reg(id, reg);
+       if (ret)
+               return ret;
+
+       *bit = BIT((id % 4) * 2 + 1);
+
+       return 0;
+}
+
+static int bd718x7_get_buck_uvp_info(int id, int *reg, int *bit)
+{
+       int ret;
+
+       ret = bd718x7_get_buck_prot_reg(id, reg);
+       if (ret)
+               return ret;
+
+       *bit = BIT((id % 4) * 2);
+
+       return 0;
+}
+
+static int bd718x7_set_buck_uvp(struct regulator_dev *rdev, int lim_uV,
+                               int severity, bool enable)
+{
+       int bit, reg, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       ret = bd718x7_get_buck_uvp_info(rdev->desc->id, &reg, &bit);
+       if (ret)
+               return ret;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, reg, bit);
+
+       return regmap_set_bits(rdev->regmap, reg, bit);
+}
+
+static int bd718x7_set_buck_ovp(struct regulator_dev *rdev, int lim_uV,
+                               int severity, bool enable)
+{
+       int bit, reg, ret;
+
+       ret = bd718x7_xvp_sanity_check(rdev, lim_uV, severity);
+       if (ret)
+               return ret;
+
+       ret = bd718x7_get_buck_ovp_info(rdev->desc->id, &reg, &bit);
+       if (ret)
+               return ret;
+
+       if (enable)
+               return regmap_clear_bits(rdev->regmap, reg, bit);
+
+       return regmap_set_bits(rdev->regmap, reg, bit);
+}
+
+/*
+ * OPS common for BD71847 and BD71850
+ */
+BD718XX_OPS(bd718xx_pickable_range_ldo_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd718xx_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap, NULL, NULL,
+           bd718x7_set_ldo_uvp, NULL);
+
+/* BD71847 and BD71850 LDO 5 is by default OFF at RUN state */
+static const struct regulator_ops bd718xx_ldo5_ops_hwstate = {
+       .is_enabled = never_enabled_by_hwstate,
+       .list_voltage = regulator_list_voltage_pickable_linear_range,
+       .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
+       .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+       .set_under_voltage_protection = bd718x7_set_ldo_uvp,
+};
+
+BD718XX_OPS(bd718xx_pickable_range_buck_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           regulator_set_voltage_sel_pickable_regmap,
+           regulator_get_voltage_sel_pickable_regmap,
+           regulator_set_voltage_time_sel, NULL, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd718xx_ldo_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd718xx_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd718xx_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+           NULL, bd718xx_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd718xx_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd718xx_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+           regulator_map_voltage_ascend, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+/*
+ * OPS for BD71837
+ */
+BD718XX_OPS(bd71837_pickable_range_ldo_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd71837_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap, NULL, NULL,
+           bd718x7_set_ldo_uvp, NULL);
+
+BD718XX_OPS(bd71837_pickable_range_buck_ops,
+           regulator_list_voltage_pickable_linear_range, NULL,
+           bd71837_set_voltage_sel_pickable_restricted,
+           regulator_get_voltage_sel_pickable_regmap,
+           regulator_set_voltage_time_sel, NULL, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, NULL, NULL, bd718x7_set_ldo_uvp,
+           NULL);
+
+BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+
+BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           NULL, bd718x7_set_buck_uvp, bd718x7_set_buck_ovp);
+/*
+ * BD71837 bucks 3 and 4 support defining their enable/disable state also
+ * when buck enable state is under HW state machine control. In that case the
+ * bit [2] in CTRL register is used to indicate if regulator should be ON.
+ */
+static const struct regulator_ops bd71837_buck34_ops_hwctrl = {
+       .is_enabled = bd71837_get_buck34_enable_hwctrl,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = regulator_set_ramp_delay_regmap,
+       .set_under_voltage_protection = bd718x7_set_buck_uvp,
+       .set_over_voltage_protection = bd718x7_set_buck_ovp,
+};
+
+/*
+ * OPS for all of the ICs - BD718(37/47/50)
+ */
+BD718XX_OPS(bd718xx_dvs_buck_regulator_ops, regulator_list_voltage_linear_range,
+           NULL, regulator_set_voltage_sel_regmap,
+           regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+           regulator_set_ramp_delay_regmap, bd718x7_set_buck_uvp,
+           bd718x7_set_buck_ovp);
+
 /*
  * There is a HW quirk in BD71837. The shutdown sequence timings for
  * bucks/LDOs which are controlled via register interface are changed.
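
Each MVRFLTMASK register packs four bucks with two mask bits per buck: bit (id % 4) * 2 masks under-voltage detection and the bit above it masks over-voltage, with bucks 5-8 in MVRFLTMASK0 and bucks 1-4 in MVRFLTMASK1. A worked sketch of the bit math from the helpers above:

    /* Mirrors bd718x7_get_buck_uvp_info()/_ovp_info(); id 0 is BUCK1. */
    static unsigned int example_buck_uvp_bit(int id)
    {
            return BIT((id % 4) * 2);   /* BUCK1 -> BIT(0), BUCK4 -> BIT(6) */
    }

    static unsigned int example_buck_ovp_bit(int id)
    {
            /* BUCK1 -> BIT(1); BUCK5 wraps to BIT(1) again, but in
             * MVRFLTMASK0 rather than MVRFLTMASK1.
             */
            return BIT((id % 4) * 2 + 1);
    }
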
index cf7d5341750e19a4c0b14c939fd33161672bbfc4..82f52a2a031ac597e1ac48a48e1182a3647ee527 100644 (file)
@@ -412,6 +412,134 @@ static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev,
        return regmap_field_write(regl->suspend_sleep, val);
 }
 
+static unsigned int da9063_get_overdrive_mask(const struct regulator_desc *desc)
+{
+       switch (desc->id) {
+       case DA9063_ID_BCORES_MERGED:
+       case DA9063_ID_BCORE1:
+               return DA9063_BCORE1_OD;
+       case DA9063_ID_BCORE2:
+               return DA9063_BCORE2_OD;
+       case DA9063_ID_BPRO:
+               return DA9063_BPRO_OD;
+       default:
+               return 0;
+       }
+}
+
+static int da9063_buck_set_limit_set_overdrive(struct regulator_dev *rdev,
+                                              int min_uA, int max_uA,
+                                              unsigned int overdrive_mask)
+{
+       /*
+        * When enabling overdrive, do it before changing the current limit to
+        * ensure sufficient supply throughout the switch.
+        */
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int ret;
+       unsigned int orig_overdrive;
+
+       ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                         &orig_overdrive);
+       if (ret < 0)
+               return ret;
+       orig_overdrive &= overdrive_mask;
+
+       if (orig_overdrive == 0) {
+               ret = regmap_set_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                               overdrive_mask);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = regulator_set_current_limit_regmap(rdev, min_uA / 2, max_uA / 2);
+       if (ret < 0 && orig_overdrive == 0)
+               /*
+                * regulator_set_current_limit_regmap may have rejected the
+                * change because of unusable min_uA and/or max_uA inputs.
+                * Attempt to restore original overdrive state, ignore failure-
+                * on-failure.
+                */
+               regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                                 overdrive_mask);
+
+       return ret;
+}
+
+static int da9063_buck_set_limit_clear_overdrive(struct regulator_dev *rdev,
+                                                int min_uA, int max_uA,
+                                                unsigned int overdrive_mask)
+{
+       /*
+        * When disabling overdrive, do it after changing the current limit to
+        * ensure sufficient supply throughout the switch.
+        */
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int ret, orig_limit;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &orig_limit);
+       if (ret < 0)
+               return ret;
+
+       ret = regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_clear_bits(regl->hw->regmap, DA9063_REG_CONFIG_H,
+                               overdrive_mask);
+       if (ret < 0)
+               /*
+                * Attempt to restore original current limit, ignore failure-
+                * on-failure.
+                */
+               regmap_write(rdev->regmap, rdev->desc->csel_reg, orig_limit);
+
+       return ret;
+}
+
+static int da9063_buck_set_current_limit(struct regulator_dev *rdev,
+                                        int min_uA, int max_uA)
+{
+       unsigned int overdrive_mask, n_currents;
+
+       overdrive_mask = da9063_get_overdrive_mask(rdev->desc);
+       if (overdrive_mask) {
+               n_currents = rdev->desc->n_current_limits;
+               if (n_currents == 0)
+                       return -EINVAL;
+
+               if (max_uA > rdev->desc->curr_table[n_currents - 1])
+                       return da9063_buck_set_limit_set_overdrive(rdev, min_uA,
+                                                                  max_uA,
+                                                                  overdrive_mask);
+
+               return da9063_buck_set_limit_clear_overdrive(rdev, min_uA,
+                                                            max_uA,
+                                                            overdrive_mask);
+       }
+       return regulator_set_current_limit_regmap(rdev, min_uA, max_uA);
+}
+
+static int da9063_buck_get_current_limit(struct regulator_dev *rdev)
+{
+       struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+       int val, ret, limit;
+       unsigned int mask;
+
+       limit = regulator_get_current_limit_regmap(rdev);
+       if (limit < 0)
+               return limit;
+       mask = da9063_get_overdrive_mask(rdev->desc);
+       if (mask) {
+               ret = regmap_read(regl->hw->regmap, DA9063_REG_CONFIG_H, &val);
+               if (ret < 0)
+                       return ret;
+               if (val & mask)
+                       limit *= 2;
+       }
+       return limit;
+}
+
 static const struct regulator_ops da9063_buck_ops = {
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
@@ -419,8 +547,8 @@ static const struct regulator_ops da9063_buck_ops = {
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear,
-       .set_current_limit      = regulator_set_current_limit_regmap,
-       .get_current_limit      = regulator_get_current_limit_regmap,
+       .set_current_limit      = da9063_buck_set_current_limit,
+       .get_current_limit      = da9063_buck_get_current_limit,
        .set_mode               = da9063_buck_set_mode,
        .get_mode               = da9063_buck_get_mode,
        .get_status             = da9063_buck_get_status,
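
Overdrive doubles the effective limit: the CHG register still holds a table value, and the per-buck OD bit in CONFIG_H tells the hardware to interpret it at twice the current, which is why the set path halves the request and the get path multiplies the readback by two. A consumer-level sketch, assuming a buck whose current table tops out at 2000000 uA and accepts the halved value as a table step:

    #include <linux/regulator/consumer.h>

    /* Sketch under the 2 A table-maximum assumption: requesting 3 A makes
     * da9063_buck_set_current_limit() program 1.5 A and raise the
     * overdrive bit, so the readback reports the doubled value.
     */
    static int example_overdrive(struct regulator *reg)
    {
            int ret = regulator_set_current_limit(reg, 3000000, 3000000);

            if (ret)
                    return ret;
            return regulator_get_current_limit(reg);
    }
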
index 8b70bfe88019a0b85f3db6f4fe26c69bc04ecabc..a45c1e1ac7ef66180fc57ec6d37ae48063771604 100644 (file)
@@ -117,11 +117,11 @@ ux500_regulator_debug_init(struct platform_device *pdev,
        rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);
 
        /* create "status" file */
-       debugfs_create_file("status", S_IRUGO, rdebug.dir, &pdev->dev,
+       debugfs_create_file("status", 0444, rdebug.dir, &pdev->dev,
                            &ux500_regulator_status_fops);
 
        /* create "power-state-count" file */
-       debugfs_create_file("power-state-count", S_IRUGO, rdebug.dir,
+       debugfs_create_file("power-state-count", 0444, rdebug.dir,
                            &pdev->dev, &ux500_regulator_power_state_cnt_fops);
 
        rdebug.regulator_array = regulator_info;
index a8de0aa88bad6c3ae581a09bb8aa2f4e38fe41e0..9113233f41cd11a38c7d6e308172f6b0407c28fb 100644 (file)
@@ -205,35 +205,6 @@ struct regulator_dev *devm_regulator_register(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_regulator_register);
 
-static int devm_rdev_match(struct device *dev, void *res, void *data)
-{
-       struct regulator_dev **r = res;
-       if (!r || !*r) {
-               WARN_ON(!r || !*r);
-               return 0;
-       }
-       return *r == data;
-}
-
-/**
- * devm_regulator_unregister - Resource managed regulator_unregister()
- * @dev:  device to supply
- * @rdev: regulator to free
- *
- * Unregister a regulator registered with devm_regulator_register().
- * Normally this function will not need to be called and the resource
- * management code will ensure that the resource is freed.
- */
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev)
-{
-       int rc;
-
-       rc = devres_release(dev, devm_rdev_release, devm_rdev_match, rdev);
-       if (rc != 0)
-               WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_regulator_unregister);
-
 struct regulator_supply_alias_match {
        struct device *dev;
        const char *id;
@@ -296,19 +267,8 @@ int devm_regulator_register_supply_alias(struct device *dev, const char *id,
 }
 EXPORT_SYMBOL_GPL(devm_regulator_register_supply_alias);
 
-/**
- * devm_regulator_unregister_supply_alias - Resource managed
- * regulator_unregister_supply_alias()
- *
- * @dev: device to supply
- * @id:  supply name or regulator ID
- *
- * Unregister an alias registered with
- * devm_regulator_register_supply_alias(). Normally this function
- * will not need to be called and the resource management code
- * will ensure that the resource is freed.
- */
-void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
+static void devm_regulator_unregister_supply_alias(struct device *dev,
+                                                  const char *id)
 {
        struct regulator_supply_alias_match match;
        int rc;
@@ -321,7 +281,6 @@ void devm_regulator_unregister_supply_alias(struct device *dev, const char *id)
        if (rc != 0)
                WARN_ON(rc);
 }
-EXPORT_SYMBOL_GPL(devm_regulator_unregister_supply_alias);
 
 /**
  * devm_regulator_bulk_register_supply_alias - Managed register
@@ -373,30 +332,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(devm_regulator_bulk_register_supply_alias);
 
-/**
- * devm_regulator_bulk_unregister_supply_alias - Managed unregister
- * multiple aliases
- *
- * @dev:    device to supply
- * @id:     list of supply names or regulator IDs
- * @num_id: number of aliases to unregister
- *
- * Unregister aliases registered with
- * devm_regulator_bulk_register_supply_alias(). Normally this function
- * will not need to be called and the resource management code
- * will ensure that the resource is freed.
- */
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
-                                                const char *const *id,
-                                                int num_id)
-{
-       int i;
-
-       for (i = 0; i < num_id; ++i)
-               devm_regulator_unregister_supply_alias(dev, id[i]);
-}
-EXPORT_SYMBOL_GPL(devm_regulator_bulk_unregister_supply_alias);
-
 struct regulator_notifier_match {
        struct regulator *regulator;
        struct notifier_block *nb;
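
Both removed unregister helpers existed only so a caller could tear down a devm-managed resource early, and no in-tree user needed that: devres releases everything registered through the devm_* variants when the device unbinds. A sketch of why probe code needs no explicit counterpart; my_desc and my_cfg are placeholder names, not real symbols:

    /* Sketch: devres frees the regulator on unbind, so there is no
     * remove() counterpart and no devm_regulator_unregister() needed.
     */
    static int example_probe(struct platform_device *pdev)
    {
            struct regulator_dev *rdev;

            rdev = devm_regulator_register(&pdev->dev, &my_desc, &my_cfg);
            if (IS_ERR(rdev))
                    return PTR_ERR(rdev);

            return 0;       /* cleanup handled by devres from here on */
    }
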
index 39284610a53664c1960f32b27ac05540bfe801bb..599ad201dca758a64c97ed88622097859e306862 100644 (file)
@@ -287,8 +287,9 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
                                               &cfg);
        if (IS_ERR(drvdata->dev)) {
-               ret = PTR_ERR(drvdata->dev);
-               dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+               ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
+                                   "Failed to register regulator: %ld\n",
+                                   PTR_ERR(drvdata->dev));
                return ret;
        }
 
index 845bc3b4026dd2b4d4142102e26eb3cc3ca1c799..662d87ae61cb5254420e8673a8ec53f2a2fb14d7 100644 (file)
@@ -4,7 +4,7 @@
 //
 // Copyright (c) 2013 Linaro Ltd.
 // Copyright (c) 2011 HiSilicon Ltd.
-// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
+// Copyright (c) 2020-2021 Huawei Technologies Co., Ltd.
 //
 // Guodong Xu <guodong.xu@linaro.org>
 
@@ -27,34 +27,34 @@ struct hi6421_spmi_reg_info {
        u32                     eco_uA;
 };
 
-static const unsigned int ldo3_voltages[] = {
+static const unsigned int range_1v5_to_2v0[] = {
        1500000, 1550000, 1600000, 1650000,
        1700000, 1725000, 1750000, 1775000,
        1800000, 1825000, 1850000, 1875000,
        1900000, 1925000, 1950000, 2000000
 };
 
-static const unsigned int ldo4_voltages[] = {
+static const unsigned int range_1v725_to_1v9[] = {
        1725000, 1750000, 1775000, 1800000,
        1825000, 1850000, 1875000, 1900000
 };
 
-static const unsigned int ldo9_voltages[] = {
+static const unsigned int range_1v75_to_3v3[] = {
        1750000, 1800000, 1825000, 2800000,
        2850000, 2950000, 3000000, 3300000
 };
 
-static const unsigned int ldo15_voltages[] = {
+static const unsigned int range_1v8_to_3v0[] = {
        1800000, 1850000, 2400000, 2600000,
        2700000, 2850000, 2950000, 3000000
 };
 
-static const unsigned int ldo17_voltages[] = {
+static const unsigned int range_2v5_to_3v3[] = {
        2500000, 2600000, 2700000, 2800000,
        3000000, 3100000, 3200000, 3300000
 };
 
-static const unsigned int ldo34_voltages[] = {
+static const unsigned int range_2v6_to_3v3[] = {
        2600000, 2700000, 2800000, 2900000,
        3000000, 3100000, 3200000, 3300000
 };
@@ -73,14 +73,14 @@ static const unsigned int ldo34_voltages[] = {
  */
 #define HI6421V600_LDO(_id, vtable, ereg, emask, vreg,                        \
                       odelay, etime, ecomask, ecoamp)                         \
-       [HI6421V600_##_id] = {                                                 \
+       [hi6421v600_##_id] = {                                                 \
                .desc = {                                                      \
                        .name           = #_id,                                \
                        .of_match        = of_match_ptr(#_id),                 \
                        .regulators_node = of_match_ptr("regulators"),         \
                        .ops            = &hi6421_spmi_ldo_rops,               \
                        .type           = REGULATOR_VOLTAGE,                   \
-                       .id             = HI6421V600_##_id,                    \
+                       .id             = hi6421v600_##_id,                    \
                        .owner          = THIS_MODULE,                         \
                        .volt_table     = vtable,                              \
                        .n_voltages     = ARRAY_SIZE(vtable),                  \
@@ -185,46 +185,46 @@ static const struct regulator_ops hi6421_spmi_ldo_rops = {
 
 /* HI6421v600 regulators with known registers */
 enum hi6421_spmi_regulator_id {
-       HI6421V600_LDO3,
-       HI6421V600_LDO4,
-       HI6421V600_LDO9,
-       HI6421V600_LDO15,
-       HI6421V600_LDO16,
-       HI6421V600_LDO17,
-       HI6421V600_LDO33,
-       HI6421V600_LDO34,
+       hi6421v600_ldo3,
+       hi6421v600_ldo4,
+       hi6421v600_ldo9,
+       hi6421v600_ldo15,
+       hi6421v600_ldo16,
+       hi6421v600_ldo17,
+       hi6421v600_ldo33,
+       hi6421v600_ldo34,
 };
 
 static struct hi6421_spmi_reg_info regulator_info[] = {
-       HI6421V600_LDO(LDO3, ldo3_voltages,
+       HI6421V600_LDO(ldo3, range_1v5_to_2v0,
                       0x16, 0x01, 0x51,
                       20000, 120,
                       0, 0),
-       HI6421V600_LDO(LDO4, ldo4_voltages,
+       HI6421V600_LDO(ldo4, range_1v725_to_1v9,
                       0x17, 0x01, 0x52,
                       20000, 120,
                       0x10, 10000),
-       HI6421V600_LDO(LDO9, ldo9_voltages,
+       HI6421V600_LDO(ldo9, range_1v75_to_3v3,
                       0x1c, 0x01, 0x57,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO15, ldo15_voltages,
+       HI6421V600_LDO(ldo15, range_1v8_to_3v0,
                       0x21, 0x01, 0x5c,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO16, ldo15_voltages,
+       HI6421V600_LDO(ldo16, range_1v8_to_3v0,
                       0x22, 0x01, 0x5d,
                       20000, 360,
                       0x10, 10000),
-       HI6421V600_LDO(LDO17, ldo17_voltages,
+       HI6421V600_LDO(ldo17, range_2v5_to_3v3,
                       0x23, 0x01, 0x5e,
                       20000, 120,
                       0x10, 10000),
-       HI6421V600_LDO(LDO33, ldo17_voltages,
+       HI6421V600_LDO(ldo33, range_2v5_to_3v3,
                       0x32, 0x01, 0x6d,
                       20000, 120,
                       0, 0),
-       HI6421V600_LDO(LDO34, ldo34_voltages,
+       HI6421V600_LDO(ldo34, range_2v6_to_3v3,
                       0x33, 0x01, 0x6e,
                       20000, 120,
                       0, 0),
index fabe2e53093ee53129ddec4003ad2b0a45eda9fa..52276443557501442a7a3d1fc53ecd531513d5ce 100644 (file)
@@ -184,7 +184,7 @@ static irqreturn_t regulator_notifier_isr(int irq, void *data)
         * If retry_count exceeds the given safety limit we call IC specific die
         * handler which can try disabling regulator(s).
         *
-        * If no die handler is given we will just bug() as a last resort.
+        * If no die handler is given we will just power off as a last resort.
         *
         * We could try disabling all associated rdevs - but we might shoot
         * ourselves in the head and leave the problematic regulator enabled. So
index 0d35be4e0e5ac9a9e1b86c820cf75bd2965ac029..eb8027813b99f037750eace868075b14c637bcb8 100644 (file)
@@ -28,18 +28,15 @@ struct mt6358_regulator_info {
        u32 qi;
        const u32 *index_table;
        unsigned int n_table;
-       u32 vsel_shift;
        u32 da_vsel_reg;
        u32 da_vsel_mask;
-       u32 da_vsel_shift;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
 };
 
 #define MT6358_BUCK(match, vreg, min, max, step,               \
        volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask,   \
-       _da_vsel_shift, _modeset_reg, _modeset_shift)           \
+       _modeset_reg, _modeset_shift)           \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -61,15 +58,13 @@ struct mt6358_regulator_info {
        .qi = BIT(0),   \
        .da_vsel_reg = _da_vsel_reg,    \
        .da_vsel_mask = _da_vsel_mask,  \
-       .da_vsel_shift = _da_vsel_shift,        \
        .modeset_reg = _modeset_reg,    \
        .modeset_mask = BIT(_modeset_shift),    \
-       .modeset_shift = _modeset_shift \
 }
 
 #define MT6358_LDO(match, vreg, ldo_volt_table,        \
        ldo_index_table, enreg, enbit, vosel,   \
-       vosel_mask, vosel_shift)        \
+       vosel_mask)     \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -89,12 +84,11 @@ struct mt6358_regulator_info {
        .qi = BIT(15),  \
        .index_table = ldo_index_table, \
        .n_table = ARRAY_SIZE(ldo_index_table), \
-       .vsel_shift = vosel_shift,      \
 }
 
 #define MT6358_LDO1(match, vreg, min, max, step,       \
        volt_ranges, _da_vsel_reg, _da_vsel_mask,       \
-       _da_vsel_shift, vosel, vosel_mask)      \
+       vosel, vosel_mask)      \
 [MT6358_ID_##vreg] = { \
        .desc = {       \
                .name = #vreg,  \
@@ -113,7 +107,6 @@ struct mt6358_regulator_info {
        },      \
        .da_vsel_reg = _da_vsel_reg,    \
        .da_vsel_mask = _da_vsel_mask,  \
-       .da_vsel_shift = _da_vsel_shift,        \
        .status_reg = MT6358_LDO_##vreg##_DBG1, \
        .qi = BIT(0),   \
 }
@@ -260,9 +253,9 @@ static int mt6358_set_voltage_sel(struct regulator_dev *rdev,
        pvol = info->index_table;
 
        idx = pvol[selector];
+       idx <<= ffs(info->desc.vsel_mask) - 1;
        ret = regmap_update_bits(rdev->regmap, info->desc.vsel_reg,
-                                info->desc.vsel_mask,
-                                idx << info->vsel_shift);
+                                info->desc.vsel_mask, idx);
 
        return ret;
 }
@@ -282,7 +275,8 @@ static int mt6358_get_voltage_sel(struct regulator_dev *rdev)
                return ret;
        }
 
-       selector = (selector & info->desc.vsel_mask) >> info->vsel_shift;
+       selector = (selector & info->desc.vsel_mask) >>
+                       (ffs(info->desc.vsel_mask) - 1);
        pvol = info->index_table;
        for (idx = 0; idx < info->desc.n_voltages; idx++) {
                if (pvol[idx] == selector)
@@ -305,7 +299,7 @@ static int mt6358_get_buck_voltage_sel(struct regulator_dev *rdev)
                return ret;
        }
 
-       ret = (regval >> info->da_vsel_shift) & info->da_vsel_mask;
+       ret = (regval & info->da_vsel_mask) >> (ffs(info->da_vsel_mask) - 1);
 
        return ret;
 }
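The mt6358/mt6359/mt6397 changes in this series all rely on the same trick: for a non-zero, contiguous bitmask, ffs() (the 1-based index of the lowest set bit) recovers the shift, so the separate *_shift fields become redundant. For example, ffs(0x7f00) == 9, giving the shift of 8 that da_vsel_shift used to carry. A minimal sketch, with hypothetical helper names:

/* Sketch: derive a field's shift from its mask instead of storing it. */
static unsigned int field_get(unsigned int reg, unsigned int mask)
{
	return (reg & mask) >> (ffs(mask) - 1);	/* extract field from register */
}

static unsigned int field_prep(unsigned int val, unsigned int mask)
{
	return (val << (ffs(mask) - 1)) & mask;	/* position value under mask */
}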
@@ -342,11 +336,10 @@ static int mt6358_regulator_set_mode(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
-       dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x, %#x\n",
-               info->modeset_reg, info->modeset_mask,
-               info->modeset_shift, val);
+       dev_dbg(&rdev->dev, "mt6358 buck set_mode %#x, %#x, %#x\n",
+               info->modeset_reg, info->modeset_mask, val);
 
-       val <<= info->modeset_shift;
+       val <<= ffs(info->modeset_mask) - 1;
 
        return regmap_update_bits(rdev->regmap, info->modeset_reg,
                                  info->modeset_mask, val);
@@ -364,7 +357,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+       switch ((regval & info->modeset_mask) >> (ffs(info->modeset_mask) - 1)) {
        case MT6358_BUCK_MODE_AUTO:
                return REGULATOR_MODE_NORMAL;
        case MT6358_BUCK_MODE_FORCE_PWM:
@@ -412,30 +405,30 @@ static const struct regulator_ops mt6358_volt_fixed_ops = {
 static struct mt6358_regulator_info mt6358_regulators[] = {
        MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
                    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
-                   0, MT6358_VDRAM1_ANA_CON0, 8),
+                   MT6358_VDRAM1_ANA_CON0, 8),
        MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
-                   0, MT6358_VCORE_VGPU_ANA_CON0, 1),
+                   MT6358_VCORE_VGPU_ANA_CON0, 1),
        MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
-                   buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, 0,
+                   buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f,
                    MT6358_VPA_ANA_CON0, 3),
        MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
-                   0, MT6358_VPROC_ANA_CON0, 1),
+                   MT6358_VPROC_ANA_CON0, 1),
        MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
-                   0, MT6358_VPROC_ANA_CON0, 2),
+                   MT6358_VPROC_ANA_CON0, 2),
        MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
-                   buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, 0,
+                   buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f,
                    MT6358_VCORE_VGPU_ANA_CON0, 2),
        MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
-                   buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, 0,
+                   buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f,
                    MT6358_VS2_ANA_CON0, 8),
        MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
                    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
-                   0, MT6358_VMODEM_ANA_CON0, 8),
+                   MT6358_VMODEM_ANA_CON0, 8),
        MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
-                   buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, 0,
+                   buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f,
                    MT6358_VS1_ANA_CON0, 8),
        MT6358_REG_FIXED("ldo_vrf12", VRF12,
                         MT6358_LDO_VRF12_CON0, 0, 1200000),
@@ -457,49 +450,49 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
        MT6358_REG_FIXED("ldo_vaud28", VAUD28,
                         MT6358_LDO_VAUD28_CON0, 0, 2800000),
        MT6358_LDO("ldo_vdram2", VDRAM2, vdram2_voltages, vdram2_idx,
-                  MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf, 0),
+                  MT6358_LDO_VDRAM2_CON0, 0, MT6358_LDO_VDRAM2_ELR0, 0xf),
        MT6358_LDO("ldo_vsim1", VSIM1, vsim_voltages, vsim_idx,
-                  MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VSIM1_CON0, 0, MT6358_VSIM1_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vibr", VIBR, vibr_voltages, vibr_idx,
-                  MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VIBR_CON0, 0, MT6358_VIBR_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vusb", VUSB, vusb_voltages, vusb_idx,
-                  MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VUSB_CON0_0, 0, MT6358_VUSB_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcamd", VCAMD, vcamd_voltages, vcamd_idx,
-                  MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMD_CON0, 0, MT6358_VCAMD_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vefuse", VEFUSE, vefuse_voltages, vefuse_idx,
-                  MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VEFUSE_CON0, 0, MT6358_VEFUSE_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vmch", VMCH, vmch_vemc_voltages, vmch_vemc_idx,
-                  MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VMCH_CON0, 0, MT6358_VMCH_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcama1", VCAMA1, vcama_voltages, vcama_idx,
-                  MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMA1_CON0, 0, MT6358_VCAMA1_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vemc", VEMC, vmch_vemc_voltages, vmch_vemc_idx,
-                  MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700, 8),
+                  MT6358_LDO_VEMC_CON0, 0, MT6358_VEMC_ANA_CON0, 0x700),
        MT6358_LDO("ldo_vcn33_bt", VCN33_BT, vcn33_bt_wifi_voltages,
                   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_0,
-                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+                  0, MT6358_VCN33_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vcn33_wifi", VCN33_WIFI, vcn33_bt_wifi_voltages,
                   vcn33_bt_wifi_idx, MT6358_LDO_VCN33_CON0_1,
-                  0, MT6358_VCN33_ANA_CON0, 0x300, 8),
+                  0, MT6358_VCN33_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vcama2", VCAMA2, vcama_voltages, vcama_idx,
-                  MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VCAMA2_CON0, 0, MT6358_VCAMA2_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vmc", VMC, vmc_voltages, vmc_idx,
-                  MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VMC_CON0, 0, MT6358_VMC_ANA_CON0, 0xf00),
        MT6358_LDO("ldo_vldo28", VLDO28, vldo28_voltages, vldo28_idx,
                   MT6358_LDO_VLDO28_CON0_0, 0,
-                  MT6358_VLDO28_ANA_CON0, 0x300, 8),
+                  MT6358_VLDO28_ANA_CON0, 0x300),
        MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
-                  MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00, 8),
+                  MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00),
        MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON0, 0x7f),
        MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON2, 0x7f),
        MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON3, 0x7f),
        MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
-                   buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f, 8,
+                   buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00,
                    MT6358_LDO_VSRAM_CON1, 0x7f),
 };
 
index 7ce0bd377a089fa9d471f14a0a3cffc30f8e402a..de3b0462832cd3e7777a55ec16ce181a67a6c5e9 100644 (file)
@@ -27,7 +27,6 @@
  * @qi: Mask for query enable signal status of regulators.
  * @modeset_reg: for operating AUTO/PWM mode register.
  * @modeset_mask: MASK for operating modeset register.
- * @modeset_shift: SHIFT for operating modeset register.
  */
 struct mt6359_regulator_info {
        struct regulator_desc desc;
@@ -35,10 +34,8 @@ struct mt6359_regulator_info {
        u32 qi;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
        u32 lp_mode_reg;
        u32 lp_mode_mask;
-       u32 lp_mode_shift;
 };
 
 #define MT6359_BUCK(match, _name, min, max, step,              \
@@ -68,10 +65,8 @@ struct mt6359_regulator_info {
        .qi = BIT(0),                                           \
        .lp_mode_reg = _lp_mode_reg,                            \
        .lp_mode_mask = BIT(_lp_mode_shift),                    \
-       .lp_mode_shift = _lp_mode_shift,                        \
        .modeset_reg = _modeset_reg,                            \
        .modeset_mask = BIT(_modeset_shift),                    \
-       .modeset_shift = _modeset_shift                         \
 }
 
 #define MT6359_LDO_LINEAR(match, _name, min, max, step,                \
@@ -282,8 +277,10 @@ static unsigned int mt6359_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       if ((regval & info->modeset_mask) >> info->modeset_shift ==
-               MT6359_BUCK_MODE_FORCE_PWM)
+       regval &= info->modeset_mask;
+       regval >>= ffs(info->modeset_mask) - 1;
+
+       if (regval == MT6359_BUCK_MODE_FORCE_PWM)
                return REGULATOR_MODE_FAST;
 
        ret = regmap_read(rdev->regmap, info->lp_mode_reg, &regval);
@@ -310,7 +307,7 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
        switch (mode) {
        case REGULATOR_MODE_FAST:
                val = MT6359_BUCK_MODE_FORCE_PWM;
-               val <<= info->modeset_shift;
+               val <<= ffs(info->modeset_mask) - 1;
                ret = regmap_update_bits(rdev->regmap,
                                         info->modeset_reg,
                                         info->modeset_mask,
@@ -319,14 +316,14 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
        case REGULATOR_MODE_NORMAL:
                if (curr_mode == REGULATOR_MODE_FAST) {
                        val = MT6359_BUCK_MODE_AUTO;
-                       val <<= info->modeset_shift;
+                       val <<= ffs(info->modeset_mask) - 1;
                        ret = regmap_update_bits(rdev->regmap,
                                                 info->modeset_reg,
                                                 info->modeset_mask,
                                                 val);
                } else if (curr_mode == REGULATOR_MODE_IDLE) {
                        val = MT6359_BUCK_MODE_NORMAL;
-                       val <<= info->lp_mode_shift;
+                       val <<= ffs(info->lp_mode_mask) - 1;
                        ret = regmap_update_bits(rdev->regmap,
                                                 info->lp_mode_reg,
                                                 info->lp_mode_mask,
@@ -336,7 +333,7 @@ static int mt6359_regulator_set_mode(struct regulator_dev *rdev,
                break;
        case REGULATOR_MODE_IDLE:
                val = MT6359_BUCK_MODE_LP >> 1;
-               val <<= info->lp_mode_shift;
+               val <<= ffs(info->lp_mode_mask) - 1;
                ret = regmap_update_bits(rdev->regmap,
                                         info->lp_mode_reg,
                                         info->lp_mode_mask,
index 0a30df5e414fc5677627490d787e60dac771e426..b9bf7ade1f8a73357157434109d38c3fbe482e8b 100644 (file)
@@ -32,7 +32,6 @@ struct mt6397_regulator_info {
        u32 vselctrl_mask;
        u32 modeset_reg;
        u32 modeset_mask;
-       u32 modeset_shift;
 };
 
 #define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg,   \
@@ -61,7 +60,6 @@ struct mt6397_regulator_info {
        .vselctrl_mask = BIT(1),                                        \
        .modeset_reg = _modeset_reg,                                    \
        .modeset_mask = BIT(_modeset_shift),                            \
-       .modeset_shift = _modeset_shift                                 \
 }
 
 #define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,   \
@@ -175,11 +173,11 @@ static int mt6397_regulator_set_mode(struct regulator_dev *rdev,
                goto err_mode;
        }
 
-       dev_dbg(&rdev->dev, "mt6397 buck set_mode %#x, %#x, %#x, %#x\n",
-               info->modeset_reg, info->modeset_mask,
-               info->modeset_shift, val);
+       dev_dbg(&rdev->dev, "mt6397 buck set_mode %#x, %#x, %#x\n",
+               info->modeset_reg, info->modeset_mask, val);
+
+       val <<= ffs(info->modeset_mask) - 1;
 
-       val <<= info->modeset_shift;
        ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
                                 info->modeset_mask, val);
 err_mode:
@@ -204,7 +202,10 @@ static unsigned int mt6397_regulator_get_mode(struct regulator_dev *rdev)
                return ret;
        }
 
-       switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+       regval &= info->modeset_mask;
+       regval >>= ffs(info->modeset_mask) - 1;
+
+       switch (regval) {
        case MT6397_BUCK_MODE_AUTO:
                return REGULATOR_MODE_NORMAL;
        case MT6397_BUCK_MODE_FORCE_PWM:
index 0e7311629165dabd2c479c8606270f7fd50baf5b..da4cf5a6acc20ff5864732fc554b8b079d37ccb3 100644 (file)
 #include <linux/mfd/rt5033-private.h>
 #include <linux/regulator/of_regulator.h>
 
+static const struct linear_range rt5033_buck_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1000000, 0, 20, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 21, 31, 0),
+};
+
+static const struct linear_range rt5033_ldo_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1200000, 0, 18, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 19, 31, 0),
+};
+
 static const struct regulator_ops rt5033_safe_ldo_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
@@ -24,8 +34,7 @@ static const struct regulator_ops rt5033_buck_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
-       .list_voltage           = regulator_list_voltage_linear,
-       .map_voltage            = regulator_map_voltage_linear,
+       .list_voltage           = regulator_list_voltage_linear_range,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
 };
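The rt5033 conversion is needed because neither rail is strictly linear: the buck reaches 3.0 V at selector 20, and selectors 21..31 all repeat that value (likewise 19..31 for the LDO), which a single min_uV/uV_step pair cannot express. REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) with a zero-step tail captures it. A simplified sketch of the selector-to-voltage walk the core performs (illustrative only, not the exact regulator_list_voltage_linear_range() implementation):

static int list_voltage_sketch(const struct linear_range *ranges,
			       int n_ranges, unsigned int sel)
{
	int i;

	for (i = 0; i < n_ranges; i++) {
		if (sel < ranges[i].min_sel || sel > ranges[i].max_sel)
			continue;
		/* A zero step makes every selector in the span equal. */
		return ranges[i].min +
		       (sel - ranges[i].min_sel) * ranges[i].step;
	}

	return -EINVAL;
}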
@@ -40,8 +49,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
                .type           = REGULATOR_VOLTAGE,
                .owner          = THIS_MODULE,
                .n_voltages     = RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM,
-               .min_uV         = RT5033_REGULATOR_BUCK_VOLTAGE_MIN,
-               .uV_step        = RT5033_REGULATOR_BUCK_VOLTAGE_STEP,
+               .linear_ranges  = rt5033_buck_ranges,
+               .n_linear_ranges = ARRAY_SIZE(rt5033_buck_ranges),
                .enable_reg     = RT5033_REG_CTRL,
                .enable_mask    = RT5033_CTRL_EN_BUCK_MASK,
                .vsel_reg       = RT5033_REG_BUCK_CTRL,
@@ -56,8 +65,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
                .type           = REGULATOR_VOLTAGE,
                .owner          = THIS_MODULE,
                .n_voltages     = RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM,
-               .min_uV         = RT5033_REGULATOR_LDO_VOLTAGE_MIN,
-               .uV_step        = RT5033_REGULATOR_LDO_VOLTAGE_STEP,
+               .linear_ranges  = rt5033_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(rt5033_ldo_ranges),
                .enable_reg     = RT5033_REG_CTRL,
                .enable_mask    = RT5033_CTRL_EN_LDO_MASK,
                .vsel_reg       = RT5033_REG_LDO_CTRL,
index d3299a72fd10dfb42d1a252636a3078cd3756315..cb22a207e9fffc5d75d5e166710ff107c749b432 100644 (file)
@@ -144,7 +144,7 @@ static int rt6245_init_device_properties(struct device *dev)
 static int rt6245_reg_write(void *context, unsigned int reg, unsigned int val)
 {
        struct i2c_client *i2c = context;
-       const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
+       static const u8 func_base[] = { 0x6F, 0x73, 0x78, 0x61, 0x7C, 0 };
        unsigned int code, bit_count;
 
        code = func_base[reg];
diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
new file mode 100644 (file)
index 0000000..f21e3f8
--- /dev/null
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum {
+       RTQ2134_IDX_BUCK1 = 0,
+       RTQ2134_IDX_BUCK2,
+       RTQ2134_IDX_BUCK3,
+       RTQ2134_IDX_MAX
+};
+
+#define RTQ2134_AUTO_MODE              0
+#define RTQ2134_FCCM_MODE              1
+
+#define RTQ2134_BUCK_DVS0_CTRL         0
+#define RTQ2134_BUCK_VSEL_CTRL         2
+
+#define RTQ2134_REG_IO_CHIPNAME                0x01
+#define RTQ2134_REG_FLT_RECORDTEMP     0x13
+#define RTQ2134_REG_FLT_RECORDBUCK(_id)        (0x14 + (_id))
+#define RTQ2134_REG_FLT_BUCKCTRL(_id)  (0x37 + (_id))
+#define RTQ2134_REG_BUCK1_CFG0         0x42
+#define RTQ2134_REG_BUCK1_DVS0CFG1     0x48
+#define RTQ2134_REG_BUCK1_DVS0CFG0     0x49
+#define RTQ2134_REG_BUCK1_DVS1CFG1     0x4A
+#define RTQ2134_REG_BUCK1_DVS1CFG0     0x4B
+#define RTQ2134_REG_BUCK1_DVSCFG       0x52
+#define RTQ2134_REG_BUCK1_RSPCFG       0x54
+#define RTQ2134_REG_BUCK2_CFG0         0x5F
+#define RTQ2134_REG_BUCK2_DVS0CFG1     0x62
+#define RTQ2134_REG_BUCK2_DVS0CFG0     0x63
+#define RTQ2134_REG_BUCK2_DVS1CFG1     0x64
+#define RTQ2134_REG_BUCK2_DVS1CFG0     0x65
+#define RTQ2134_REG_BUCK2_DVSCFG       0x6C
+#define RTQ2134_REG_BUCK2_RSPCFG       0x6E
+#define RTQ2134_REG_BUCK3_CFG0         0x79
+#define RTQ2134_REG_BUCK3_DVS0CFG1     0x7C
+#define RTQ2134_REG_BUCK3_DVS0CFG0     0x7D
+#define RTQ2134_REG_BUCK3_DVS1CFG1     0x7E
+#define RTQ2134_REG_BUCK3_DVS1CFG0     0x7F
+#define RTQ2134_REG_BUCK3_DVSCFG       0x86
+#define RTQ2134_REG_BUCK3_RSPCFG       0x88
+#define RTQ2134_REG_BUCK3_SLEWCTRL     0x89
+
+#define RTQ2134_VOUT_MAXNUM            256
+#define RTQ2134_VOUT_MASK              0xFF
+#define RTQ2134_VOUTEN_MASK            BIT(0)
+#define RTQ2134_ACTDISCHG_MASK         BIT(0)
+#define RTQ2134_RSPUP_MASK             GENMASK(6, 4)
+#define RTQ2134_FCCM_MASK              BIT(5)
+#define RTQ2134_UVHICCUP_MASK          BIT(3)
+#define RTQ2134_BUCKDVS_CTRL_MASK      GENMASK(1, 0)
+#define RTQ2134_CHIPOT_MASK            BIT(2)
+#define RTQ2134_BUCKOV_MASK            BIT(5)
+#define RTQ2134_BUCKUV_MASK            BIT(4)
+
+struct rtq2134_regulator_desc {
+       struct regulator_desc desc;
+       /* Extension for proprietary register and mask */
+       unsigned int mode_reg;
+       unsigned int mode_mask;
+       unsigned int suspend_enable_reg;
+       unsigned int suspend_enable_mask;
+       unsigned int suspend_vsel_reg;
+       unsigned int suspend_vsel_mask;
+       unsigned int suspend_mode_reg;
+       unsigned int suspend_mode_mask;
+       unsigned int dvs_ctrl_reg;
+};
+
+static int rtq2134_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val;
+
+       if (mode == REGULATOR_MODE_NORMAL)
+               val = RTQ2134_AUTO_MODE;
+       else if (mode == REGULATOR_MODE_FAST)
+               val = RTQ2134_FCCM_MODE;
+       else
+               return -EINVAL;
+
+       val <<= ffs(desc->mode_mask) - 1;
+       return regmap_update_bits(rdev->regmap, desc->mode_reg, desc->mode_mask,
+                                 val);
+}
+
+static unsigned int rtq2134_buck_get_mode(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int mode;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, desc->mode_reg, &mode);
+       if (ret)
+               return ret;
+
+       if (mode & desc->mode_mask)
+               return REGULATOR_MODE_FAST;
+       return REGULATOR_MODE_NORMAL;
+}
+
+static int rtq2134_buck_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       int sel;
+
+       sel = regulator_map_voltage_linear_range(rdev, uV, uV);
+       if (sel < 0)
+               return sel;
+
+       sel <<= ffs(desc->suspend_vsel_mask) - 1;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_vsel_reg,
+                                 desc->suspend_vsel_mask, sel);
+}
+
+static int rtq2134_buck_set_suspend_enable(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val = desc->suspend_enable_mask;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_enable_reg,
+                                 desc->suspend_enable_mask, val);
+}
+
+static int rtq2134_buck_set_suspend_disable(struct regulator_dev *rdev)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+
+       return regmap_update_bits(rdev->regmap, desc->suspend_enable_reg,
+                                 desc->suspend_enable_mask, 0);
+}
+
+static int rtq2134_buck_set_suspend_mode(struct regulator_dev *rdev,
+                                        unsigned int mode)
+{
+       struct rtq2134_regulator_desc *desc =
+               (struct rtq2134_regulator_desc *)rdev->desc;
+       unsigned int val;
+
+       if (mode == REGULATOR_MODE_NORMAL)
+               val = RTQ2134_AUTO_MODE;
+       else if (mode == REGULATOR_MODE_FAST)
+               val = RTQ2134_FCCM_MODE;
+       else
+               return -EINVAL;
+
+       val <<= ffs(desc->suspend_mode_mask) - 1;
+       return regmap_update_bits(rdev->regmap, desc->suspend_mode_reg,
+                                 desc->suspend_mode_mask, val);
+}
+
+static int rtq2134_buck_get_error_flags(struct regulator_dev *rdev,
+                                       unsigned int *flags)
+{
+       int rid = rdev_get_id(rdev);
+       unsigned int chip_error, buck_error, events = 0;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, RTQ2134_REG_FLT_RECORDTEMP,
+                         &chip_error);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to get chip error flag\n");
+               return ret;
+       }
+
+       ret = regmap_read(rdev->regmap, RTQ2134_REG_FLT_RECORDBUCK(rid),
+                         &buck_error);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to get buck error flag\n");
+               return ret;
+       }
+
+       if (chip_error & RTQ2134_CHIPOT_MASK)
+               events |= REGULATOR_ERROR_OVER_TEMP;
+
+       if (buck_error & RTQ2134_BUCKUV_MASK)
+               events |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+       if (buck_error & RTQ2134_BUCKOV_MASK)
+               events |= REGULATOR_ERROR_REGULATION_OUT;
+
+       *flags = events;
+       return 0;
+}
+
+static const struct regulator_ops rtq2134_buck_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_active_discharge = regulator_set_active_discharge_regmap,
+       .set_ramp_delay = regulator_set_ramp_delay_regmap,
+       .set_mode = rtq2134_buck_set_mode,
+       .get_mode = rtq2134_buck_get_mode,
+       .set_suspend_voltage = rtq2134_buck_set_suspend_voltage,
+       .set_suspend_enable = rtq2134_buck_set_suspend_enable,
+       .set_suspend_disable = rtq2134_buck_set_suspend_disable,
+       .set_suspend_mode = rtq2134_buck_set_suspend_mode,
+       .get_error_flags = rtq2134_buck_get_error_flags,
+};
+
+static const struct linear_range rtq2134_buck_vout_ranges[] = {
+       REGULATOR_LINEAR_RANGE(300000, 0, 200, 5000),
+       REGULATOR_LINEAR_RANGE(1310000, 201, 255, 10000)
+};
+
+static unsigned int rtq2134_buck_of_map_mode(unsigned int mode)
+{
+       switch (mode) {
+       case RTQ2134_AUTO_MODE:
+               return REGULATOR_MODE_NORMAL;
+       case RTQ2134_FCCM_MODE:
+               return REGULATOR_MODE_FAST;
+       }
+
+       return REGULATOR_MODE_INVALID;
+}
+
+static int rtq2134_buck_of_parse_cb(struct device_node *np,
+                                   const struct regulator_desc *desc,
+                                   struct regulator_config *cfg)
+{
+       struct rtq2134_regulator_desc *rdesc =
+               (struct rtq2134_regulator_desc *)desc;
+       int rid = desc->id;
+       bool uv_shutdown, vsel_dvs;
+       unsigned int val;
+       int ret;
+
+       vsel_dvs = of_property_read_bool(np, "richtek,use-vsel-dvs");
+       if (vsel_dvs)
+               val = RTQ2134_BUCK_VSEL_CTRL;
+       else
+               val = RTQ2134_BUCK_DVS0_CTRL;
+
+       ret = regmap_update_bits(cfg->regmap, rdesc->dvs_ctrl_reg,
+                                RTQ2134_BUCKDVS_CTRL_MASK, val);
+       if (ret)
+               return ret;
+
+       uv_shutdown = of_property_read_bool(np, "richtek,uv-shutdown");
+       if (uv_shutdown)
+               val = 0;
+       else
+               val = RTQ2134_UVHICCUP_MASK;
+
+       return regmap_update_bits(cfg->regmap, RTQ2134_REG_FLT_BUCKCTRL(rid),
+                                 RTQ2134_UVHICCUP_MASK, val);
+}
+
+static const unsigned int rtq2134_buck_ramp_delay_table[] = {
+       0, 16000, 0, 8000, 4000, 2000, 1000, 500
+};
+
+#define RTQ2134_BUCK_DESC(_id) { \
+       .desc = { \
+               .name = "rtq2134_buck" #_id, \
+               .of_match = of_match_ptr("buck" #_id), \
+               .regulators_node = of_match_ptr("regulators"), \
+               .id = RTQ2134_IDX_BUCK##_id, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               .ops = &rtq2134_buck_ops, \
+               .n_voltages = RTQ2134_VOUT_MAXNUM, \
+               .linear_ranges = rtq2134_buck_vout_ranges, \
+               .n_linear_ranges = ARRAY_SIZE(rtq2134_buck_vout_ranges), \
+               .vsel_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG1, \
+               .vsel_mask = RTQ2134_VOUT_MASK, \
+               .enable_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG0, \
+               .enable_mask = RTQ2134_VOUTEN_MASK, \
+               .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
+               .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
+               .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
+               .ramp_mask = RTQ2134_RSPUP_MASK, \
+               .ramp_delay_table = rtq2134_buck_ramp_delay_table, \
+               .n_ramp_values = ARRAY_SIZE(rtq2134_buck_ramp_delay_table), \
+               .of_map_mode = rtq2134_buck_of_map_mode, \
+               .of_parse_cb = rtq2134_buck_of_parse_cb, \
+       }, \
+       .mode_reg = RTQ2134_REG_BUCK##_id##_DVS0CFG0, \
+       .mode_mask = RTQ2134_FCCM_MASK, \
+       .suspend_mode_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG0, \
+       .suspend_mode_mask = RTQ2134_FCCM_MASK, \
+       .suspend_enable_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG0, \
+       .suspend_enable_mask = RTQ2134_VOUTEN_MASK, \
+       .suspend_vsel_reg = RTQ2134_REG_BUCK##_id##_DVS1CFG1, \
+       .suspend_vsel_mask = RTQ2134_VOUT_MASK, \
+       .dvs_ctrl_reg = RTQ2134_REG_BUCK##_id##_DVSCFG, \
+}
+
+static const struct rtq2134_regulator_desc rtq2134_regulator_descs[] = {
+       RTQ2134_BUCK_DESC(1),
+       RTQ2134_BUCK_DESC(2),
+       RTQ2134_BUCK_DESC(3)
+};
+
+static bool rtq2134_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+       if (reg >= RTQ2134_REG_IO_CHIPNAME && reg <= RTQ2134_REG_BUCK3_SLEWCTRL)
+               return true;
+       return false;
+}
+
+static const struct regmap_config rtq2134_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = RTQ2134_REG_BUCK3_SLEWCTRL,
+
+       .readable_reg = rtq2134_is_accessible_reg,
+       .writeable_reg = rtq2134_is_accessible_reg,
+};
+
+static int rtq2134_probe(struct i2c_client *i2c)
+{
+       struct regmap *regmap;
+       struct regulator_dev *rdev;
+       struct regulator_config regulator_cfg = {};
+       int i;
+
+       regmap = devm_regmap_init_i2c(i2c, &rtq2134_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&i2c->dev, "Failed to allocate regmap\n");
+               return PTR_ERR(regmap);
+       }
+
+       regulator_cfg.dev = &i2c->dev;
+       regulator_cfg.regmap = regmap;
+       for (i = 0; i < ARRAY_SIZE(rtq2134_regulator_descs); i++) {
+               rdev = devm_regulator_register(&i2c->dev,
+                                              &rtq2134_regulator_descs[i].desc,
+                                              &regulator_cfg);
+               if (IS_ERR(rdev)) {
+                       dev_err(&i2c->dev, "Failed to init %d regulator\n", i);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused rtq2134_device_tables[] = {
+       { .compatible = "richtek,rtq2134", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, rtq2134_device_tables);
+
+static struct i2c_driver rtq2134_driver = {
+       .driver = {
+               .name = "rtq2134",
+               .of_match_table = rtq2134_device_tables,
+       },
+       .probe_new = rtq2134_probe,
+};
+module_i2c_driver(rtq2134_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ2134 Regulator Driver");
+MODULE_LICENSE("GPL v2");
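A pattern worth noting in this new driver: struct rtq2134_regulator_desc embeds struct regulator_desc as its first member, so the shared ops can recover the per-buck register layout from rdev->desc with a plain cast, as rtq2134_buck_set_mode() does above. A minimal sketch of the same pattern, all names hypothetical:

struct foo_regulator_desc {
	struct regulator_desc desc;	/* must remain the first member */
	unsigned int mode_reg;
	unsigned int mode_mask;
};

static int foo_enable_fccm(struct regulator_dev *rdev, bool en)
{
	const struct foo_regulator_desc *fd =
		(const struct foo_regulator_desc *)rdev->desc;

	return regmap_update_bits(rdev->regmap, fd->mode_reg,
				  fd->mode_mask, en ? fd->mode_mask : 0);
}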
diff --git a/drivers/regulator/rtq6752-regulator.c b/drivers/regulator/rtq6752-regulator.c
new file mode 100644 (file)
index 0000000..609d3fc
--- /dev/null
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum {
+       RTQ6752_IDX_PAVDD = 0,
+       RTQ6752_IDX_NAVDD = 1,
+       RTQ6752_IDX_MAX
+};
+
+#define RTQ6752_REG_PAVDD      0x00
+#define RTQ6752_REG_NAVDD      0x01
+#define RTQ6752_REG_PAVDDONDLY 0x07
+#define RTQ6752_REG_PAVDDSSTIME        0x08
+#define RTQ6752_REG_NAVDDONDLY 0x0D
+#define RTQ6752_REG_NAVDDSSTIME        0x0E
+#define RTQ6752_REG_OPTION1    0x12
+#define RTQ6752_REG_CHSWITCH   0x16
+#define RTQ6752_REG_FAULT      0x1D
+
+#define RTQ6752_VOUT_MASK      GENMASK(5, 0)
+#define RTQ6752_NAVDDEN_MASK   BIT(3)
+#define RTQ6752_PAVDDEN_MASK   BIT(0)
+#define RTQ6752_PAVDDAD_MASK   BIT(4)
+#define RTQ6752_NAVDDAD_MASK   BIT(3)
+#define RTQ6752_PAVDDF_MASK    BIT(3)
+#define RTQ6752_NAVDDF_MASK    BIT(0)
+#define RTQ6752_ENABLE_MASK    (BIT(RTQ6752_IDX_MAX) - 1)
+
+#define RTQ6752_VOUT_MINUV     5000000
+#define RTQ6752_VOUT_STEPUV    50000
+#define RTQ6752_VOUT_NUM       47
+#define RTQ6752_I2CRDY_TIMEUS  1000
+#define RTQ6752_MINSS_TIMEUS   5000
+
+struct rtq6752_priv {
+       struct regmap *regmap;
+       struct gpio_desc *enable_gpio;
+       struct mutex lock;
+       unsigned char enable_flag;
+};
+
+static int rtq6752_set_vdd_enable(struct regulator_dev *rdev)
+{
+       struct rtq6752_priv *priv = rdev_get_drvdata(rdev);
+       int rid = rdev_get_id(rdev), ret;
+
+       mutex_lock(&priv->lock);
+       if (priv->enable_gpio) {
+               gpiod_set_value(priv->enable_gpio, 1);
+
+               usleep_range(RTQ6752_I2CRDY_TIMEUS,
+                            RTQ6752_I2CRDY_TIMEUS + 100);
+       }
+
+       if (!priv->enable_flag) {
+               regcache_cache_only(priv->regmap, false);
+               ret = regcache_sync(priv->regmap);
+               if (ret) {
+                       mutex_unlock(&priv->lock);
+                       return ret;
+               }
+       }
+
+       priv->enable_flag |= BIT(rid);
+       mutex_unlock(&priv->lock);
+
+       return regulator_enable_regmap(rdev);
+}
+
+static int rtq6752_set_vdd_disable(struct regulator_dev *rdev)
+{
+       struct rtq6752_priv *priv = rdev_get_drvdata(rdev);
+       int rid = rdev_get_id(rdev), ret;
+
+       ret = regulator_disable_regmap(rdev);
+       if (ret)
+               return ret;
+
+       mutex_lock(&priv->lock);
+       priv->enable_flag &= ~BIT(rid);
+
+       if (!priv->enable_flag) {
+               regcache_cache_only(priv->regmap, true);
+               regcache_mark_dirty(priv->regmap);
+       }
+
+       if (priv->enable_gpio)
+               gpiod_set_value(priv->enable_gpio, 0);
+
+       mutex_unlock(&priv->lock);
+
+       return 0;
+}
+
+static int rtq6752_get_error_flags(struct regulator_dev *rdev,
+                                  unsigned int *flags)
+{
+       unsigned int val, events = 0;
+       const unsigned int fault_mask[] = {
+               RTQ6752_PAVDDF_MASK, RTQ6752_NAVDDF_MASK };
+       int rid = rdev_get_id(rdev), ret;
+
+       ret = regmap_read(rdev->regmap, RTQ6752_REG_FAULT, &val);
+       if (ret)
+               return ret;
+
+       if (val & fault_mask[rid])
+               events = REGULATOR_ERROR_REGULATION_OUT;
+
+       *flags = events;
+       return 0;
+}
+
+static const struct regulator_ops rtq6752_regulator_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = rtq6752_set_vdd_enable,
+       .disable = rtq6752_set_vdd_disable,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_active_discharge = regulator_set_active_discharge_regmap,
+       .get_error_flags = rtq6752_get_error_flags,
+};
+
+static const struct regulator_desc rtq6752_regulator_descs[] = {
+       {
+               .name = "rtq6752-pavdd",
+               .of_match = of_match_ptr("pavdd"),
+               .regulators_node = of_match_ptr("regulators"),
+               .id = RTQ6752_IDX_PAVDD,
+               .n_voltages = RTQ6752_VOUT_NUM,
+               .ops = &rtq6752_regulator_ops,
+               .owner = THIS_MODULE,
+               .min_uV = RTQ6752_VOUT_MINUV,
+               .uV_step = RTQ6752_VOUT_STEPUV,
+               .enable_time = RTQ6752_MINSS_TIMEUS,
+               .vsel_reg = RTQ6752_REG_PAVDD,
+               .vsel_mask = RTQ6752_VOUT_MASK,
+               .enable_reg = RTQ6752_REG_CHSWITCH,
+               .enable_mask = RTQ6752_PAVDDEN_MASK,
+               .active_discharge_reg = RTQ6752_REG_OPTION1,
+               .active_discharge_mask = RTQ6752_PAVDDAD_MASK,
+               .active_discharge_off = RTQ6752_PAVDDAD_MASK,
+       },
+       {
+               .name = "rtq6752-navdd",
+               .of_match = of_match_ptr("navdd"),
+               .regulators_node = of_match_ptr("regulators"),
+               .id = RTQ6752_IDX_NAVDD,
+               .n_voltages = RTQ6752_VOUT_NUM,
+               .ops = &rtq6752_regulator_ops,
+               .owner = THIS_MODULE,
+               .min_uV = RTQ6752_VOUT_MINUV,
+               .uV_step = RTQ6752_VOUT_STEPUV,
+               .enable_time = RTQ6752_MINSS_TIMEUS,
+               .vsel_reg = RTQ6752_REG_NAVDD,
+               .vsel_mask = RTQ6752_VOUT_MASK,
+               .enable_reg = RTQ6752_REG_CHSWITCH,
+               .enable_mask = RTQ6752_NAVDDEN_MASK,
+               .active_discharge_reg = RTQ6752_REG_OPTION1,
+               .active_discharge_mask = RTQ6752_NAVDDAD_MASK,
+               .active_discharge_off = RTQ6752_NAVDDAD_MASK,
+       }
+};
+
+static int rtq6752_init_device_properties(struct rtq6752_priv *priv)
+{
+       u8 raw_vals[] = { 0, 0 };
+       int ret;
+
+       /* Configure PAVDD on and softstart delay time to the minimum */
+       ret = regmap_raw_write(priv->regmap, RTQ6752_REG_PAVDDONDLY, raw_vals,
+                              ARRAY_SIZE(raw_vals));
+       if (ret)
+               return ret;
+
+       /* Configure NAVDD on and softstart delay time to the minimum */
+       return regmap_raw_write(priv->regmap, RTQ6752_REG_NAVDDONDLY, raw_vals,
+                               ARRAY_SIZE(raw_vals));
+}
+
+static bool rtq6752_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       if (reg == RTQ6752_REG_FAULT)
+               return true;
+       return false;
+}
+
+static const struct reg_default rtq6752_reg_defaults[] = {
+       { RTQ6752_REG_PAVDD, 0x14 },
+       { RTQ6752_REG_NAVDD, 0x14 },
+       { RTQ6752_REG_PAVDDONDLY, 0x01 },
+       { RTQ6752_REG_PAVDDSSTIME, 0x01 },
+       { RTQ6752_REG_NAVDDONDLY, 0x01 },
+       { RTQ6752_REG_NAVDDSSTIME, 0x01 },
+       { RTQ6752_REG_OPTION1, 0x07 },
+       { RTQ6752_REG_CHSWITCH, 0x29 },
+};
+
+static const struct regmap_config rtq6752_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_RBTREE,
+       .max_register = RTQ6752_REG_FAULT,
+       .reg_defaults = rtq6752_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(rtq6752_reg_defaults),
+       .volatile_reg = rtq6752_is_volatile_reg,
+};
+
+static int rtq6752_probe(struct i2c_client *i2c)
+{
+       struct rtq6752_priv *priv;
+       struct regulator_config reg_cfg = {};
+       struct regulator_dev *rdev;
+       int i, ret;
+
+       priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       mutex_init(&priv->lock);
+
+       priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable",
+                                                   GPIOD_OUT_HIGH);
+       if (IS_ERR(priv->enable_gpio)) {
+               dev_err(&i2c->dev, "Failed to get 'enable' gpio\n");
+               return PTR_ERR(priv->enable_gpio);
+       }
+
+       usleep_range(RTQ6752_I2CRDY_TIMEUS, RTQ6752_I2CRDY_TIMEUS + 100);
+       /* Default EN pin to high, PAVDD and NAVDD will be on */
+       priv->enable_flag = RTQ6752_ENABLE_MASK;
+
+       priv->regmap = devm_regmap_init_i2c(i2c, &rtq6752_regmap_config);
+       if (IS_ERR(priv->regmap)) {
+               dev_err(&i2c->dev, "Failed to init regmap\n");
+               return PTR_ERR(priv->regmap);
+       }
+
+       ret = rtq6752_init_device_properties(priv);
+       if (ret) {
+               dev_err(&i2c->dev, "Failed to init device properties\n");
+               return ret;
+       }
+
+       reg_cfg.dev = &i2c->dev;
+       reg_cfg.regmap = priv->regmap;
+       reg_cfg.driver_data = priv;
+
+       for (i = 0; i < ARRAY_SIZE(rtq6752_regulator_descs); i++) {
+               rdev = devm_regulator_register(&i2c->dev,
+                                              rtq6752_regulator_descs + i,
+                                              &reg_cfg);
+               if (IS_ERR(rdev)) {
+                       dev_err(&i2c->dev, "Failed to init %d regulator\n", i);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct of_device_id __maybe_unused rtq6752_device_table[] = {
+       { .compatible = "richtek,rtq6752", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, rtq6752_device_table);
+
+static struct i2c_driver rtq6752_driver = {
+       .driver = {
+               .name = "rtq6752",
+               .of_match_table = rtq6752_device_table,
+       },
+       .probe_new = rtq6752_probe,
+};
+module_i2c_driver(rtq6752_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ6752 Regulator Driver");
+MODULE_LICENSE("GPL v2");
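The enable/disable paths above gate the regmap cache on the shared enable GPIO: once the last rail goes down the chip may lose power, so the driver switches the cache to cache-only and marks it dirty, then replays the cached writes with regcache_sync() when the first rail comes back up. A minimal sketch of that sequence, compressed into one hypothetical helper:

static int foo_power_cycle(struct regmap *map)	/* hypothetical helper */
{
	regcache_cache_only(map, true);	/* chip about to lose power */
	regcache_mark_dirty(map);

	/* ... EN raised again, I2C ready after the start-up delay ... */

	regcache_cache_only(map, false);
	return regcache_sync(map);	/* replay the cached register values */
}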
index e021ae08cbaa4d216688e077eade6866ece1ff70..8360b3947eadad33298c1fded5e0b6f85ef5ef18 100644 (file)
 #include <linux/gpio/consumer.h>
 #include <linux/mfd/sy7636a.h>
 
-#define SY7636A_POLL_ENABLED_TIME 500
+struct sy7636a_data {
+       struct regmap *regmap;
+       struct gpio_desc *pgood_gpio;
+};
 
 static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
 {
@@ -35,10 +38,10 @@ static int sy7636a_get_vcom_voltage_op(struct regulator_dev *rdev)
 
 static int sy7636a_get_status(struct regulator_dev *rdev)
 {
-       struct sy7636a *sy7636a = rdev_get_drvdata(rdev);
+       struct sy7636a_data *data = dev_get_drvdata(rdev->dev.parent);
        int ret = 0;
 
-       ret = gpiod_get_value_cansleep(sy7636a->pgood_gpio);
+       ret = gpiod_get_value_cansleep(data->pgood_gpio);
        if (ret < 0)
                dev_err(&rdev->dev, "Failed to read pgood gpio: %d\n", ret);
 
@@ -61,46 +64,50 @@ static const struct regulator_desc desc = {
        .owner = THIS_MODULE,
        .enable_reg = SY7636A_REG_OPERATION_MODE_CRL,
        .enable_mask = SY7636A_OPERATION_MODE_CRL_ONOFF,
-       .poll_enabled_time = SY7636A_POLL_ENABLED_TIME,
        .regulators_node = of_match_ptr("regulators"),
        .of_match = of_match_ptr("vcom"),
 };
 
 static int sy7636a_regulator_probe(struct platform_device *pdev)
 {
-       struct sy7636a *sy7636a = dev_get_drvdata(pdev->dev.parent);
+       struct regmap *regmap = dev_get_drvdata(pdev->dev.parent);
        struct regulator_config config = { };
        struct regulator_dev *rdev;
        struct gpio_desc *gdp;
+       struct sy7636a_data *data;
        int ret;
 
-       if (!sy7636a)
+       if (!regmap)
                return -EPROBE_DEFER;
 
-       platform_set_drvdata(pdev, sy7636a);
-
-       gdp = devm_gpiod_get(sy7636a->dev, "epd-pwr-good", GPIOD_IN);
+       gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
        if (IS_ERR(gdp)) {
-               dev_err(sy7636a->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+               dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
                return PTR_ERR(gdp);
        }
 
-       sy7636a->pgood_gpio = gdp;
+       data = devm_kzalloc(&pdev->dev, sizeof(struct sy7636a_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->regmap = regmap;
+       data->pgood_gpio = gdp;
+
+       platform_set_drvdata(pdev, data);
 
-       ret = regmap_write(sy7636a->regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
+       ret = regmap_write(regmap, SY7636A_REG_POWER_ON_DELAY_TIME, 0x0);
        if (ret) {
-               dev_err(sy7636a->dev, "Failed to initialize regulator: %d\n", ret);
+               dev_err(pdev->dev.parent, "Failed to initialize regulator: %d\n", ret);
                return ret;
        }
 
        config.dev = &pdev->dev;
-       config.dev->of_node = sy7636a->dev->of_node;
-       config.driver_data = sy7636a;
-       config.regmap = sy7636a->regmap;
+       config.dev->of_node = pdev->dev.parent->of_node;
+       config.regmap = regmap;
 
        rdev = devm_regulator_register(&pdev->dev, &desc, &config);
        if (IS_ERR(rdev)) {
-               dev_err(sy7636a->dev, "Failed to register %s regulator\n",
+               dev_err(pdev->dev.parent, "Failed to register %s regulator\n",
                        pdev->name);
                return PTR_ERR(rdev);
        }
index 62d243f3b9040692aa0bd61d9968bc3cbdbc3dc9..5e915cf307b3b5968ed7296b2e6a8ed09278fca6 100644 (file)
@@ -25,6 +25,7 @@ struct sy8824_config {
        unsigned int vsel_min;
        unsigned int vsel_step;
        unsigned int vsel_count;
+       const struct regmap_config *config;
 };
 
 struct sy8824_device_info {
@@ -110,6 +111,15 @@ static int sy8824_regulator_register(struct sy8824_device_info *di,
 static const struct regmap_config sy8824_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .num_reg_defaults_raw = 1,
+       .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config sy20276_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .num_reg_defaults_raw = 2,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int sy8824_i2c_probe(struct i2c_client *client)
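Setting num_reg_defaults_raw without a reg_defaults_raw table tells regmap how many registers the device exposes and lets it seed the cache by reading them back from the hardware once; with REGCACHE_FLAT that also bounds the cache. Since the sy20276/sy20278 parts expose two registers where the sy8824c/sy8824e expose one, each family now carries its own config. A sketch of such a config under those assumptions, names hypothetical:

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.cache_type = REGCACHE_FLAT,
	.num_reg_defaults_raw = 2,	/* registers 0..1, seeded from hardware */
};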
@@ -134,7 +144,7 @@ static int sy8824_i2c_probe(struct i2c_client *client)
        di->dev = dev;
        di->cfg = of_device_get_match_data(dev);
 
-       regmap = devm_regmap_init_i2c(client, &sy8824_regmap_config);
+       regmap = devm_regmap_init_i2c(client, di->cfg->config);
        if (IS_ERR(regmap)) {
                dev_err(dev, "Failed to allocate regmap!\n");
                return PTR_ERR(regmap);
@@ -160,6 +170,7 @@ static const struct sy8824_config sy8824c_cfg = {
        .vsel_min = 762500,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy8824_regmap_config,
 };
 
 static const struct sy8824_config sy8824e_cfg = {
@@ -169,6 +180,7 @@ static const struct sy8824_config sy8824e_cfg = {
        .vsel_min = 700000,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy8824_regmap_config,
 };
 
 static const struct sy8824_config sy20276_cfg = {
@@ -178,6 +190,7 @@ static const struct sy8824_config sy20276_cfg = {
        .vsel_min = 600000,
        .vsel_step = 10000,
        .vsel_count = 128,
+       .config = &sy20276_regmap_config,
 };
 
 static const struct sy8824_config sy20278_cfg = {
@@ -187,6 +200,7 @@ static const struct sy8824_config sy20278_cfg = {
        .vsel_min = 762500,
        .vsel_step = 12500,
        .vsel_count = 64,
+       .config = &sy20276_regmap_config,
 };
 
 static const struct of_device_id sy8824_dt_ids[] = {
index 52e8c17afe24378f825bd726975987d1b270b677..7d5d9f879ce3c05971088f50d72d239d33d87600 100644 (file)
 #define   SY8827N_MODE         (1 << 6)
 #define SY8827N_VSEL1          1
 #define SY8827N_CTRL           2
+#define SY8827N_ID1            3
+#define SY8827N_ID2            4
+#define SY8827N_PGOOD          5
+#define SY8827N_MAX            (SY8827N_PGOOD + 1)
 
 #define SY8827N_NVOLTAGES      64
 #define SY8827N_VSELMIN                600000
@@ -102,9 +106,19 @@ static int sy8827n_regulator_register(struct sy8827n_device_info *di,
        return PTR_ERR_OR_ZERO(rdev);
 }
 
+static bool sy8827n_volatile_reg(struct device *dev, unsigned int reg)
+{
+       if (reg == SY8827N_PGOOD)
+               return true;
+       return false;
+}
+
 static const struct regmap_config sy8827n_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .volatile_reg = sy8827n_volatile_reg,
+       .num_reg_defaults_raw = SY8827N_MAX,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static int sy8827n_i2c_probe(struct i2c_client *client)
index 1d5b0a1b86f78013717f4627ed0e6555654231f1..06cbe60c990f96bb569fe996092c9686c3aae709 100644 (file)
@@ -1211,12 +1211,10 @@ static int tps65910_probe(struct platform_device *pdev)
 
                rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
                                               &config);
-               if (IS_ERR(rdev)) {
-                       dev_err(tps65910->dev,
-                               "failed to register %s regulator\n",
-                               pdev->name);
-                       return PTR_ERR(rdev);
-               }
+               if (IS_ERR(rdev))
+                       return dev_err_probe(tps65910->dev, PTR_ERR(rdev),
+                                            "failed to register %s regulator\n",
+                                            pdev->name);
 
                /* Save regulator for cleanup */
                pmic->rdev[i] = rdev;
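
The tps65910 hunk is the standard dev_err_probe() conversion: the helper logs the message, stays quiet on -EPROBE_DEFER (recording the reason for the devices_deferred debugfs file instead), and returns the error, so the log-and-return dance collapses into one statement. A hypothetical wrapper showing the shape:

    #include <linux/device.h>
    #include <linux/regulator/driver.h>

    static int register_one(struct device *dev, const struct regulator_desc *desc,
                            const struct regulator_config *config, const char *name)
    {
            struct regulator_dev *rdev = devm_regulator_register(dev, desc, config);

            if (IS_ERR(rdev))
                    return dev_err_probe(dev, PTR_ERR(rdev),
                                         "failed to register %s regulator\n", name);
            return 0;
    }
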
index cbadb1c9967906e99aa34157374aae8e432ebb53..d2a37978fc3a8973f3ff845b773a3fc7a71c8876 100644 (file)
@@ -37,7 +37,6 @@ struct vctrl_voltage_table {
 struct vctrl_data {
        struct regulator_dev *rdev;
        struct regulator_desc desc;
-       struct regulator *ctrl_reg;
        bool enabled;
        unsigned int min_slew_down_rate;
        unsigned int ovp_threshold;
@@ -82,7 +81,12 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
 static int vctrl_get_voltage(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+       int ctrl_uV;
+
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
+       ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
 
        return vctrl_calc_output_voltage(vctrl, ctrl_uV);
 }
@@ -92,14 +96,19 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
                             unsigned int *selector)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
-       int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
-       int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
+       int orig_ctrl_uV;
+       int uV;
        int ret;
 
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
+       orig_ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
+       uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
+
        if (req_min_uV >= uV || !vctrl->ovp_threshold)
                /* voltage rising or no OVP */
-               return regulator_set_voltage_rdev(ctrl_reg->rdev,
+               return regulator_set_voltage_rdev(rdev->supply->rdev,
                        vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
                        vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
                        PM_SUSPEND_ON);
@@ -117,7 +126,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
                next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
                next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
 
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            next_ctrl_uV,
                                            next_ctrl_uV,
                                            PM_SUSPEND_ON);
@@ -134,7 +143,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
 
 err:
        /* Try to go back to original voltage */
-       regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
+       regulator_set_voltage_rdev(rdev->supply->rdev, orig_ctrl_uV, orig_ctrl_uV,
                                   PM_SUSPEND_ON);
 
        return ret;
@@ -151,16 +160,18 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
                                 unsigned int selector)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
        unsigned int orig_sel = vctrl->sel;
        int ret;
 
+       if (!rdev->supply)
+               return -EPROBE_DEFER;
+
        if (selector >= rdev->desc->n_voltages)
                return -EINVAL;
 
        if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
                /* voltage rising or no OVP */
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            vctrl->vtable[selector].ctrl,
                                            vctrl->vtable[selector].ctrl,
                                            PM_SUSPEND_ON);
@@ -179,7 +190,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
                else
                        next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
 
-               ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
+               ret = regulator_set_voltage_rdev(rdev->supply->rdev,
                                            vctrl->vtable[next_sel].ctrl,
                                            vctrl->vtable[next_sel].ctrl,
                                            PM_SUSPEND_ON);
@@ -202,7 +213,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
 err:
        if (vctrl->sel != orig_sel) {
                /* Try to go back to original voltage */
-               if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
+               if (!regulator_set_voltage_rdev(rdev->supply->rdev,
                                           vctrl->vtable[orig_sel].ctrl,
                                           vctrl->vtable[orig_sel].ctrl,
                                           PM_SUSPEND_ON))
@@ -234,10 +245,6 @@ static int vctrl_parse_dt(struct platform_device *pdev,
        u32 pval;
        u32 vrange_ctrl[2];
 
-       vctrl->ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
-       if (IS_ERR(vctrl->ctrl_reg))
-               return PTR_ERR(vctrl->ctrl_reg);
-
        ret = of_property_read_u32(np, "ovp-threshold-percent", &pval);
        if (!ret) {
                vctrl->ovp_threshold = pval;
@@ -315,11 +322,11 @@ static int vctrl_cmp_ctrl_uV(const void *a, const void *b)
        return at->ctrl - bt->ctrl;
 }
 
-static int vctrl_init_vtable(struct platform_device *pdev)
+static int vctrl_init_vtable(struct platform_device *pdev,
+                            struct regulator *ctrl_reg)
 {
        struct vctrl_data *vctrl = platform_get_drvdata(pdev);
        struct regulator_desc *rdesc = &vctrl->desc;
-       struct regulator *ctrl_reg = vctrl->ctrl_reg;
        struct vctrl_voltage_range *vrange_ctrl = &vctrl->vrange.ctrl;
        int n_voltages;
        int ctrl_uV;
@@ -395,23 +402,19 @@ static int vctrl_init_vtable(struct platform_device *pdev)
 static int vctrl_enable(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ret = regulator_enable(vctrl->ctrl_reg);
 
-       if (!ret)
-               vctrl->enabled = true;
+       vctrl->enabled = true;
 
-       return ret;
+       return 0;
 }
 
 static int vctrl_disable(struct regulator_dev *rdev)
 {
        struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
-       int ret = regulator_disable(vctrl->ctrl_reg);
 
-       if (!ret)
-               vctrl->enabled = false;
+       vctrl->enabled = false;
 
-       return ret;
+       return 0;
 }
 
 static int vctrl_is_enabled(struct regulator_dev *rdev)
@@ -447,6 +450,7 @@ static int vctrl_probe(struct platform_device *pdev)
        struct regulator_desc *rdesc;
        struct regulator_config cfg = { };
        struct vctrl_voltage_range *vrange_ctrl;
+       struct regulator *ctrl_reg;
        int ctrl_uV;
        int ret;
 
@@ -461,15 +465,20 @@ static int vctrl_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
+       if (IS_ERR(ctrl_reg))
+               return PTR_ERR(ctrl_reg);
+
        vrange_ctrl = &vctrl->vrange.ctrl;
 
        rdesc = &vctrl->desc;
        rdesc->name = "vctrl";
        rdesc->type = REGULATOR_VOLTAGE;
        rdesc->owner = THIS_MODULE;
+       rdesc->supply_name = "ctrl";
 
-       if ((regulator_get_linear_step(vctrl->ctrl_reg) == 1) ||
-           (regulator_count_voltages(vctrl->ctrl_reg) == -EINVAL)) {
+       if ((regulator_get_linear_step(ctrl_reg) == 1) ||
+           (regulator_count_voltages(ctrl_reg) == -EINVAL)) {
                rdesc->continuous_voltage_range = true;
                rdesc->ops = &vctrl_ops_cont;
        } else {
@@ -486,11 +495,12 @@ static int vctrl_probe(struct platform_device *pdev)
        cfg.init_data = init_data;
 
        if (!rdesc->continuous_voltage_range) {
-               ret = vctrl_init_vtable(pdev);
+               ret = vctrl_init_vtable(pdev, ctrl_reg);
                if (ret)
                        return ret;
 
-               ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
+               /* Use locked consumer API when not in regulator framework */
+               ctrl_uV = regulator_get_voltage(ctrl_reg);
                if (ctrl_uV < 0) {
                        dev_err(&pdev->dev, "failed to get control voltage\n");
                        return ctrl_uV;
@@ -513,6 +523,9 @@ static int vctrl_probe(struct platform_device *pdev)
                }
        }
 
+       /* Drop ctrl-supply here in favor of regulator core managed supply */
+       devm_regulator_put(ctrl_reg);
+
        vctrl->rdev = devm_regulator_register(&pdev->dev, rdesc, &cfg);
        if (IS_ERR(vctrl->rdev)) {
                ret = PTR_ERR(vctrl->rdev);
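
The whole vctrl rework above follows one idea: instead of the driver holding its own "ctrl" consumer and calling the consumer API on it from within its ops (which can recurse into regulator-core locks), the desc declares supply_name and the core resolves, enables and disables the supply itself. The ops then reach the control regulator through rdev->supply, which may still be NULL while the supply is unresolved, hence the -EPROBE_DEFER checks. A condensed sketch; dereferencing supply->rdev is only possible inside drivers/regulator, and the my_* names are illustrative:

    #include <linux/module.h>
    #include <linux/regulator/driver.h>

    static int my_get_voltage(struct regulator_dev *rdev)
    {
            /* The core resolves the supply lazily; defer until it exists. */
            if (!rdev->supply)
                    return -EPROBE_DEFER;

            return regulator_get_voltage_rdev(rdev->supply->rdev);
    }

    static const struct regulator_ops my_ops = {
            .get_voltage = my_get_voltage,
    };

    static const struct regulator_desc my_desc = {
            .name = "vctrl-like",
            .supply_name = "ctrl",  /* the core manages the ctrl-supply */
            .type = REGULATOR_VOLTAGE,
            .owner = THIS_MODULE,
            .ops = &my_ops,
    };
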
index 328f70f633eb10977387c9b10899d209c7b16024..5656cac04b4c33d539e986993d981c8a0b9727db 100644 (file)
@@ -116,7 +116,7 @@ config RESET_LPC18XX
 
 config RESET_MCHP_SPARX5
        bool "Microchip Sparx5 reset driver"
-       depends on HAS_IOMEM || COMPILE_TEST
+       depends on ARCH_SPARX5 || COMPILE_TEST
        default y if SPARX5_SWITCH
        select MFD_SYSCON
        help
index daa425e74c965fc85ee11d3f5e9a9b390ec1ee70..59dc0ff9af9e850a22ee681324586d98f4a6b580 100644 (file)
@@ -53,7 +53,8 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
                               unsigned long id)
 {
        struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-       int val, err;
+       int err;
+       u32 val;
 
        err = zynqmp_pm_reset_get_status(priv->data->reset_id + id, &val);
        if (err)
index 9f64244089465537b8ab127f5d3d866c7d699a13..468cbeb539ff05c9fff3d9647fae01b782dd3230 100644 (file)
@@ -575,10 +575,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
        else
                argp = (void __user *)arg;
 
-       if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
-               PRINT_DEBUG("empty data ptr");
+       if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg)
                return -EINVAL;
-       }
 
        base = dasd_device_from_gendisk(bdev->bd_disk);
        if (!base)
index 792b4bfa6d9ae8333f676c5ffcb48f6e1ef5eb22..b4b84e3e0949fe0e9d0ef6465613080fca676e23 100644 (file)
 #include <linux/platform_device.h>
 #include <asm/types.h>
 #include <asm/irq.h>
+#include <asm/debug.h>
 
 #include "sclp.h"
 
 #define SCLP_HEADER            "sclp: "
 
+struct sclp_trace_entry {
+       char id[4];
+       u32 a;
+       u64 b;
+};
+
+#define SCLP_TRACE_ENTRY_SIZE          sizeof(struct sclp_trace_entry)
+#define SCLP_TRACE_MAX_SIZE            128
+#define SCLP_TRACE_EVENT_MAX_SIZE      64
+
+/* Debug trace area intended for all entries in abbreviated form. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
+                        &debug_hex_ascii_view);
+
+/* Error trace area intended for full entries relating to failed requests. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
+                        SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
+
 /* Lock to protect internal data consistency. */
 static DEFINE_SPINLOCK(sclp_lock);
 
@@ -54,6 +73,114 @@ int sclp_console_drop = 1;
 /* Number of times the console dropped buffer pages */
 unsigned long sclp_console_full;
 
+/* The currently active SCLP command word. */
+static sclp_cmdw_t active_cmd;
+
+static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
+{
+       struct sclp_trace_entry e;
+
+       memset(&e, 0, sizeof(e));
+       strncpy(e.id, id, sizeof(e.id));
+       e.a = a;
+       e.b = b;
+       debug_event(&sclp_debug, prio, &e, sizeof(e));
+       if (err)
+               debug_event(&sclp_debug_err, 0, &e, sizeof(e));
+}
+
+static inline int no_zeroes_len(void *data, int len)
+{
+       char *d = data;
+
+       /* Minimize trace area usage by not tracing trailing zeroes. */
+       while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
+               len--;
+
+       return len;
+}
+
+static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
+{
+       debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
+       if (errlen)
+               debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
+}
+
+static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
+{
+       struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
+       int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;
+
+       /* Full SCCB tracing if debug level is set to max. */
+       if (sclp_debug.level == DEBUG_MAX_LEVEL)
+               return len;
+
+       /* Minimal tracing for console writes. */
+       if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
+           (evbuf->type == EVTYP_MSG  || evbuf->type == EVTYP_VT220MSG))
+               limit = SCLP_TRACE_ENTRY_SIZE;
+
+       return min(len, limit);
+}
+
+static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
+                                  sclp_cmdw_t cmd, struct sccb_header *sccb,
+                                  bool err)
+{
+       sclp_trace(prio, id, a, b, err);
+       if (sccb) {
+               sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
+                              err ? sccb->length : 0);
+       }
+}
+
+static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
+                                   struct evbuf_header *evbuf, bool err)
+{
+       sclp_trace(prio, id, a, b, err);
+       sclp_trace_bin(prio + 1, evbuf,
+                      min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
+                      err ? evbuf->length : 0);
+}
+
+static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
+                                 bool err)
+{
+       struct sccb_header *sccb = req->sccb;
+       union {
+               struct {
+                       u16 status;
+                       u16 response;
+                       u16 timeout;
+                       u16 start_count;
+               };
+               u64 b;
+       } summary;
+
+       summary.status = req->status;
+       summary.response = sccb ? sccb->response_code : 0;
+       summary.timeout = (u16)req->queue_timeout;
+       summary.start_count = (u16)req->start_count;
+
+       sclp_trace(prio, id, (u32)(addr_t)sccb, summary.b, err);
+}
+
+static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
+                                      struct sclp_register *reg)
+{
+       struct {
+               u64 receive;
+               u64 send;
+       } d;
+
+       d.receive = reg->receive_mask;
+       d.send = reg->send_mask;
+
+       sclp_trace(prio, id, a, b, false);
+       sclp_trace_bin(prio, &d, sizeof(d), 0);
+}
+
 static int __init sclp_setup_console_pages(char *str)
 {
        int pages, rc;
@@ -162,6 +289,9 @@ static void sclp_request_timeout(bool force_restart)
 {
        unsigned long flags;
 
+       /* TMO: A timeout occurred (a=force_restart) */
+       sclp_trace(2, "TMO", force_restart, 0, true);
+
        spin_lock_irqsave(&sclp_lock, flags);
        if (force_restart) {
                if (sclp_running_state == sclp_running_state_running) {
@@ -237,6 +367,12 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
 
        do {
                req = __sclp_req_queue_remove_expired_req();
+
+               if (req) {
+                       /* RQTM: Request timed out (a=sccb, b=summary) */
+                       sclp_trace_req(2, "RQTM", req, true);
+               }
+
                if (req && req->callback)
                        req->callback(req, req->callback_data);
        } while (req);
@@ -248,6 +384,25 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
        spin_unlock_irqrestore(&sclp_lock, flags);
 }
 
+static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
+{
+       static u64 srvc_count;
+       int rc;
+
+       /* SRV1: Service call about to be issued (a=command, b=sccb address) */
+       sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);
+
+       rc = sclp_service_call(command, sccb);
+
+       /* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
+       sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);
+
+       if (rc == 0)
+               active_cmd = command;
+
+       return rc;
+}
+
 /* Try to start a request. Return zero if the request was successfully
  * started or if it will be started at a later time. Return non-zero otherwise.
  * Called while sclp_lock is locked. */
@@ -259,7 +414,7 @@ __sclp_start_request(struct sclp_req *req)
        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
-       rc = sclp_service_call(req->command, req->sccb);
+       rc = sclp_service_call_trace(req->command, req->sccb);
        req->start_count++;
 
        if (rc == 0) {
@@ -309,6 +464,10 @@ sclp_process_queue(void)
                }
                /* Post-processing for aborted request */
                list_del(&req->list);
+
+               /* RQAB: Request aborted (a=sccb, b=summary) */
+               sclp_trace_req(2, "RQAB", req, true);
+
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
@@ -341,6 +500,10 @@ sclp_add_request(struct sclp_req *req)
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
+
+       /* RQAD: Request was added (a=sccb, b=caller) */
+       sclp_trace(2, "RQAD", (u32)(addr_t)req->sccb, _RET_IP_, false);
+
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
@@ -394,6 +557,11 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
                        else
                                reg = NULL;
                }
+
+               /* EVNT: Event callback (b=receiver) */
+               sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
+                                evbuf, !reg);
+
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
@@ -455,6 +623,30 @@ __sclp_find_req(u32 sccb)
        return NULL;
 }
 
+static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
+{
+       struct sccb_header *sccb = (struct sccb_header *)(addr_t)sccb_int;
+       struct evbuf_header *evbuf;
+       u16 response;
+
+       if (!sccb)
+               return true;
+
+       /* Check SCCB response. */
+       response = sccb->response_code & 0xff;
+       if (response != 0x10 && response != 0x20)
+               return false;
+
+       /* Check event-processed flag on outgoing events. */
+       if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
+               evbuf = (struct evbuf_header *)(sccb + 1);
+               if (!(evbuf->flags & 0x80))
+                       return false;
+       }
+
+       return true;
+}
+
 /* Handler for external interruption. Perform request post-processing.
  * Prepare read event data request if necessary. Start processing of next
  * request on queue. */
@@ -469,6 +661,12 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
+
+       /* INT: Interrupt received (a=intparm, b=cmd) */
+       sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
+                       (struct sccb_header *)(addr_t)finished_sccb,
+                       !ok_response(finished_sccb, active_cmd));
+
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
@@ -477,13 +675,21 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
+
+                       /* RQOK: Request success (a=sccb, b=summary) */
+                       sclp_trace_req(2, "RQOK", req, false);
+
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
+               } else {
+                       /* UNEX: Unexpected SCCB completion (a=sccb address) */
+                       sclp_trace(0, "UNEX", finished_sccb, 0, true);
                }
                sclp_running_state = sclp_running_state_idle;
+               active_cmd = 0;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
@@ -507,9 +713,13 @@ sclp_sync_wait(void)
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
+       static u64 sync_count;
        u64 timeout;
        int irq_context;
 
+       /* SYN1: Synchronous wait start (a=runstate, b=sync count) */
+       sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);
+
        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
@@ -547,6 +757,9 @@ sclp_sync_wait(void)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
+
+       /* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
+       sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
 }
 EXPORT_SYMBOL(sclp_sync_wait);
 
@@ -576,8 +789,13 @@ sclp_dispatch_state_change(void)
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
-               if (reg && reg->state_change_fn)
+               if (reg && reg->state_change_fn) {
+                       /* STCG: State-change callback (b=callback) */
+                       sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
+                                  false);
+
                        reg->state_change_fn(reg);
+               }
        } while (reg);
 }
 
@@ -651,6 +869,9 @@ sclp_register(struct sclp_register *reg)
        sccb_mask_t send_mask;
        int rc;
 
+       /* REG: Event listener registered (b=caller) */
+       sclp_trace_register(2, "REG", 0, _RET_IP_, reg);
+
        rc = sclp_init();
        if (rc)
                return rc;
@@ -683,6 +904,9 @@ sclp_unregister(struct sclp_register *reg)
 {
        unsigned long flags;
 
+       /* UREG: Event listener unregistered (b=caller) */
+       sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);
+
        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
@@ -932,7 +1156,7 @@ sclp_check_interface(void)
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
-               rc = sclp_service_call(sclp_init_req.command, sccb);
+               rc = sclp_service_call_trace(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
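
The tracing added here rides on the s390 debug facility (s390dbf): DEFINE_STATIC_DEBUG_INFO registers a wraparound trace area (readable under /sys/kernel/debug/s390dbf/ once debugfs is up), and debug_event() stores one fixed-size record at a given level; records above the area's current level are dropped cheaply. The "summary" union above simply packs four 16-bit request attributes into the single u64 slot of a trace entry. A minimal, hypothetical user of the same facility:

    #include <linux/string.h>
    #include <asm/debug.h>

    /* 4 pages, 1 area, 16-byte records, rendered as hex + ASCII. */
    DEFINE_STATIC_DEBUG_INFO(my_dbf, "my_drv", 4, 1, 16, &debug_hex_ascii_view);

    static void my_trace(u32 a, u64 b)
    {
            struct {
                    char id[4];
                    u32 a;
                    u64 b;
            } e;

            memcpy(e.id, "EVNT", 4);
            e.a = a;
            e.b = b;
            debug_event(&my_dbf, 2, &e, sizeof(e)); /* stored if level >= 2 */
    }
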
index 8dd8ad83b78b20722a81b4949886a011fd51f812..5e434108aae6aa3d74e2ab2becf2830875e8ad29 100644 (file)
@@ -310,8 +310,6 @@ extern int sclp_console_drop;
 extern unsigned long sclp_console_full;
 extern bool sclp_mask_compat_mode;
 
-extern char *sclp_early_sccb;
-
 void sclp_early_wait_irq(void);
 int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
 unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
index ab0518cfdcfee8934c137dce3b0d59da4ff8f300..998933e836101c8b4beb49b85823073c212627db 100644 (file)
@@ -457,7 +457,7 @@ static int __init sclp_detect_standby_memory(void)
        struct read_storage_sccb *sccb;
        int i, id, assigned, rc;
 
-       if (OLDMEM_BASE) /* No standby memory in kdump mode */
+       if (oldmem_data.start) /* No standby memory in kdump mode */
                return 0;
        if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
                return 0;
index 039b2074db7e5d39a88aeed516630e62dfc05e4e..c365110f2daec554a4ea037f8980331e7c24e784 100644 (file)
@@ -50,12 +50,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 
        s390_update_cpu_mhz();
        pr_info("CPU capability may have changed\n");
-       get_online_cpus();
+       cpus_read_lock();
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 static void __ref sclp_cpu_change_notify(struct work_struct *work)
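
get_online_cpus()/put_online_cpus() are deprecated aliases; cpus_read_lock()/cpus_read_unlock() name the same operation for what it is, a read lock on CPU hotplug state, so the conversion is purely mechanical. The guarded-iteration pattern (do_something_on() is hypothetical):

    #include <linux/cpu.h>

    static void for_all_online(void (*do_something_on)(int cpu))
    {
            int cpu;

            /* Hold the hotplug read lock so no CPU comes or goes mid-walk. */
            cpus_read_lock();
            for_each_online_cpu(cpu)
                    do_something_on(cpu);
            cpus_read_unlock();
    }
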
index b7329af076a0f04c235afb80577a5067ac7e40ae..676634de65a89177c1245c43fca7ef5f8f4bedd6 100644 (file)
@@ -17,7 +17,7 @@
 
 static struct read_info_sccb __bootdata(sclp_info_sccb);
 static int __bootdata(sclp_info_sccb_valid);
-char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
+char *__bootdata(sclp_early_sccb);
 int sclp_init_state = sclp_init_state_uninitialized;
 /*
  * Used to keep track of the size of the event masks. Qemu until version 2.11
@@ -211,6 +211,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
        return rc;
 }
 
+void sclp_early_set_buffer(void *sccb)
+{
+       sclp_early_sccb = sccb;
+}
+
 /*
  * Output one or more lines of text on the SCLP console (VT220 and /
  * or line-mode).
@@ -235,11 +240,20 @@ void sclp_early_printk(const char *str)
        __sclp_early_printk(str, strlen(str));
 }
 
+/*
+ * We can't pass sclp_info_sccb to sclp_early_cmd() here directly,
+ * because it might not fulfil the requiremets for a SCLP communication buffer:
+ *   - lie below 2G in memory
+ *   - be page-aligned
+ * Therefore, we use the buffer sclp_early_sccb (which fulfils all those
+ * requirements) temporarily for communication and copy a received response
+ * back into the buffer sclp_info_sccb upon successful completion.
+ */
 int __init sclp_early_read_info(void)
 {
        int i;
        int length = test_facility(140) ? EXT_SCCB_READ_SCP : PAGE_SIZE;
-       struct read_info_sccb *sccb = &sclp_info_sccb;
+       struct read_info_sccb *sccb = (struct read_info_sccb *)sclp_early_sccb;
        sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
                                  SCLP_CMDW_READ_SCP_INFO};
 
@@ -251,6 +265,7 @@ int __init sclp_early_read_info(void)
                if (sclp_early_cmd(commands[i], sccb))
                        break;
                if (sccb->header.response_code == 0x10) {
+                       memcpy(&sclp_info_sccb, sccb, length);
                        sclp_info_sccb_valid = 1;
                        return 0;
                }
index b5b0848da93ba5e0135aa566e2e5abf6b12ccfcb..3ba2d934a3e89f5091f6de0450b78604a6dcf3d7 100644 (file)
@@ -269,7 +269,7 @@ static int __init zcore_init(void)
 
        if (!is_ipl_type_dump())
                return -ENODATA;
-       if (OLDMEM_BASE)
+       if (oldmem_data.start)
                return -ENODATA;
 
        zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
index a974943c27dacc935763776f2f82edadce4db78f..0ce48a354e04ef4ee70977f28d9979f4f4824635 100644 (file)
@@ -430,9 +430,26 @@ static ssize_t pimpampom_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(pimpampom);
 
+static ssize_t dev_busid_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct subchannel *sch = to_subchannel(dev);
+       struct pmcw *pmcw = &sch->schib.pmcw;
+
+       if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
+            pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
+               return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
+                                 pmcw->dev);
+       else
+               return sysfs_emit(buf, "none\n");
+}
+static DEVICE_ATTR_RO(dev_busid);
+
 static struct attribute *io_subchannel_type_attrs[] = {
        &dev_attr_chpids.attr,
        &dev_attr_pimpampom.attr,
+       &dev_attr_dev_busid.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(io_subchannel_type);
@@ -886,6 +903,18 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
 }
 static DEVICE_ATTR_RO(real_cssid);
 
+static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
+                           const char *buf, size_t count)
+{
+       CIO_TRACE_EVENT(4, "usr-rescan");
+
+       css_schedule_eval_all();
+       css_complete_work();
+
+       return count;
+}
+static DEVICE_ATTR_WO(rescan);
+
 static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
                              char *buf)
 {
@@ -932,6 +961,7 @@ static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
 
 static struct attribute *cssdev_attrs[] = {
        &dev_attr_real_cssid.attr,
+       &dev_attr_rescan.attr,
        NULL,
 };
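
Both new attributes follow current sysfs conventions: DEVICE_ATTR_RO()/DEVICE_ATTR_WO() wire a <name>_show or <name>_store function to the matching permissions, and sysfs_emit() bounds-checks the output against PAGE_SIZE where a raw sprintf() would not. A hypothetical pair in the same style:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
    {
            return sysfs_emit(buf, "%s\n", "idle");
    }
    static DEVICE_ATTR_RO(state);

    static ssize_t trigger_store(struct device *dev, struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            /* Any write triggers the action; the content is ignored. */
            return count;
    }
    static DEVICE_ATTR_WO(trigger);
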
 
index f69ffbb8edc99d66d730843973965101eb7e4e79..99c2212dc6a6b7fe70eb3b343fc7ccc80d79e036 100644 (file)
@@ -126,21 +126,9 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
 
 struct qdio_irq;
 
-struct siga_flag {
-       u8 input:1;
-       u8 output:1;
-       u8 sync:1;
-       u8 sync_after_ai:1;
-       u8 sync_out_after_pci:1;
-       u8:3;
-} __attribute__ ((packed));
-
 struct qdio_dev_perf_stat {
        unsigned int adapter_int;
        unsigned int qdio_int;
-       unsigned int pci_request_int;
-
-       unsigned int tasklet_outbound;
 
        unsigned int siga_read;
        unsigned int siga_write;
@@ -150,7 +138,6 @@ struct qdio_dev_perf_stat {
        unsigned int stop_polling;
        unsigned int inbound_queue_full;
        unsigned int outbound_call;
-       unsigned int outbound_handler;
        unsigned int outbound_queue_full;
        unsigned int fast_requeue;
        unsigned int target_full;
@@ -180,12 +167,6 @@ struct qdio_input_q {
 };
 
 struct qdio_output_q {
-       /* PCIs are enabled for the queue */
-       int pci_out_enabled;
-       /* timer to check for more outbound work */
-       struct timer_list timer;
-       /* tasklet to check for completions */
-       struct tasklet_struct tasklet;
 };
 
 /*
@@ -250,8 +231,7 @@ struct qdio_irq {
        unsigned long sch_token;        /* QEBSM facility */
 
        enum qdio_irq_states state;
-
-       struct siga_flag siga_flag;     /* siga sync information from qdioac */
+       u8 qdioac1;
 
        int nr_input_qs;
        int nr_output_qs;
@@ -263,7 +243,6 @@ struct qdio_irq {
        struct qdio_ssqd_desc ssqd_desc;
        void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
 
-       unsigned int scan_threshold;    /* used SBALs before tasklet schedule */
        int perf_stat_enabled;
 
        struct qdr *qdr;
@@ -325,13 +304,9 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
 #define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
 #define is_qebsm(q)                    (q->irq_ptr->sch_token != 0)
 
-#define need_siga_in(q)                        (q->irq_ptr->siga_flag.input)
-#define need_siga_out(q)               (q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q)              (unlikely(q->irq_ptr->siga_flag.sync))
-#define need_siga_sync_after_ai(q)     \
-       (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
-#define need_siga_sync_out_after_pci(q)        \
-       (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+#define qdio_need_siga_in(irq)         ((irq)->qdioac1 & AC1_SIGA_INPUT_NEEDED)
+#define qdio_need_siga_out(irq)                ((irq)->qdioac1 & AC1_SIGA_OUTPUT_NEEDED)
+#define qdio_need_siga_sync(irq)       (unlikely((irq)->qdioac1 & AC1_SIGA_SYNC_NEEDED))
 
 #define for_each_input_queue(irq_ptr, q, i)            \
        for (i = 0; i < irq_ptr->nr_input_qs &&         \
@@ -345,11 +320,6 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
 #define sub_buf(bufnr, dec)    QDIO_BUFNR((bufnr) - (dec))
 #define prev_buf(bufnr)                sub_buf(bufnr, 1)
 
-#define queue_irqs_enabled(q)                  \
-       (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
-#define queue_irqs_disabled(q)                 \
-       (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
-
 extern u64 last_ai_time;
 
 /* prototypes for thin interrupt */
@@ -360,8 +330,6 @@ void qdio_thinint_exit(void);
 int test_nonshared_ind(struct qdio_irq *);
 
 /* prototypes for setup */
-void qdio_outbound_tasklet(struct tasklet_struct *t);
-void qdio_outbound_timer(struct timer_list *t);
 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                      struct irb *irb);
 int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
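
The replacement above drops the unpacked siga_flag bitfield in favor of keeping the raw qdioac1 byte as reported by the hardware and testing the architected bits in place, which removes both the struct and the process_ac_flags() translation step. The shape of the pattern, with illustrative flag values rather than the real AC1_* ones:

    #define CAP_INPUT_NEEDED        0x01
    #define CAP_OUTPUT_NEEDED       0x02
    #define CAP_SYNC_NEEDED         0x04

    struct my_irq {
            unsigned char ac1;      /* stored exactly as the hardware reports */
    };

    #define my_need_input(irq)      ((irq)->ac1 & CAP_INPUT_NEEDED)
    #define my_need_output(irq)     ((irq)->ac1 & CAP_OUTPUT_NEEDED)
    #define my_need_sync(irq)       ((irq)->ac1 & CAP_SYNC_NEEDED)
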
index 00384f58f218f34c3c592737234d495ae05a75b9..4bb7965daa0fc7b8d048a0839fdb4737e84b4fe3 100644 (file)
@@ -197,8 +197,6 @@ DEFINE_SHOW_ATTRIBUTE(ssqd);
 static char *qperf_names[] = {
        "Assumed adapter interrupts",
        "QDIO interrupts",
-       "Requested PCIs",
-       "Outbound tasklet runs",
        "SIGA read",
        "SIGA write",
        "SIGA sync",
@@ -206,7 +204,6 @@ static char *qperf_names[] = {
        "Inbound stop_polling",
        "Inbound queue full",
        "Outbound calls",
-       "Outbound handler",
        "Outbound queue full",
        "Outbound fast_requeue",
        "Outbound target_full",
index 3052fab00597c7a81d4e417b472175367d276755..45e810c6ea3ba2eed80dd2baf36219fde95feea4 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -304,12 +303,22 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
        return (cc) ? -EIO : 0;
 }
 
+static inline int qdio_sync_input_queue(struct qdio_q *q)
+{
+       return qdio_siga_sync(q, 0, q->mask);
+}
+
+static inline int qdio_sync_output_queue(struct qdio_q *q)
+{
+       return qdio_siga_sync(q, q->mask, 0);
+}
+
 static inline int qdio_siga_sync_q(struct qdio_q *q)
 {
        if (q->is_input_q)
-               return qdio_siga_sync(q, 0, q->mask);
+               return qdio_sync_input_queue(q);
        else
-               return qdio_siga_sync(q, q->mask, 0);
+               return qdio_sync_output_queue(q);
 }
 
 static int qdio_siga_output(struct qdio_q *q, unsigned int count,
@@ -373,22 +382,10 @@ static inline int qdio_siga_input(struct qdio_q *q)
        return (cc) ? -EIO : 0;
 }
 
-#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
-#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
-
-static inline void qdio_sync_queues(struct qdio_q *q)
-{
-       /* PCI capable outbound queues will also be scanned so sync them too */
-       if (pci_out_supported(q->irq_ptr))
-               qdio_siga_sync_all(q);
-       else
-               qdio_siga_sync_q(q);
-}
-
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
                        unsigned char *state)
 {
-       if (need_siga_sync(q))
+       if (qdio_need_siga_sync(q->irq_ptr))
                qdio_siga_sync_q(q);
        return get_buf_state(q, bufnr, state, 0);
 }
@@ -455,10 +452,9 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
        if (!count)
                return 0;
 
-       /*
-        * No siga sync here, as a PCI or we after a thin interrupt
-        * already sync'ed the queues.
-        */
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_input_queue(q);
+
        count = get_buf_states(q, start, &state, count, 1);
        if (!count)
                return 0;
@@ -510,8 +506,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
        if (!atomic_read(&q->nr_buf_used))
                return 1;
 
-       if (need_siga_sync(q))
-               qdio_siga_sync_q(q);
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_input_queue(q);
        get_buf_state(q, start, &state, 0);
 
        if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -521,15 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
        return 1;
 }
 
-static inline int qdio_tasklet_schedule(struct qdio_q *q)
-{
-       if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
-               tasklet_schedule(&q->u.out.tasklet);
-               return 0;
-       }
-       return -EPERM;
-}
-
 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
                                        unsigned int *error)
 {
@@ -538,17 +525,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 
        q->timestamp = get_tod_clock_fast();
 
-       if (need_siga_sync(q))
-               if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
-                   !pci_out_supported(q->irq_ptr)) ||
-                   (queue_type(q) == QDIO_IQDIO_QFMT &&
-                   multicast_outbound(q)))
-                       qdio_siga_sync_q(q);
-
        count = atomic_read(&q->nr_buf_used);
        if (!count)
                return 0;
 
+       if (qdio_need_siga_sync(q->irq_ptr))
+               qdio_sync_output_queue(q);
+
        count = get_buf_states(q, start, &state, count, 0);
        if (!count)
                return 0;
@@ -595,19 +578,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
        }
 }
 
-/* all buffers processed? */
-static inline int qdio_outbound_q_done(struct qdio_q *q)
-{
-       return atomic_read(&q->nr_buf_used) == 0;
-}
-
 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
                                unsigned long aob)
 {
        int retries = 0, cc;
        unsigned int busy_bit;
 
-       if (!need_siga_out(q))
+       if (!qdio_need_siga_out(q->irq_ptr))
                return 0;
 
        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
@@ -644,75 +621,6 @@ retry:
        return cc;
 }
 
-void qdio_outbound_tasklet(struct tasklet_struct *t)
-{
-       struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
-       struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
-       unsigned int start = q->first_to_check;
-       unsigned int error = 0;
-       int count;
-
-       qperf_inc(q, tasklet_outbound);
-       WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
-
-       count = get_outbound_buffer_frontier(q, start, &error);
-       if (count) {
-               q->first_to_check = add_buf(start, count);
-
-               if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
-                       qperf_inc(q, outbound_handler);
-                       DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
-                                     start, count);
-
-                       q->handler(q->irq_ptr->cdev, error, q->nr, start,
-                                  count, q->irq_ptr->int_parm);
-               }
-       }
-
-       if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
-           !qdio_outbound_q_done(q))
-               goto sched;
-
-       if (q->u.out.pci_out_enabled)
-               return;
-
-       /*
-        * Now we know that queue type is either qeth without pci enabled
-        * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
-        * is noticed and outbound_handler is called after some time.
-        */
-       if (qdio_outbound_q_done(q))
-               del_timer_sync(&q->u.out.timer);
-       else
-               if (!timer_pending(&q->u.out.timer) &&
-                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-                       mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
-       return;
-
-sched:
-       qdio_tasklet_schedule(q);
-}
-
-void qdio_outbound_timer(struct timer_list *t)
-{
-       struct qdio_q *q = from_timer(q, t, u.out.timer);
-
-       qdio_tasklet_schedule(q);
-}
-
-static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
-{
-       struct qdio_q *out;
-       int i;
-
-       if (!pci_out_supported(irq) || !irq->scan_threshold)
-               return;
-
-       for_each_output_queue(irq, out, i)
-               if (!qdio_outbound_q_done(out))
-                       qdio_tasklet_schedule(out);
-}
-
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
                                  enum qdio_irq_states state)
 {
@@ -734,25 +642,11 @@ static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
 /* PCI interrupt handler */
 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 {
-       int i;
-       struct qdio_q *q;
-
        if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
                return;
 
        qdio_deliver_irq(irq_ptr);
        irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
-
-       if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
-               return;
-
-       for_each_output_queue(irq_ptr, q, i) {
-               if (qdio_outbound_q_done(q))
-                       continue;
-               if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
-                       qdio_siga_sync_q(q);
-               qdio_tasklet_schedule(q);
-       }
 }
 
 static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
@@ -879,15 +773,34 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
 {
-       struct qdio_q *q;
-       int i;
+       struct ccw_device *cdev = irq->cdev;
+       long timeout;
+       int rc;
 
-       for_each_output_queue(irq_ptr, q, i) {
-               del_timer_sync(&q->u.out.timer);
-               tasklet_kill(&q->u.out.tasklet);
+       spin_lock_irq(get_ccwdev_lock(cdev));
+       qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+       if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+               rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+       else
+               /* default behaviour is halt */
+               rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+       spin_unlock_irq(get_ccwdev_lock(cdev));
+       if (rc) {
+               DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+               DBF_ERROR("rc:%4d", rc);
+               return rc;
        }
+
+       timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+                                                  irq->state == QDIO_IRQ_STATE_INACTIVE ||
+                                                  irq->state == QDIO_IRQ_STATE_ERR,
+                                                  10 * HZ);
+       if (timeout <= 0)
+               rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+
+       return rc;
 }
 
 /**
@@ -919,35 +832,13 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
        }
 
        /*
-        * Indicate that the device is going down. Scheduling the queue
-        * tasklets is forbidden from here on.
+        * Indicate that the device is going down.
         */
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-       qdio_shutdown_queues(irq_ptr);
        qdio_shutdown_debug_entries(irq_ptr);
 
-       /* cleanup subchannel */
-       spin_lock_irq(get_ccwdev_lock(cdev));
-       qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
-       if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
-               rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
-       else
-               /* default behaviour is halt */
-               rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
-       spin_unlock_irq(get_ccwdev_lock(cdev));
-       if (rc) {
-               DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
-               DBF_ERROR("rc:%4d", rc);
-               goto no_cleanup;
-       }
-
-       wait_event_interruptible_timeout(cdev->private->wait_q,
-               irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
-               irq_ptr->state == QDIO_IRQ_STATE_ERR,
-               10 * HZ);
-
-no_cleanup:
+       rc = qdio_cancel_ccw(irq_ptr, how);
        qdio_shutdown_thinint(irq_ptr);
        qdio_shutdown_irq(irq_ptr);
 
@@ -1061,8 +952,6 @@ static void qdio_trace_init_data(struct qdio_irq *irq,
        DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
        DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
        DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
-       DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
-       DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
        DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
                      data->no_output_qs);
        DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
@@ -1083,6 +972,7 @@ int qdio_establish(struct ccw_device *cdev,
 {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct subchannel_id schid;
+       long timeout;
        int rc;
 
        ccw_device_get_schid(cdev, &schid);
@@ -1111,17 +1001,14 @@ int qdio_establish(struct ccw_device *cdev,
        qdio_setup_irq(irq_ptr, init_data);
 
        rc = qdio_establish_thinint(irq_ptr);
-       if (rc) {
-               qdio_shutdown_irq(irq_ptr);
-               mutex_unlock(&irq_ptr->setup_mutex);
-               return rc;
-       }
+       if (rc)
+               goto err_thinint;
 
        /* establish q */
        irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
        irq_ptr->ccw.flags = CCW_FLAG_SLI;
        irq_ptr->ccw.count = irq_ptr->equeue.count;
-       irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+       irq_ptr->ccw.cda = (u32) virt_to_phys(irq_ptr->qdr);
 
        spin_lock_irq(get_ccwdev_lock(cdev));
        ccw_device_set_options_mask(cdev, 0);
@@ -1131,20 +1018,20 @@ int qdio_establish(struct ccw_device *cdev,
        if (rc) {
                DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%4x", rc);
-               qdio_shutdown_thinint(irq_ptr);
-               qdio_shutdown_irq(irq_ptr);
-               mutex_unlock(&irq_ptr->setup_mutex);
-               return rc;
+               goto err_ccw_start;
        }
 
-       wait_event_interruptible_timeout(cdev->private->wait_q,
-               irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
-               irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+       timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+                                                  irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+                                                  irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+       if (timeout <= 0) {
+               rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+               goto err_ccw_timeout;
+       }
 
        if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
-               mutex_unlock(&irq_ptr->setup_mutex);
-               qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
-               return -EIO;
+               rc = -EIO;
+               goto err_ccw_error;
        }
 
        qdio_setup_ssqd_info(irq_ptr);
@@ -1156,6 +1043,17 @@ int qdio_establish(struct ccw_device *cdev,
        qdio_print_subchannel_info(irq_ptr);
        qdio_setup_debug_entries(irq_ptr);
        return 0;
+
+err_ccw_timeout:
+       qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_error:
+err_ccw_start:
+       qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+       qdio_shutdown_irq(irq_ptr);
+       qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+       mutex_unlock(&irq_ptr->setup_mutex);
+       return rc;
 }
 EXPORT_SYMBOL_GPL(qdio_establish);
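
Two things worth noting in the reworked qdio_establish(): the goto ladder unwinds exactly the setup steps already taken, each label undoing one layer in reverse order, and the return value of wait_event_interruptible_timeout() is now checked; it is positive on success, 0 on timeout and -ERESTARTSYS when interrupted. A minimal sketch of the same errno mapping (wait_for_state() is hypothetical):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static int wait_for_state(wait_queue_head_t *wq, bool *done)
    {
            long timeout = wait_event_interruptible_timeout(*wq, *done, 10 * HZ);

            if (timeout > 0)
                    return 0;       /* condition met in time */
            return timeout == -ERESTARTSYS ? -EINTR : -ETIME;
    }
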
 
@@ -1219,12 +1117,10 @@ EXPORT_SYMBOL_GPL(qdio_activate);
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are emptied
  */
-static int handle_inbound(struct qdio_q *q, unsigned int callflags,
-                         int bufnr, int count)
+static int handle_inbound(struct qdio_q *q, int bufnr, int count)
 {
        int overlap;
 
@@ -1241,7 +1137,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
        atomic_add(count, &q->nr_buf_used);
 
-       if (need_siga_in(q))
+       if (qdio_need_siga_in(q->irq_ptr))
                return qdio_siga_input(q);
 
        return 0;
@@ -1250,16 +1146,13 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 /**
  * handle_outbound - process filled outbound buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are filled
  * @aob: asynchronous operation block
  */
-static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-                          unsigned int bufnr, unsigned int count,
+static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
                           struct qaob *aob)
 {
-       const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
        unsigned char state = 0;
        int used, rc = 0;
 
@@ -1271,19 +1164,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
        if (used == QDIO_MAX_BUFFERS_PER_Q)
                qperf_inc(q, outbound_queue_full);
 
-       if (callflags & QDIO_FLAG_PCI_OUT) {
-               q->u.out.pci_out_enabled = 1;
-               qperf_inc(q, pci_request_int);
-       } else
-               q->u.out.pci_out_enabled = 0;
-
        if (queue_type(q) == QDIO_IQDIO_QFMT) {
                unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
 
                WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
                rc = qdio_kick_outbound_q(q, count, phys_aob);
-       } else if (need_siga_sync(q)) {
-               rc = qdio_siga_sync_q(q);
+       } else if (qdio_need_siga_sync(q->irq_ptr)) {
+               rc = qdio_sync_output_queue(q);
        } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
                   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
                   state == SLSB_CU_OUTPUT_PRIMED) {
@@ -1293,18 +1180,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
                rc = qdio_kick_outbound_q(q, count, 0);
        }
 
-       /* Let drivers implement their own completion scanning: */
-       if (!scan_threshold)
-               return rc;
-
-       /* in case of SIGA errors we must process the error immediately */
-       if (used >= scan_threshold || rc)
-               qdio_tasklet_schedule(q);
-       else
-               /* free the SBALs in case of no further traffic */
-               if (!timer_pending(&q->u.out.timer) &&
-                   likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-                       mod_timer(&q->u.out.timer, jiffies + HZ);
        return rc;
 }
 
@@ -1336,11 +1211,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
        if (!count)
                return 0;
        if (callflags & QDIO_FLAG_SYNC_INPUT)
-               return handle_inbound(irq_ptr->input_qs[q_nr],
-                                     callflags, bufnr, count);
+               return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
        else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
-               return handle_outbound(irq_ptr->output_qs[q_nr],
-                                      callflags, bufnr, count, aob);
+               return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
@@ -1420,52 +1293,10 @@ int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
                return -ENODEV;
        q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
 
-       if (need_siga_sync(q))
-               qdio_siga_sync_q(q);
-
        return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL_GPL(qdio_inspect_queue);
 
-/**
- * qdio_get_next_buffers - process input buffers
- * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
- * @bufnr: first filled buffer number
- * @error: buffers are in error state
- *
- * Return codes
- *   < 0 - error
- *   = 0 - no new buffers found
- *   > 0 - number of processed buffers
- */
-int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
-                         int *error)
-{
-       struct qdio_q *q;
-       struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
-       if (!irq_ptr)
-               return -ENODEV;
-       q = irq_ptr->input_qs[nr];
-
-       /*
-        * Cannot rely on automatic sync after interrupt since queues may
-        * also be examined without interrupt.
-        */
-       if (need_siga_sync(q))
-               qdio_sync_queues(q);
-
-       qdio_check_outbound_pci_queues(irq_ptr);
-
-       /* Note: upper-layer MUST stop processing immediately here ... */
-       if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
-               return -EIO;
-
-       return __qdio_inspect_queue(q, bufnr, error);
-}
-EXPORT_SYMBOL(qdio_get_next_buffers);
-
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
index da67e49794023db6c7b977fda3f981bb8a758b3c..20efafe4789735aa0796bc220867b4697032f5aa 100644 (file)
@@ -89,55 +89,6 @@ void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
 }
 EXPORT_SYMBOL_GPL(qdio_reset_buffers);
 
-/*
- * qebsm is only available under 64bit but the adapter sets the feature
- * flag anyway, so we manually override it.
- */
-static inline int qebsm_possible(void)
-{
-       return css_general_characteristics.qebsm;
-}
-
-/*
- * qib_param_field: pointer to 128 bytes or NULL, if no param field
- * nr_input_qs: pointer to nr_queues*128 words of data or NULL
- */
-static void set_impl_params(struct qdio_irq *irq_ptr,
-                           unsigned int qib_param_field_format,
-                           unsigned char *qib_param_field,
-                           unsigned long *input_slib_elements,
-                           unsigned long *output_slib_elements)
-{
-       struct qdio_q *q;
-       int i, j;
-
-       if (!irq_ptr)
-               return;
-
-       irq_ptr->qib.pfmt = qib_param_field_format;
-       if (qib_param_field)
-               memcpy(irq_ptr->qib.parm, qib_param_field,
-                      sizeof(irq_ptr->qib.parm));
-
-       if (!input_slib_elements)
-               goto output;
-
-       for_each_input_queue(irq_ptr, q, i) {
-               for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
-                       q->slib->slibe[j].parms =
-                               input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
-       }
-output:
-       if (!output_slib_elements)
-               return;
-
-       for_each_output_queue(irq_ptr, q, i) {
-               for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
-                       q->slib->slibe[j].parms =
-                               output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
-       }
-}
-
 static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
 {
        struct qdio_q *q;
@@ -267,26 +218,9 @@ static void setup_queues(struct qdio_irq *irq_ptr,
                q->is_input_q = 0;
                setup_storage_lists(q, irq_ptr,
                                    qdio_init->output_sbal_addr_array[i], i);
-
-               tasklet_setup(&q->u.out.tasklet, qdio_outbound_tasklet);
-               timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
        }
 }
 
-static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
-{
-       if (qdioac & AC1_SIGA_INPUT_NEEDED)
-               irq_ptr->siga_flag.input = 1;
-       if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
-               irq_ptr->siga_flag.output = 1;
-       if (qdioac & AC1_SIGA_SYNC_NEEDED)
-               irq_ptr->siga_flag.sync = 1;
-       if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
-               irq_ptr->siga_flag.sync_after_ai = 1;
-       if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
-               irq_ptr->siga_flag.sync_out_after_pci = 1;
-}
-
 static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
 {
@@ -363,7 +297,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
                qdioac = irq_ptr->ssqd_desc.qdioac1;
 
        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
-       process_ac_flags(irq_ptr, qdioac);
+       irq_ptr->qdioac1 = qdioac;
        DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
        DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
 }
@@ -386,6 +320,8 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
        struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
        int i;
 
+       memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
@@ -405,12 +341,15 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
 static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
 {
-       if (qebsm_possible())
-               irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
-
-       irq_ptr->qib.rflags |= init_data->qib_rflags;
+       memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
 
        irq_ptr->qib.qfmt = init_data->q_format;
+       irq_ptr->qib.pfmt = init_data->qib_param_field_format;
+
+       irq_ptr->qib.rflags = init_data->qib_rflags;
+       if (css_general_characteristics.qebsm)
+               irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -419,6 +358,10 @@ static void setup_qib(struct qdio_irq *irq_ptr,
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
        ASCEBC(irq_ptr->qib.ebcnam, 8);
+
+       if (init_data->qib_param_field)
+               memcpy(irq_ptr->qib.parm, init_data->qib_param_field,
+                      sizeof(irq_ptr->qib.parm));
 }
 
 int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
@@ -426,8 +369,7 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        struct ccw_device *cdev = irq_ptr->cdev;
        struct ciw *ciw;
 
-       memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
-       memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+       irq_ptr->qdioac1 = 0;
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
@@ -436,13 +378,9 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
        irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
 
-       /* wipes qib.ac, required by ar7063 */
-       memset(irq_ptr->qdr, 0, sizeof(struct qdr));
-
        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;
-       irq_ptr->scan_threshold = init_data->scan_threshold;
        ccw_device_get_schid(cdev, &irq_ptr->schid);
        setup_queues(irq_ptr, init_data);
 
@@ -450,10 +388,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
        set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
 
        setup_qib(irq_ptr, init_data);
-       set_impl_params(irq_ptr, init_data->qib_param_field_format,
-                       init_data->qib_param_field,
-                       init_data->input_slib_elements,
-                       init_data->output_slib_elements);
 
        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);
@@ -497,11 +431,8 @@ void qdio_shutdown_irq(struct qdio_irq *irq)
 
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
 {
-       char s[80];
-
-       snprintf(s, 80, "qdio: %s %s on SC %x using "
-                "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
-                dev_name(&irq_ptr->cdev->dev),
+       dev_info(&irq_ptr->cdev->dev,
+                "qdio: %s on SC %x using AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s\n",
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                        ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
@@ -509,12 +440,9 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
                 (irq_ptr->sch_token) ? 1 : 0,
                 pci_out_supported(irq_ptr) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
-                (irq_ptr->siga_flag.input) ? "R" : " ",
-                (irq_ptr->siga_flag.output) ? "W" : " ",
-                (irq_ptr->siga_flag.sync) ? "S" : " ",
-                (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
-                (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
-       printk(KERN_INFO "%s", s);
+                qdio_need_siga_in(irq_ptr) ? "R" : " ",
+                qdio_need_siga_out(irq_ptr) ? "W" : " ",
+                qdio_need_siga_sync(irq_ptr) ? "S" : " ");
 }
 
 int __init qdio_setup_init(void)
@@ -541,7 +469,7 @@ int __init qdio_setup_init(void)
                  (css_general_characteristics.aif_osa) ? 1 : 0);
 
        /* Check for QEBSM support in general (bit 58). */
-       DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+       DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
        rc = 0;
 out:
        return rc;
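
The qdio_print_subchannel_info() hunk above replaces a hand-assembled snprintf()/printk() pair with a single dev_info() call, which prefixes the message with the driver and device name automatically. A minimal sketch of the pattern, assuming a fictitious report_features() helper:

	#include <linux/device.h>

	static void report_features(struct device *dev, bool a, bool b)
	{
		/* dev_info() emits the driver/device prefix itself, so no
		 * manual buffer assembly or dev_name() plumbing is needed. */
		dev_info(dev, "features: A:%d B:%d\n", a ? 1 : 0, b ? 1 : 0);
	}
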
index 8d3a1d84a757562248ef7e2979532ae6911c8693..439c1f6d2866612e83e21fdb1e9f54974d12d467 100644 (file)
@@ -127,22 +127,13 @@ static struct bus_type ap_bus_type;
 /* Adapter interrupt definitions */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
 
-static int ap_airq_flag;
+static bool ap_irq_flag;
 
 static struct airq_struct ap_airq = {
        .handler = ap_interrupt_handler,
        .isc = AP_ISC,
 };
 
-/**
- * ap_using_interrupts() - Returns non-zero if interrupt support is
- * available.
- */
-static inline int ap_using_interrupts(void)
-{
-       return ap_airq_flag;
-}
-
 /**
  * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  *
@@ -152,7 +143,7 @@ static inline int ap_using_interrupts(void)
  */
 void *ap_airq_ptr(void)
 {
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                return ap_airq.lsi_ptr;
        return NULL;
 }
@@ -396,7 +387,7 @@ void ap_wait(enum ap_sm_wait wait)
        switch (wait) {
        case AP_SM_WAIT_AGAIN:
        case AP_SM_WAIT_INTERRUPT:
-               if (ap_using_interrupts())
+               if (ap_irq_flag)
                        break;
                if (ap_poll_kthread) {
                        wake_up(&ap_poll_wait);
@@ -471,7 +462,7 @@ static void ap_tasklet_fn(unsigned long dummy)
         * be received. Doing it at the beginning of the tasklet is therefore
         * important so that no requests on any AP get lost.
         */
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                xchg(ap_airq.lsi_ptr, 0);
 
        spin_lock_bh(&ap_queues_lock);
@@ -541,7 +532,7 @@ static int ap_poll_thread_start(void)
 {
        int rc;
 
-       if (ap_using_interrupts() || ap_poll_kthread)
+       if (ap_irq_flag || ap_poll_kthread)
                return 0;
        mutex_lock(&ap_poll_thread_mutex);
        ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -703,7 +694,7 @@ static int __ap_calc_helper(struct device *dev, void *arg)
 
        if (is_queue_dev(dev)) {
                pctrs->apqns++;
-               if ((to_ap_dev(dev))->drv)
+               if (dev->driver)
                        pctrs->bound++;
        }
 
@@ -883,7 +874,6 @@ static int ap_device_probe(struct device *dev)
                         to_ap_queue(dev)->qid);
        spin_unlock_bh(&ap_queues_lock);
 
-       ap_dev->drv = ap_drv;
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
 
        if (rc) {
@@ -891,7 +881,6 @@ static int ap_device_probe(struct device *dev)
                if (is_queue_dev(dev))
                        hash_del(&to_ap_queue(dev)->hnode);
                spin_unlock_bh(&ap_queues_lock);
-               ap_dev->drv = NULL;
        } else
                ap_check_bindings_complete();
 
@@ -904,7 +893,7 @@ out:
 static int ap_device_remove(struct device *dev)
 {
        struct ap_device *ap_dev = to_ap_dev(dev);
-       struct ap_driver *ap_drv = ap_dev->drv;
+       struct ap_driver *ap_drv = to_ap_drv(dev->driver);
 
        /* prepare ap queue device removal */
        if (is_queue_dev(dev))
@@ -923,7 +912,6 @@ static int ap_device_remove(struct device *dev)
        if (is_queue_dev(dev))
                hash_del(&to_ap_queue(dev)->hnode);
        spin_unlock_bh(&ap_queues_lock);
-       ap_dev->drv = NULL;
 
        put_device(dev);
 
@@ -1187,7 +1175,7 @@ static BUS_ATTR_RO(ap_adapter_mask);
 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
 {
        return scnprintf(buf, PAGE_SIZE, "%d\n",
-                        ap_using_interrupts() ? 1 : 0);
+                        ap_irq_flag ? 1 : 0);
 }
 
 static BUS_ATTR_RO(ap_interrupts);
@@ -1912,7 +1900,7 @@ static int __init ap_module_init(void)
        /* enable interrupts if available */
        if (ap_interrupts_available()) {
                rc = register_adapter_interrupt(&ap_airq);
-               ap_airq_flag = (rc == 0);
+               ap_irq_flag = (rc == 0);
        }
 
        /* Create /sys/bus/ap. */
@@ -1956,7 +1944,7 @@ out_work:
 out_bus:
        bus_unregister(&ap_bus_type);
 out:
-       if (ap_using_interrupts())
+       if (ap_irq_flag)
                unregister_adapter_interrupt(&ap_airq);
        kfree(ap_qci_info);
        return rc;
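
The ap_device_probe()/ap_device_remove() hunks above stop caching a driver pointer in struct ap_device and instead derive it from the driver core's dev->driver field, so there is no private copy to keep in sync across binding and unbinding. A rough sketch of the accessor this relies on; the exact to_ap_drv() definition lives in ap_bus.h, so the shape below is an assumption:

	#include <linux/device.h>

	struct ap_driver {
		struct device_driver driver;
		/* bus-specific callbacks elided */
	};

	/* Assumed helper: recover the wrapping ap_driver from the generic
	 * device_driver embedded inside it. */
	#define to_ap_drv(drv) container_of((drv), struct ap_driver, driver)

	static struct ap_driver *bound_driver(struct device *dev)
	{
		/* dev->driver is maintained by the driver core while the
		 * device is bound; NULL means "not bound". */
		return dev->driver ? to_ap_drv(dev->driver) : NULL;
	}
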
index 8f18abdbbc2ba5aedeb0f4432b9aeffbb6bb986a..95b577754b35a47fdbea811513092e98327b21fa 100644 (file)
@@ -80,12 +80,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 #define AP_FUNC_EP11  5
 #define AP_FUNC_APXA  6
 
-/*
- * AP interrupt states
- */
-#define AP_INTR_DISABLED       0       /* AP interrupt disabled */
-#define AP_INTR_ENABLED                1       /* AP interrupt enabled */
-
 /*
  * AP queue state machine states
  */
@@ -112,7 +106,7 @@ enum ap_sm_event {
  * AP queue state wait behaviour
  */
 enum ap_sm_wait {
-       AP_SM_WAIT_AGAIN,       /* retry immediately */
+       AP_SM_WAIT_AGAIN = 0,   /* retry immediately */
        AP_SM_WAIT_TIMEOUT,     /* wait for timeout */
        AP_SM_WAIT_INTERRUPT,   /* wait for thin interrupt (if available) */
        AP_SM_WAIT_NONE,        /* no wait */
@@ -157,7 +151,6 @@ void ap_driver_unregister(struct ap_driver *);
 
 struct ap_device {
        struct device device;
-       struct ap_driver *drv;          /* Pointer to AP device driver. */
        int device_type;                /* AP device type. */
 };
 
@@ -165,7 +158,6 @@ struct ap_device {
 
 struct ap_card {
        struct ap_device ap_dev;
-       void *private;                  /* ap driver private pointer. */
        int raw_hwtype;                 /* AP raw hardware type. */
        unsigned int functions;         /* AP device function bitfield. */
        int queue_depth;                /* AP queue depth.*/
@@ -182,11 +174,10 @@ struct ap_queue {
        struct hlist_node hnode;        /* Node for the ap_queues hashtable */
        struct ap_card *card;           /* Ptr to assoc. AP card. */
        spinlock_t lock;                /* Per device lock. */
-       void *private;                  /* ap driver private pointer. */
        enum ap_dev_state dev_state;    /* queue device state */
        bool config;                    /* configured state */
        ap_qid_t qid;                   /* AP queue id. */
-       int interrupt;                  /* indicate if interrupts are enabled */
+       bool interrupt;                 /* indicate if interrupts are enabled */
        int queue_count;                /* # messages currently on AP queue. */
        int pendingq_count;             /* # requests on pendingq list. */
        int requestq_count;             /* # requests on requestq list. */
index 669f96fddad61b2be4bb513f6580f4eb233031b7..d70c4d3d0907f385a1042fc5158ad74cbc1afea6 100644 (file)
@@ -19,7 +19,7 @@
 static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
  * @qid: The AP queue number
  * @ind: the notification indicator byte
  *
@@ -27,7 +27,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
  * value it waits a while and tests the AP queue if interrupts
  * have been switched on using ap_test_queue().
  */
-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
 {
        struct ap_queue_status status;
        struct ap_qirq_ctrl qirqctrl = { 0 };
@@ -218,7 +218,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
                return AP_SM_WAIT_NONE;
        case AP_RESPONSE_NO_PENDING_REPLY:
                if (aq->queue_count > 0)
-                       return AP_SM_WAIT_INTERRUPT;
+                       return aq->interrupt ?
+                               AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
                aq->sm_state = AP_SM_STATE_IDLE;
                return AP_SM_WAIT_NONE;
        default:
@@ -272,7 +273,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
                fallthrough;
        case AP_RESPONSE_Q_FULL:
                aq->sm_state = AP_SM_STATE_QUEUE_FULL;
-               return AP_SM_WAIT_INTERRUPT;
+               return aq->interrupt ?
+                       AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
                return AP_SM_WAIT_TIMEOUT;
@@ -322,7 +324,7 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
        case AP_RESPONSE_NORMAL:
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
-               aq->interrupt = AP_INTR_DISABLED;
+               aq->interrupt = false;
                return AP_SM_WAIT_TIMEOUT;
        default:
                aq->dev_state = AP_DEV_STATE_ERROR;
@@ -355,7 +357,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
                lsi_ptr = ap_airq_ptr();
-               if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+               if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
                        aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
                else
                        aq->sm_state = (aq->queue_count > 0) ?
@@ -396,7 +398,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 
        if (status.irq_enabled == 1) {
                /* Irqs are now enabled */
-               aq->interrupt = AP_INTR_ENABLED;
+               aq->interrupt = true;
                aq->sm_state = (aq->queue_count > 0) ?
                        AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
        }
@@ -586,7 +588,7 @@ static ssize_t interrupt_show(struct device *dev,
        spin_lock_bh(&aq->lock);
        if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
                rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
-       else if (aq->interrupt == AP_INTR_ENABLED)
+       else if (aq->interrupt)
                rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
        else
                rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
@@ -767,7 +769,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
        aq->ap_dev.device.type = &ap_queue_type;
        aq->ap_dev.device_type = device_type;
        aq->qid = qid;
-       aq->interrupt = AP_INTR_DISABLED;
+       aq->interrupt = false;
        spin_lock_init(&aq->lock);
        INIT_LIST_HEAD(&aq->pendingq);
        INIT_LIST_HEAD(&aq->requestq);
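
The ap_sm_read()/ap_sm_write() hunks above make the wait decision depend on whether the queue actually managed to enable interrupts: AP_SM_WAIT_INTERRUPT is only meaningful then, otherwise the state machine must fall back to timer-based polling. The driver open-codes the ternary at both call sites; a hypothetical helper expressing the same rule (relies on the enum and struct definitions from ap_bus.h):

	/* Sketch only: prefer waiting for a thin interrupt, degrade to the
	 * poll timer when aq->interrupt never became true. */
	static enum ap_sm_wait ap_sm_wait_mode(struct ap_queue *aq)
	{
		return aq->interrupt ? AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
	}
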
index 122c85c224695e40ab2df876c37f134d1d58e478..67f145589f58e19eda433314f8b38c05d6fc9d67 100644 (file)
@@ -35,7 +35,7 @@ static int match_apqn(struct device *dev, const void *data)
 }
 
 /**
- * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
  * @matrix_mdev: the associated mediated matrix
  * @apqn: The queue APQN
  *
@@ -43,7 +43,7 @@ static int match_apqn(struct device *dev, const void *data)
  * devices of the vfio_ap_drv.
  * Verify that the APID and the APQI are set in the matrix.
  *
- * Returns the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the associated vfio_ap_queue
  */
 static struct vfio_ap_queue *vfio_ap_get_queue(
                                        struct ap_matrix_mdev *matrix_mdev,
@@ -64,7 +64,7 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
 }
 
 /**
- * vfio_ap_wait_for_irqclear
+ * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
  * @apqn: The AP Queue number
  *
  * Checks the IRQ bit for the status of this APQN using ap_tapq.
@@ -72,7 +72,6 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
  * Returns if the ap_tapq function failed with an invalid, deconfigured or
  * checkstopped AP.
  * Otherwise retries up to 5 times after waiting 20ms.
- *
  */
 static void vfio_ap_wait_for_irqclear(int apqn)
 {
@@ -105,13 +104,12 @@ static void vfio_ap_wait_for_irqclear(int apqn)
 }
 
 /**
- * vfio_ap_free_aqic_resources
+ * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
  * @q: The vfio_ap_queue
  *
  * Unregisters the ISC in the GIB when the saved ISC is not invalid.
- * Unpin the guest's page holding the NIB when it exist.
- * Reset the saved_pfn and saved_isc to invalid values.
- *
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets the saved_pfn and saved_isc to invalid values.
  */
 static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 {
@@ -130,7 +128,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 }
 
 /**
- * vfio_ap_irq_disable
+ * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
  * @q: The vfio_ap_queue
  *
  * Uses ap_aqic to disable the interruption and in case of success, reset
@@ -144,6 +142,8 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
  *
  * Returns if the ap_aqic function failed with an invalid, deconfigured or
  * checkstopped AP.
+ *
+ * Return: &struct ap_queue_status
  */
 static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
 {
@@ -183,9 +183,8 @@ end_free:
 }
 
 /**
- * vfio_ap_setirq: Enable Interruption for a APQN
+ * vfio_ap_irq_enable - enable interruption for an APQN
  *
- * @dev: the device associated with the ap_queue
  * @q:  the vfio_ap_queue holding AQIC parameters
  *
  * Pin the NIB saved in *q
@@ -197,6 +196,8 @@ end_free:
  *
  * Otherwise return the ap_queue_status returned by ap_aqic();
  * all retry handling will be done by the guest.
+ *
+ * Return: &struct ap_queue_status
  */
 static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
                                                 int isc,
@@ -253,7 +254,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
 }
 
 /**
- * handle_pqap: PQAP instruction callback
+ * handle_pqap - PQAP instruction callback
  *
  * @vcpu: The vcpu on which we received the PQAP instruction
  *
@@ -270,8 +271,8 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
  * We take the matrix_dev lock to ensure serialization on queues and
  * mediated device access.
  *
- * Return 0 if we could handle the request inside KVM.
- * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ * Return: 0 if we could handle the request inside KVM.
+ * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
  */
 static int handle_pqap(struct kvm_vcpu *vcpu)
 {
@@ -426,7 +427,7 @@ struct vfio_ap_queue_reserved {
 };
 
 /**
- * vfio_ap_has_queue
+ * vfio_ap_has_queue - determines if the AP queue containing the target in
+ * @data is reserved
  *
  * @dev: an AP queue device
  * @data: a struct vfio_ap_queue_reserved reference
@@ -443,7 +444,7 @@ struct vfio_ap_queue_reserved {
  * - If @data contains only an apqi value, @data will be flagged as
  *   reserved if the APQI field in the AP queue device matches
  *
- * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
+ * Return: 0 to indicate the input to the function succeeded. Returns -EINVAL if
  * @data does not contain either an apid or apqi.
  */
 static int vfio_ap_has_queue(struct device *dev, void *data)
@@ -473,9 +474,9 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
 }
 
 /**
- * vfio_ap_verify_queue_reserved
+ * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
+ * @apid or @apqi is reserved
  *
- * @matrix_dev: a mediated matrix device
  * @apid: an AP adapter ID
  * @apqi: an AP queue index
  *
@@ -492,7 +493,7 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
  * - If only @apqi is not NULL, then there must be an AP queue device bound
  *   to the vfio_ap driver with an APQN containing @apqi
  *
- * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
  */
 static int vfio_ap_verify_queue_reserved(unsigned long *apid,
                                         unsigned long *apqi)
@@ -536,15 +537,15 @@ vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
 }
 
 /**
- * vfio_ap_mdev_verify_no_sharing
+ * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not
+ * configured for another mediated device
+ *
+ * @matrix_mdev: the mediated matrix device
  *
  * Verifies that the APQNs derived from the cross product of the AP adapter IDs
  * and AP queue indexes comprising the AP matrix are not configured for another
  * mediated device. AP queue sharing is not allowed.
  *
- * @matrix_mdev: the mediated matrix device
- *
- * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
  */
 static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
 {
@@ -578,7 +579,8 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
 }
 
 /**
- * assign_adapter_store
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_adapter attribute
@@ -586,10 +588,7 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
  *             be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APID from @buf and sets the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
  * returns one of the following errors:
  *
  *     1. -EINVAL
@@ -666,17 +665,15 @@ done:
 static DEVICE_ATTR_WO(assign_adapter);
 
 /**
- * unassign_adapter_store
+ * unassign_adapter_store - parses the APID from @buf and clears the
+ * corresponding bit in the mediated matrix device's APM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_adapter attribute
  * @buf:       a buffer containing the adapter number (APID) to be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APID from @buf and clears the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the APID is not a number
 *     -ENODEV if the APID exceeds the maximum value configured for the
@@ -740,7 +737,9 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
 }
 
 /**
- * assign_domain_store
+ * assign_domain_store - parses the APQI from @buf and sets the
+ * corresponding bit in the mediated matrix device's AQM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_domain attribute
@@ -748,10 +747,7 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
  *             be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APQI from @buf and sets the corresponding bit in the mediated
- * matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * Return: the number of bytes processed if the APQI is valid; otherwise returns
  * one of the following errors:
  *
  *     1. -EINVAL
@@ -824,7 +820,8 @@ static DEVICE_ATTR_WO(assign_domain);
 
 
 /**
- * unassign_domain_store
+ * unassign_domain_store - parses the APQI from @buf and clears the
+ * corresponding bit in the mediated matrix device's AQM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_domain attribute
@@ -832,10 +829,7 @@ static DEVICE_ATTR_WO(assign_domain);
  *             be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the APQI from @buf and clears the corresponding bit in the
- * mediated matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * Return: the number of bytes processed if the APQI is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the APQI is not a number
  *     -ENODEV if the APQI exceeds the maximum value configured for the system
@@ -879,17 +873,16 @@ done:
 static DEVICE_ATTR_WO(unassign_domain);
 
 /**
- * assign_control_domain_store
+ * assign_control_domain_store - parses the domain ID from @buf and sets
+ * the corresponding bit in the mediated matrix device's ADM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's assign_control_domain attribute
  * @buf:       a buffer containing the domain ID to be assigned
  * @count:     the number of bytes in @buf
  *
- * Parses the domain ID from @buf and sets the corresponding bit in the mediated
- * matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the ID is not a number
  *     -ENODEV if the ID exceeds the maximum value configured for the system
@@ -937,17 +930,15 @@ done:
 static DEVICE_ATTR_WO(assign_control_domain);
 
 /**
- * unassign_control_domain_store
+ * unassign_control_domain_store - parses the domain ID from @buf and
+ * clears the corresponding bit in the mediated matrix device's ADM
  *
  * @dev:       the matrix device
  * @attr:      the mediated matrix device's unassign_control_domain attribute
  * @buf:       a buffer containing the domain ID to be unassigned
  * @count:     the number of bytes in @buf
  *
- * Parses the domain ID from @buf and clears the corresponding bit in the
- * mediated matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
  * returns one of the following errors:
  *     -EINVAL if the ID is not a number
  *     -ENODEV if the ID exceeds the maximum value configured for the system
@@ -1085,14 +1076,12 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
 };
 
 /**
- * vfio_ap_mdev_set_kvm
+ * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
+ * to manage AP resources for the guest whose state is represented by @kvm
  *
  * @matrix_mdev: a mediated matrix device
  * @kvm: reference to KVM instance
  *
- * Sets all data for @matrix_mdev that are needed to manage AP resources
- * for the guest whose state is represented by @kvm.
- *
  * Note: The matrix_dev->lock must be taken prior to calling
  * this function; however, the lock will be temporarily released while the
  * guest's AP configuration is set to avoid a potential lockdep splat.
@@ -1100,7 +1089,7 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
  * certain circumstances, will result in a circular lock dependency if this is
  * done under the @matrix_mdev->lock.
  *
- * Return 0 if no other mediated matrix device has a reference to @kvm;
+ * Return: 0 if no other mediated matrix device has a reference to @kvm;
  * otherwise, returns -EPERM.
  */
 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
@@ -1131,8 +1120,8 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
        return 0;
 }
 
-/*
- * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+/**
+ * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
  *
  * @nb: The notifier block
  * @action: Action to be taken
@@ -1141,6 +1130,7 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
  * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
  * pinned before). Other requests are ignored.
  *
+ * Return: for an UNMAP request, NOTIFY_OK; otherwise NOTIFY_DONE.
  */
 static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
@@ -1161,19 +1151,17 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
 }
 
 /**
- * vfio_ap_mdev_unset_kvm
+ * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
+ * by @matrix_mdev.
  *
  * @matrix_mdev: a matrix mediated device
  *
- * Performs clean-up of resources no longer needed by @matrix_mdev.
- *
  * Note: The matrix_dev->lock must be taken prior to calling
  * this function; however, the lock will be temporarily released while the
  * guest's AP configuration is cleared to avoid a potential lockdep splat.
  * The kvm->lock is taken to clear the guest's AP configuration which, under
  * certain circumstances, will result in a circular lock dependency if this is
  * done under the @matrix_mdev->lock.
- *
  */
 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
 {
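
All of the vfio_ap hunks above apply the same kernel-doc normalization: a "name - summary" headline, and a dedicated "Return:" section instead of free-form "Returns ..." prose, which the kernel-doc tooling does not pick up. The canonical shape, for reference:

	/**
	 * example_func - one-line summary of what the function does
	 * @arg: description of the parameter
	 *
	 * Optional longer description of behaviour and locking rules.
	 *
	 * Return: 0 on success; a negative errno otherwise.
	 */
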
index 529ffe26ea9d95465d0ffa01956661c99227dcc2..fa0cb8633040b14bbc62690aeddf6fb3d9275507 100644 (file)
@@ -572,14 +572,14 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct module **pmod,
                                                     unsigned int weight)
 {
-       if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+       if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
                return NULL;
        zcrypt_queue_get(zq);
        get_device(&zq->queue->ap_dev.device);
        atomic_add(weight, &zc->load);
        atomic_add(weight, &zq->load);
        zq->request_count++;
-       *pmod = zq->queue->ap_dev.drv->driver.owner;
+       *pmod = zq->queue->ap_dev.device.driver->owner;
        return zq;
 }
 
index 40fd5d37d26afe4f0551882aaa538164d4134034..ef11d2a0ca6c591e6f6726f52ca61be7f031ab04 100644 (file)
@@ -39,7 +39,7 @@
 static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
-       struct zcrypt_card *zc = to_ap_card(dev)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
 }
@@ -50,8 +50,8 @@ static ssize_t online_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
        int online = ac->config && zc->online ? 1 : 0;
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -61,8 +61,8 @@ static ssize_t online_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
        struct zcrypt_queue *zq;
        int online, id, i = 0, maxzqs = 0;
        struct zcrypt_queue **zq_uelist = NULL;
@@ -116,7 +116,7 @@ static ssize_t load_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
 {
-       struct zcrypt_card *zc = to_ap_card(dev)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
 }
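
The zcrypt conversions in the surrounding hunks all replace a driver-private pointer stashed in the bus structures (ac->private, aq->private) with the generic drvdata slot of struct device. A minimal sketch of that pairing, with a hypothetical alloc_card() standing in for the real allocation path:

	#include <linux/device.h>

	static int example_probe(struct device *dev)
	{
		struct zcrypt_card *zc = alloc_card();	/* hypothetical */

		if (!zc)
			return -ENOMEM;
		dev_set_drvdata(dev, zc);	/* publish via the driver core */
		return 0;
	}

	static ssize_t type_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		/* ... and recover it without touching bus-specific fields */
		struct zcrypt_card *zc = dev_get_drvdata(dev);

		return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
	}
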
index bc34bedf9db8b33c473a2c7bfe1dbb98f0031459..6a3c2b460965296f26d0de7a3c8cc2034567923c 100644 (file)
@@ -1724,10 +1724,10 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
        rlen = vlen = PAGE_SIZE/2;
        rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
                                       rarray, &rlen, varray, &vlen);
-       if (rc == 0 && rlen >= 10*8 && vlen >= 240) {
-               ci->new_apka_mk_state = (char) rarray[7*8];
-               ci->cur_apka_mk_state = (char) rarray[8*8];
-               ci->old_apka_mk_state = (char) rarray[9*8];
+       if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
+               ci->new_apka_mk_state = (char) rarray[10*8];
+               ci->cur_apka_mk_state = (char) rarray[11*8];
+               ci->old_apka_mk_state = (char) rarray[12*8];
                if (ci->old_apka_mk_state == '2')
                        memcpy(&ci->old_apka_mkvp, varray + 208, 8);
                if (ci->cur_apka_mk_state == '2')
index 62ceeb7fc12506dea5e0cd53f229e852219f75f3..fa8293d3700667634578774397a9b9829c0ca072 100644 (file)
@@ -89,7 +89,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
 
        if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
                zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
@@ -118,7 +118,6 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
        }
 
@@ -131,10 +130,9 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
 {
-       struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
 
-       if (zc)
-               zcrypt_card_unregister(zc);
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex2a_card_driver = {
@@ -176,10 +174,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2A_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
        }
 
@@ -192,11 +189,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
 {
-       struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
 
-       if (zq)
-               zcrypt_queue_unregister(zq);
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex2a_queue_driver = {
index 7a8cbdbe44080e58631fe9f7fd1776931b178fc3..a0b9f1153e12852ab6be0cf5d4336f5f20482b77 100644 (file)
@@ -66,9 +66,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct cca_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -97,9 +97,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct cca_info ci;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cao_state[] = { "invalid", "valid" };
        static const char * const new_state[] = { "empty", "partial", "full" };
 
@@ -261,7 +261,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
        switch (ac->ap_dev.device_type) {
        case AP_DEVICE_TYPE_CEX2C:
                zc->user_space_type = ZCRYPT_CEX2C;
@@ -287,7 +287,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
                return rc;
        }
@@ -297,7 +296,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
                                        &cca_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        }
@@ -311,13 +309,13 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
        struct ap_card *ac = to_ap_card(&ap_dev->device);
-       struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
-       if (zc)
-               zcrypt_card_unregister(zc);
+
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex2c_card_driver = {
@@ -359,10 +357,9 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX2C_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
                return rc;
        }
@@ -372,7 +369,6 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
                                        &cca_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        }
@@ -386,13 +382,13 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
-       if (zq)
-               zcrypt_queue_unregister(zq);
+
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex2c_queue_driver = {
index f518b5fc7e5deb7c0d0194aebe64c2dda7aa7b53..1f7ec54142e15bdc637d0635bf111133ef5063a2 100644 (file)
@@ -75,9 +75,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct cca_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -106,9 +106,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct cca_info ci;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cao_state[] = { "invalid", "valid" };
        static const char * const new_state[] = { "empty", "partial", "full" };
 
@@ -187,9 +187,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -208,9 +208,9 @@ static ssize_t ep11_fw_version_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -231,9 +231,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -264,10 +264,10 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(dev);
        int i, n = 0;
        struct ep11_card_info ci;
        struct ap_card *ac = to_ap_card(dev);
-       struct zcrypt_card *zc = ac->private;
 
        memset(&ci, 0, sizeof(ci));
 
@@ -309,9 +309,9 @@ static ssize_t ep11_mkvps_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int n = 0;
        struct ep11_domain_info di;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
        static const char * const cwk_state[] = { "invalid", "valid" };
        static const char * const nwk_state[] = { "empty", "uncommitted",
                                                  "committed" };
@@ -357,9 +357,9 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        int i, n = 0;
        struct ep11_domain_info di;
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
 
        memset(&di, 0, sizeof(di));
 
@@ -441,7 +441,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
        if (!zc)
                return -ENOMEM;
        zc->card = ac;
-       ac->private = zc;
+       dev_set_drvdata(&ap_dev->device, zc);
        if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
                if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
                        zc->type_string = "CEX4A";
@@ -539,7 +539,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 
        rc = zcrypt_card_register(zc);
        if (rc) {
-               ac->private = NULL;
                zcrypt_card_free(zc);
                return rc;
        }
@@ -549,7 +548,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
                                        &cca_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
@@ -557,7 +555,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
                                        &ep11_card_attr_grp);
                if (rc) {
                        zcrypt_card_unregister(zc);
-                       ac->private = NULL;
                        zcrypt_card_free(zc);
                }
        }
@@ -571,15 +568,15 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
        struct ap_card *ac = to_ap_card(&ap_dev->device);
-       struct zcrypt_card *zc = ac->private;
 
        if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
        else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
                sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
-       if (zc)
-               zcrypt_card_unregister(zc);
+
+       zcrypt_card_unregister(zc);
 }
 
 static struct ap_driver zcrypt_cex4_card_driver = {
@@ -629,10 +626,9 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
        ap_queue_init_state(aq);
        ap_queue_init_reply(aq, &zq->reply);
        aq->request_timeout = CEX4_CLEANUP_TIME;
-       aq->private = zq;
+       dev_set_drvdata(&ap_dev->device, zq);
        rc = zcrypt_queue_register(zq);
        if (rc) {
-               aq->private = NULL;
                zcrypt_queue_free(zq);
                return rc;
        }
@@ -642,7 +638,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
                                        &cca_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
@@ -650,7 +645,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
                                        &ep11_queue_attr_grp);
                if (rc) {
                        zcrypt_queue_unregister(zq);
-                       aq->private = NULL;
                        zcrypt_queue_free(zq);
                }
        }
@@ -664,15 +658,15 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
  */
 static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-       struct zcrypt_queue *zq = aq->private;
 
        if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
                sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
        else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
                sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
-       if (zq)
-               zcrypt_queue_unregister(zq);
+
+       zcrypt_queue_unregister(zq);
 }
 
 static struct ap_driver zcrypt_cex4_queue_driver = {
index 20f12288a8c1665db8daab11e795a715c5ab4cc9..398bde237e376ece811addbb60b81353ab13becf 100644 (file)
@@ -40,8 +40,8 @@ static ssize_t online_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        struct ap_queue *aq = to_ap_queue(dev);
-       struct zcrypt_queue *zq = aq->private;
        int online = aq->config && zq->online ? 1 : 0;
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -51,8 +51,8 @@ static ssize_t online_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
        struct ap_queue *aq = to_ap_queue(dev);
-       struct zcrypt_queue *zq = aq->private;
        struct zcrypt_card *zc = zq->zcard;
        int online;
 
@@ -83,7 +83,7 @@ static ssize_t load_show(struct device *dev,
                         struct device_attribute *attr,
                         char *buf)
 {
-       struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+       struct zcrypt_queue *zq = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
 }
@@ -170,7 +170,7 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
        int rc;
 
        spin_lock(&zcrypt_list_lock);
-       zc = zq->queue->card->private;
+       zc = dev_get_drvdata(&zq->queue->card->ap_dev.device);
        zcrypt_card_get(zc);
        zq->zcard = zc;
        zq->online = 1; /* New devices are online by default. */
index 62f88ccbd03f87efa94ea777df71b5a4220283b6..f96755a0a2615086f0de155a8f16fe97ca21396c 100644 (file)
@@ -3804,14 +3804,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                                     unsigned long card_ptr)
 {
        struct qeth_card *card        = (struct qeth_card *) card_ptr;
-       struct net_device *dev = card->dev;
 
-       QETH_CARD_TEXT(card, 6, "qdouhdl");
-       if (qdio_error & QDIO_ERROR_FATAL) {
-               QETH_CARD_TEXT(card, 2, "achkcond");
-               netif_tx_stop_all_queues(dev);
-               qeth_schedule_recovery(card);
-       }
+       QETH_CARD_TEXT(card, 2, "achkcond");
+       netif_tx_stop_all_queues(card->dev);
+       qeth_schedule_recovery(card);
 }
 
 /**
index 2abf86c104d5a963a4316271dc507eb54aa564f9..d7cdd9cfe485a89f72e1f3c4a628d32926a4e6e8 100644 (file)
@@ -279,7 +279,7 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card,
 
 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
 
        QETH_CARD_TEXT(card, 2, "fdbflush");
 
@@ -679,7 +679,7 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
                                      struct net_if_token *token,
                                      struct mac_addr_lnid *addr_lnid)
 {
-       struct switchdev_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info = {};
        u8 ntfy_mac[ETH_ALEN];
 
        ether_addr_copy(ntfy_mac, addr_lnid->mac);
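
Both qeth hunks above zero-initialize the on-stack switchdev_notifier_fdb_info before filling in selected members, so any field the function never assigns reaches the notifier chain as zero rather than as stack garbage. In plain C terms (fdb_info is a stand-in struct, not the real one):

	#include <linux/types.h>

	struct fdb_info {
		const unsigned char *addr;
		u16 vid;
		bool added_by_user;
	};

	static void notify_fdb(const unsigned char *mac)
	{
		/* The empty initializer zeroes every member, including any
		 * added to the struct later. */
		struct fdb_info info = {};

		info.addr = mac;
		/* info.vid and info.added_by_user are guaranteed to be 0 */
	}
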
index 6671d9563f6c9c5b223d6ed780c94761a37589b5..8f19bed6384e277cdf6d8167360e27cd47036798 100644 (file)
@@ -69,10 +69,7 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 {
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 
-       if (unlikely(qdio_err)) {
-               zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
-               return;
-       }
+       zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
 }
 
 static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
index 5983e05b648f93bc6091c29a1111c49133163a35..e29523a1b530622b62bb239a29a317d95b4c292c 100644 (file)
@@ -13193,6 +13193,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        if (!phba)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&phba->poll_list);
+
        /* Perform generic PCI device enabling operation */
        error = lpfc_enable_pci_dev(phba);
        if (error)
@@ -13327,7 +13329,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Enable RAS FW log support */
        lpfc_sli4_ras_setup(phba);
 
-       INIT_LIST_HEAD(&phba->poll_list);
        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
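
The lpfc hunk above moves INIT_LIST_HEAD(&phba->poll_list) in front of the first call that can fail, so error-unwinding code that inspects or deletes from poll_list always sees a valid (empty) list rather than uninitialized memory. The general rule, sketched with a hypothetical fallible_step():

	#include <linux/errno.h>
	#include <linux/list.h>

	struct ctx {
		struct list_head poll_list;
	};

	int fallible_step(struct ctx *c);	/* hypothetical */

	static int ctx_setup(struct ctx *c)
	{
		/* Initialize list heads before any fallible step: cleanup
		 * paths may run list_empty()/list_del() on them, which is
		 * only safe once the head points at itself. */
		INIT_LIST_HEAD(&c->poll_list);

		if (fallible_step(c))
			return -EIO;	/* unwinding may now touch poll_list */
		return 0;
	}
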
 
index 19b1c0cf5f2a2b35abe38c6102b35751d02c9510..cf4a3a2c22ad590d6fc74fe5efa8f7edb1e965b4 100644 (file)
@@ -7851,7 +7851,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
                        return r;
        }
 
-       rc = _base_static_config_pages(ioc);
+       r = _base_static_config_pages(ioc);
        if (r)
                return r;
 
index ae9bfc658203e32c5e2828f29ae71634b3a758fc..c0d31119d6d7ba3e8a1a258b73073c3e6c44b625 100644 (file)
@@ -808,12 +808,15 @@ store_state_field(struct device *dev, struct device_attribute *attr,
        ret = scsi_device_set_state(sdev, state);
        /*
         * If the device state changes to SDEV_RUNNING, we need to
-        * rescan the device to revalidate it, and run the queue to
-        * avoid I/O hang.
+        * run the queue to avoid I/O hang, and rescan the device
+        * to revalidate it. Running the queue first is necessary
+        * because another thread may be waiting inside
+        * blk_mq_freeze_queue_wait(), and that call may itself be
+        * waiting for pending I/O to finish.
         */
        if (ret == 0 && state == SDEV_RUNNING) {
-               scsi_rescan_device(dev);
                blk_mq_run_hw_queues(sdev->request_queue, true);
+               scsi_rescan_device(dev);
        }
        mutex_unlock(&sdev->state_mutex);
 
index 328bb961c2813f5ae442f7772aeb476bde69cd80..37506b3fe5a921284a4f8f2b32f165cc0d633937 100644 (file)
@@ -1199,14 +1199,24 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
                vstor_packet->vm_srb.sense_info_length);
 
        if (vstor_packet->vm_srb.scsi_status != 0 ||
-           vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
-               storvsc_log(device, STORVSC_LOGGING_ERROR,
+           vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
+
+               /*
+                * Log TEST_UNIT_READY errors only as warnings. Hyper-V can
+                * return errors when detecting devices using TEST_UNIT_READY,
+                * and logging these as errors produces unhelpful noise.
+                */
+               int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
+                       STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
+
+               storvsc_log(device, loglevel,
                        "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
                        request->cmd->request->tag,
                        stor_pkt->vm_srb.cdb[0],
                        vstor_packet->vm_srb.scsi_status,
                        vstor_packet->vm_srb.srb_status,
                        vstor_packet->status);
+       }
 
        if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
            (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
index f2b5d347d227bbc07b7190c53f4ac4e941fa8177..e5ae26227bdbff701ddf6f0ca387456c721a8b9f 100644 (file)
@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
        int ret = 0;
 
        spin_lock_irqsave(&ctrl->txn_lock, flags);
-       ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+       ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
                                SLIM_MAX_TIDS, GFP_ATOMIC);
        if (ret < 0) {
                spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
                        goto slim_xfer_err;
                }
        }
-
+       /* Initialize tid to invalid value */
+       txn->tid = 0;
        need_tid = slim_tid_txn(txn->mt, txn->mc);
 
        if (need_tid) {
@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
                        txn->mt, txn->mc, txn->la, ret);
 
 slim_xfer_err:
-       if (!clk_pause_msg && (!need_tid  || ret == -ETIMEDOUT)) {
+       if (!clk_pause_msg && (txn->tid == 0  || ret == -ETIMEDOUT)) {
                /*
                 * remove runtime-pm vote if this was TX only, or
                 * if there was an error during this transaction
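
The slimbus core hunks above reserve TID 0 as an "unallocated" sentinel: idr_alloc_cyclic() now starts at 1 and txn->tid is cleared to 0 before the transfer, so the error path can simply test txn->tid == 0 instead of recomputing need_tid. The allocation side of the pattern, as a sketch:

	#include <linux/gfp.h>
	#include <linux/idr.h>

	/* TIDs are handed out cyclically starting at 1, so the value 0 can
	 * safely mean "this transaction never received a TID". */
	static int alloc_tid(struct idr *tid_idr, void *txn, int max_tids)
	{
		return idr_alloc_cyclic(tid_idr, txn, 1, max_tids, GFP_ATOMIC);
	}
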
index c054e83ab63615564e7db8f405582a13785c3ebf..7040293c2ee8fe8d38af443f6ad9dc7bd2635c72 100644 (file)
@@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
                (mc == SLIM_USR_MC_GENERIC_ACK &&
                 mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
                slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
-               pm_runtime_mark_last_busy(ctrl->dev);
+               pm_runtime_mark_last_busy(ctrl->ctrl.dev);
        }
 }
 
@@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
 {
        u32 cfg = readl_relaxed(ctrl->ngd->base);
 
-       if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+       if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
+               ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
                qcom_slim_ngd_init_dma(ctrl);
 
        /* By default enable message queues */
@@ -1131,6 +1132,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
                        dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
                        return 0;
                }
+               qcom_slim_ngd_setup(ctrl);
                return 0;
        }
 
@@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
                }
                /* controller state should be in sync with framework state */
                complete(&ctrl->qmi.qmi_comp);
-               if (!pm_runtime_enabled(ctrl->dev) ||
-                               !pm_runtime_suspended(ctrl->dev))
-                       qcom_slim_ngd_runtime_resume(ctrl->dev);
+               if (!pm_runtime_enabled(ctrl->ctrl.dev) ||
+                        !pm_runtime_suspended(ctrl->ctrl.dev))
+                       qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev);
                else
-                       pm_runtime_resume(ctrl->dev);
-               pm_runtime_mark_last_busy(ctrl->dev);
-               pm_runtime_put(ctrl->dev);
+                       pm_runtime_resume(ctrl->ctrl.dev);
+
+               pm_runtime_mark_last_busy(ctrl->ctrl.dev);
+               pm_runtime_put(ctrl->ctrl.dev);
 
                ret = slim_register_controller(&ctrl->ctrl);
                if (ret) {
@@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
                /* Make sure the last dma xfer is finished */
                mutex_lock(&ctrl->tx_lock);
                if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
-                       pm_runtime_get_noresume(ctrl->dev);
+                       pm_runtime_get_noresume(ctrl->ctrl.dev);
                        ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
                        qcom_slim_ngd_down(ctrl);
                        qcom_slim_ngd_exit_dma(ctrl);
@@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
        struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
        int ret = 0;
 
+       qcom_slim_ngd_exit_dma(ctrl);
        if (!ctrl->qmi.handle)
                return 0;
 
index 3f711c1a0996a262a6f35eec9190e7478edb2549..bbae3d39c7bedb23c166c46bdc0b42baa4e620fe 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/signal.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
+#include <linux/platform_device.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <soc/fsl/qe/qe.h>
@@ -53,8 +54,8 @@ struct qe_ic {
        struct irq_chip hc_irq;
 
        /* VIRQ numbers of QE high/low irqs */
-       unsigned int virq_high;
-       unsigned int virq_low;
+       int virq_high;
+       int virq_low;
 };
 
 /*
@@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
        chip->irq_eoi(&desc->irq_data);
 }
 
-static void __init qe_ic_init(struct device_node *node)
+static int qe_ic_init(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        void (*low_handler)(struct irq_desc *desc);
        void (*high_handler)(struct irq_desc *desc);
        struct qe_ic *qe_ic;
-       struct resource res;
-       u32 ret;
+       struct resource *res;
+       struct device_node *node = pdev->dev.of_node;
 
-       ret = of_address_to_resource(node, 0, &res);
-       if (ret)
-               return;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res == NULL) {
+               dev_err(dev, "no memory resource defined\n");
+               return -ENODEV;
+       }
 
-       qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+       qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
        if (qe_ic == NULL)
-               return;
+               return -ENOMEM;
 
-       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
-                                              &qe_ic_host_ops, qe_ic);
-       if (qe_ic->irqhost == NULL) {
-               kfree(qe_ic);
-               return;
+       qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+       if (qe_ic->regs == NULL) {
+               dev_err(dev, "failed to ioremap() registers\n");
+               return -ENODEV;
        }
 
-       qe_ic->regs = ioremap(res.start, resource_size(&res));
-
        qe_ic->hc_irq = qe_ic_irq_chip;
 
-       qe_ic->virq_high = irq_of_parse_and_map(node, 0);
-       qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+       qe_ic->virq_high = platform_get_irq(pdev, 0);
+       qe_ic->virq_low = platform_get_irq(pdev, 1);
 
-       if (!qe_ic->virq_low) {
-               printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
-               kfree(qe_ic);
-               return;
-       }
-       if (qe_ic->virq_high != qe_ic->virq_low) {
+       if (qe_ic->virq_low <= 0)
+               return -ENODEV;
+
+       if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
                low_handler = qe_ic_cascade_low;
                high_handler = qe_ic_cascade_high;
        } else {
@@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node)
                high_handler = NULL;
        }
 
+       qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+                                              &qe_ic_host_ops, qe_ic);
+       if (qe_ic->irqhost == NULL) {
+               dev_err(dev, "failed to add irq domain\n");
+               return -ENODEV;
+       }
+
        qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
 
        irq_set_handler_data(qe_ic->virq_low, qe_ic);
        irq_set_chained_handler(qe_ic->virq_low, low_handler);
 
-       if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
+       if (high_handler) {
                irq_set_handler_data(qe_ic->virq_high, qe_ic);
                irq_set_chained_handler(qe_ic->virq_high, high_handler);
        }
+       return 0;
 }
+static const struct of_device_id qe_ic_ids[] = {
+       { .compatible = "fsl,qe-ic"},
+       { .type = "qeic"},
+       {},
+};
 
-static int __init qe_ic_of_init(void)
+static struct platform_driver qe_ic_driver =
 {
-       struct device_node *np;
+       .driver = {
+               .name           = "qe-ic",
+               .of_match_table = qe_ic_ids,
+       },
+       .probe  = qe_ic_init,
+};
 
-       np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
-       if (!np) {
-               np = of_find_node_by_type(NULL, "qeic");
-               if (!np)
-                       return -ENODEV;
-       }
-       qe_ic_init(np);
-       of_node_put(np);
+static int __init qe_ic_of_init(void)
+{
+       platform_driver_register(&qe_ic_driver);
        return 0;
 }
 subsys_initcall(qe_ic_of_init);
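
The QE IC conversion above follows the standard platform-driver recipe. A condensed, self-contained sketch of that recipe with devm-managed resources, so the error paths need no manual cleanup (names are generic, not from the patch):

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;
		int irq;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!regs)
			return -ENOMEM;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		/* devm resources are released automatically on unbind */
		return 0;
	}
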
index e71a4c514f7bd5e62cd6139e6793ba4d17d05ec2..83e352b0c8f9ad7359081589677bbb1784045842 100644 (file)
@@ -658,6 +658,18 @@ config SPI_ROCKCHIP
          The main usecase of this controller is to use spi flash as boot
          device.
 
+config SPI_ROCKCHIP_SFC
+       tristate "Rockchip Serial Flash Controller (SFC)"
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
+       depends on HAS_IOMEM && HAS_DMA
+       help
+         This enables support for the Rockchip serial flash controller, a
+         specialized controller used to access SPI flash on some Rockchip
+         SoCs.
+
+         The Rockchip SFC supports DMA and PIO modes. When DMA is not
+         available, the driver automatically falls back to PIO mode.
+
 config SPI_RB4XX
        tristate "Mikrotik RB4XX SPI master"
        depends on SPI_MASTER && ATH79
index 13e54c45e9df07e201e26fc35672ac92a0250dc5..699db95c84416f10c1a623f03c79ddb2e865d9da 100644 (file)
@@ -95,6 +95,7 @@ obj-$(CONFIG_SPI_QCOM_GENI)           += spi-geni-qcom.o
 obj-$(CONFIG_SPI_QCOM_QSPI)            += spi-qcom-qspi.o
 obj-$(CONFIG_SPI_QUP)                  += spi-qup.o
 obj-$(CONFIG_SPI_ROCKCHIP)             += spi-rockchip.o
+obj-$(CONFIG_SPI_ROCKCHIP_SFC)         += spi-rockchip-sfc.o
 obj-$(CONFIG_SPI_RB4XX)                        += spi-rb4xx.o
 obj-$(CONFIG_MACH_REALTEK_RTL)         += spi-realtek-rtl.o
 obj-$(CONFIG_SPI_RPCIF)                        += spi-rpc-if.o
index 37eab100a7d8a928c7917b2220a5945c0223fee2..7d709a8c833bb6b8e33562fe079dd50b35fcc152 100644 (file)
@@ -143,12 +143,12 @@ static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
+static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned int reg)
 {
        return readl(bs->regs + reg);
 }
 
-static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
+static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned int reg,
                                 u32 val)
 {
        writel(val, bs->regs + reg);
index 8996115ce736a9cbe4346cf8b8b11b9483e7f048..263ce904732775986f9190598ffe964b300ae932 100644 (file)
@@ -444,7 +444,7 @@ static int mcfqspi_remove(struct platform_device *pdev)
        mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
 
        mcfqspi_cs_teardown(mcfqspi);
-       clk_disable(mcfqspi->clk);
+       clk_disable_unprepare(mcfqspi->clk);
 
        return 0;
 }
index e114e6fe5ea5b4029324186cd6b1b17ddae45669..d112c2cac042b60cc0d96dc0cfb5e825ebf0a150 100644 (file)
@@ -213,12 +213,6 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
         * line for the controller
         */
        if (spi->cs_gpiod) {
-               /*
-                * FIXME: is this code ever executed? This host does not
-                * set SPI_MASTER_GPIO_SS so this chipselect callback should
-                * not get called from the SPI core when we are using
-                * GPIOs for chip select.
-                */
                if (value == BITBANG_CS_ACTIVE)
                        gpiod_set_value(spi->cs_gpiod, 1);
                else
@@ -945,7 +939,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
        master->bus_num = pdev->id;
        master->num_chipselect = pdata->num_chipselect;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
-       master->flags = SPI_MASTER_MUST_RX;
+       master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
        master->setup = davinci_spi_setup;
        master->cleanup = davinci_spi_cleanup;
        master->can_dma = davinci_spi_can_dma;
index aa676559d2738b3c3c2c130d99cf0c0fe39f712c..5896a7b2fade0ca204ec2d6ddc912176ef4fd007 100644 (file)
@@ -550,7 +550,7 @@ static int ep93xx_spi_prepare_hardware(struct spi_master *master)
        u32 val;
        int ret;
 
-       ret = clk_enable(espi->clk);
+       ret = clk_prepare_enable(espi->clk);
        if (ret)
                return ret;
 
@@ -570,7 +570,7 @@ static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
        val &= ~SSPCR1_SSE;
        writel(val, espi->mmio + SSPCR1);
 
-       clk_disable(espi->clk);
+       clk_disable_unprepare(espi->clk);
 
        return 0;
 }
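
Both clock fixes above (spi-coldfire-qspi and spi-ep93xx) restore the pairing rule of the common clock framework: clk_prepare_enable() must be balanced by clk_disable_unprepare(), never by a bare clk_disable(), or the prepare count leaks. The rule in sketch form:

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;
	/* ... use the peripheral ... */
	clk_disable_unprepare(clk);	/* balances both counts */
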
index 87f8829c399521cb5c83d953fe44b9c2b1f3b816..829770b8ec74c2bb67fc2d8c857dd6c6ac4c0184 100644 (file)
 
 #define SPI_FSI_BASE                   0x70000
 #define SPI_FSI_INIT_TIMEOUT_MS                1000
-#define SPI_FSI_MAX_XFR_SIZE           2048
-#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED        8
+#define SPI_FSI_MAX_RX_SIZE            8
+#define SPI_FSI_MAX_TX_SIZE            40
 
 #define SPI_FSI_ERROR                  0x0
 #define SPI_FSI_COUNTER_CFG            0x1
-#define  SPI_FSI_COUNTER_CFG_LOOPS(x)   (((u64)(x) & 0xffULL) << 32)
-#define  SPI_FSI_COUNTER_CFG_N2_RX      BIT_ULL(8)
-#define  SPI_FSI_COUNTER_CFG_N2_TX      BIT_ULL(9)
-#define  SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
-#define  SPI_FSI_COUNTER_CFG_N2_RELOAD  BIT_ULL(11)
 #define SPI_FSI_CFG1                   0x2
 #define SPI_FSI_CLOCK_CFG              0x3
 #define  SPI_FSI_CLOCK_CFG_MM_ENABLE    BIT_ULL(32)
@@ -76,8 +71,6 @@ struct fsi_spi {
        struct device *dev;     /* SPI controller device */
        struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
        u32 base;
-       size_t max_xfr_size;
-       bool restricted;
 };
 
 struct fsi_spi_sequence {
@@ -241,7 +234,7 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
        return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
 }
 
-static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
 {
        /*
         * Add the next byte of instruction to the 8-byte sequence register.
@@ -251,8 +244,6 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
         */
        seq->data |= (u64)val << seq->bit;
        seq->bit -= 8;
-
-       return ((64 - seq->bit) / 8) - 2;
 }
 
 static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
@@ -261,71 +252,11 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
        seq->data = 0ULL;
 }
 
-static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
-                                    struct fsi_spi_sequence *seq,
-                                    struct spi_transfer *transfer)
-{
-       int loops;
-       int idx;
-       int rc;
-       u8 val = 0;
-       u8 len = min(transfer->len, 8U);
-       u8 rem = transfer->len % len;
-
-       loops = transfer->len / len;
-
-       if (transfer->tx_buf) {
-               val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
-               idx = fsi_spi_sequence_add(seq, val);
-
-               if (rem)
-                       rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
-       } else if (transfer->rx_buf) {
-               val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
-               idx = fsi_spi_sequence_add(seq, val);
-
-               if (rem)
-                       rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
-       } else {
-               return -EINVAL;
-       }
-
-       if (ctx->restricted && loops > 1) {
-               dev_warn(ctx->dev,
-                        "Transfer too large; no branches permitted.\n");
-               return -EINVAL;
-       }
-
-       if (loops > 1) {
-               u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
-
-               fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
-
-               if (transfer->rx_buf)
-                       cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
-                               SPI_FSI_COUNTER_CFG_N2_TX |
-                               SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
-                               SPI_FSI_COUNTER_CFG_N2_RELOAD;
-
-               rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
-               if (rc)
-                       return rc;
-       } else {
-               fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
-       }
-
-       if (rem)
-               fsi_spi_sequence_add(seq, rem);
-
-       return 0;
-}
-
 static int fsi_spi_transfer_data(struct fsi_spi *ctx,
                                 struct spi_transfer *transfer)
 {
        int rc = 0;
        u64 status = 0ULL;
-       u64 cfg = 0ULL;
 
        if (transfer->tx_buf) {
                int nb;
@@ -363,16 +294,6 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
                u64 in = 0ULL;
                u8 *rx = transfer->rx_buf;
 
-               rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
-               if (rc)
-                       return rc;
-
-               if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
-                       rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
-                       if (rc)
-                               return rc;
-               }
-
                while (transfer->len > recv) {
                        do {
                                rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
@@ -439,6 +360,10 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
                }
        } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
 
+       rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
+       if (rc)
+               return rc;
+
        rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
        if (rc)
                return rc;
@@ -459,6 +384,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
 {
        int rc;
        u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
+       unsigned int len;
        struct spi_transfer *transfer;
        struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
 
@@ -471,8 +397,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                struct spi_transfer *next = NULL;
 
                /* Sequencer must do shift out (tx) first. */
-               if (!transfer->tx_buf ||
-                   transfer->len > (ctx->max_xfr_size + 8)) {
+               if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
                        rc = -EINVAL;
                        goto error;
                }
@@ -486,9 +411,13 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                fsi_spi_sequence_init(&seq);
                fsi_spi_sequence_add(&seq, seq_slave);
 
-               rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
-               if (rc)
-                       goto error;
+               len = transfer->len;
+               while (len > 8) {
+                       fsi_spi_sequence_add(&seq,
+                                            SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+                       len -= 8;
+               }
+               fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
 
                if (!list_is_last(&transfer->transfer_list,
                                  &mesg->transfers)) {
@@ -496,7 +425,9 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
 
                        /* Sequencer can only do shift in (rx) after tx. */
                        if (next->rx_buf) {
-                               if (next->len > ctx->max_xfr_size) {
+                               u8 shift;
+
+                               if (next->len > SPI_FSI_MAX_RX_SIZE) {
                                        rc = -EINVAL;
                                        goto error;
                                }
@@ -504,10 +435,8 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
                                dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
                                        next->len);
 
-                               rc = fsi_spi_sequence_transfer(ctx, &seq,
-                                                              next);
-                               if (rc)
-                                       goto error;
+                               shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
+                               fsi_spi_sequence_add(&seq, shift);
                        } else {
                                next = NULL;
                        }
@@ -541,9 +470,7 @@ error:
 
 static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
 {
-       struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
-
-       return ctx->max_xfr_size;
+       return SPI_FSI_MAX_RX_SIZE;
 }
 
 static int fsi_spi_probe(struct device *dev)
@@ -582,14 +509,6 @@ static int fsi_spi_probe(struct device *dev)
                ctx->fsi = fsi;
                ctx->base = base + SPI_FSI_BASE;
 
-               if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
-                       ctx->restricted = true;
-                       ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
-               } else {
-                       ctx->restricted = false;
-                       ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
-               }
-
                rc = devm_spi_register_controller(dev, ctlr);
                if (rc)
                        spi_controller_put(ctlr);
index fb45e6af6638136ed071c282c74572697a32d198..fd004c9db9dc0dcab9ffd939c7f7ef3660270ccc 100644 (file)
@@ -530,6 +530,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
                goto err_rx_dma_buf;
        }
 
+       memset(&cfg, 0, sizeof(cfg));
        cfg.src_addr = phy_addr + SPI_POPR;
        cfg.dst_addr = phy_addr + SPI_PUSHR;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
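
The added memset() matters because struct dma_slave_config grows new fields over time; without it, stack garbage in members this driver never assigns would be handed to dmaengine_slave_config(). The same fix appears again for spi-pic32 below. Safe-initialization sketch (FOO_FIFO is an illustrative register offset):

	struct dma_slave_config cfg;
	int ret;

	memset(&cfg, 0, sizeof(cfg));	/* unknown/new fields default to 0 */
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = phy_addr + FOO_FIFO;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(chan, &cfg);
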
index b3861fb88711a0fb6f1ac21e93c821f2a3548799..2f51421e2a71850b9d8fa9d399211c2d16dd63ba 100644 (file)
@@ -549,12 +549,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
         */
        spin_lock_irq(&mas->lock);
        geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
-
-       /*
-        * TX_WATERMARK_REG should be set after SPI configuration and
-        * setting up GENI SE engine, as driver starts data transfer
-        * for the watermark interrupt.
-        */
        if (m_cmd & SPI_TX_ONLY) {
                if (geni_spi_handle_tx(mas))
                        writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
index fa68e981792943d42c2f9a3ca3e11cf542526781..8d8df51c5466690955f21758320cb1866f17f43e 100644 (file)
@@ -1052,12 +1052,8 @@ static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
 
 static void spi_imx_push(struct spi_imx_data *spi_imx)
 {
-       unsigned int burst_len, fifo_words;
+       unsigned int burst_len;
 
-       if (spi_imx->dynamic_burst)
-               fifo_words = 4;
-       else
-               fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
        /*
         * Reload the FIFO when the remaining bytes to be transferred in the
         * current burst is 0. This only applies when bits_per_word is a
@@ -1076,7 +1072,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
 
                        spi_imx->remainder = burst_len;
                } else {
-                       spi_imx->remainder = fifo_words;
+                       spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
                }
        }
 
@@ -1084,8 +1080,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
                if (!spi_imx->count)
                        break;
                if (spi_imx->dynamic_burst &&
-                   spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
-                                                    fifo_words))
+                   spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
                        break;
                spi_imx->tx(spi_imx);
                spi_imx->txfifo++;
@@ -1195,6 +1190,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
         * dynamic_burst in that case.
         */
        if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
+           !(spi->mode & SPI_CS_WORD) &&
            (spi_imx->bits_per_word == 8 ||
            spi_imx->bits_per_word == 16 ||
            spi_imx->bits_per_word == 32)) {
@@ -1630,6 +1626,15 @@ static int spi_imx_probe(struct platform_device *pdev)
            is_imx53_ecspi(spi_imx))
                spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
 
+       if (is_imx51_ecspi(spi_imx) &&
+           device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
+               /*
+                * When using HW-CS implementing SPI_CS_WORD can be done by just
+                * setting the burst length to the word size. This is
+                * considerably faster than manually controlling the CS.
+                */
+               spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;
+
        spi_imx->spi_drctl = spi_drctl;
 
        init_completion(&spi_imx->xfer_done);
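
With SPI_CS_WORD advertised by the i.MX51 eCSPI hardware-CS path, a client whose device needs chip select released between words (daisy-chained converters are the classic case) can request the behaviour portably. A hedged client-side sketch:

	spi->mode |= SPI_CS_WORD;	/* toggle CS between every word */
	spi->bits_per_word = 16;
	ret = spi_setup(spi);
	if (ret)
		return ret;
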
index 7914255521c3d4ee0cc22913bc62a2e4215a3b70..386e8c84be0af24c951526554b37e24cfbc4ae79 100644 (file)
@@ -42,8 +42,9 @@
 #define SPI_CFG1_CS_IDLE_OFFSET           0
 #define SPI_CFG1_PACKET_LOOP_OFFSET       8
 #define SPI_CFG1_PACKET_LENGTH_OFFSET     16
-#define SPI_CFG1_GET_TICK_DLY_OFFSET      30
+#define SPI_CFG1_GET_TICK_DLY_OFFSET      29
 
+#define SPI_CFG1_GET_TICK_DLY_MASK        0xe0000000
 #define SPI_CFG1_CS_IDLE_MASK             0xff
 #define SPI_CFG1_PACKET_LOOP_MASK         0xff00
 #define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
@@ -90,6 +91,8 @@ struct mtk_spi_compatible {
        bool enhance_timing;
        /* some IC support DMA addr extension */
        bool dma_ext;
+       /* some ICs do not need the SPI clk to be unprepared */
+       bool no_need_unprepare;
 };
 
 struct mtk_spi {
@@ -104,6 +107,7 @@ struct mtk_spi {
        struct scatterlist *tx_sgl, *rx_sgl;
        u32 tx_sgl_len, rx_sgl_len;
        const struct mtk_spi_compatible *dev_comp;
+       u32 spi_clk_hz;
 };
 
 static const struct mtk_spi_compatible mtk_common_compat;
@@ -135,12 +139,21 @@ static const struct mtk_spi_compatible mt8183_compat = {
        .enhance_timing = true,
 };
 
+static const struct mtk_spi_compatible mt6893_compat = {
+       .need_pad_sel = true,
+       .must_tx = true,
+       .enhance_timing = true,
+       .dma_ext = true,
+       .no_need_unprepare = true,
+};
+
 /*
  * A piece of default chip info unless the platform
  * supplies it.
  */
 static const struct mtk_chip_config mtk_default_chip_info = {
        .sample_sel = 0,
+       .tick_delay = 0,
 };
 
 static const struct of_device_id mtk_spi_of_match[] = {
@@ -174,6 +187,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
        { .compatible = "mediatek,mt8192-spi",
                .data = (void *)&mt6765_compat,
        },
+       { .compatible = "mediatek,mt6893-spi",
+               .data = (void *)&mt6893_compat,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
@@ -192,6 +208,65 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
        writel(reg_val, mdata->base + SPI_CMD_REG);
 }
 
+static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
+{
+       struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+       struct spi_delay *cs_setup = &spi->cs_setup;
+       struct spi_delay *cs_hold = &spi->cs_hold;
+       struct spi_delay *cs_inactive = &spi->cs_inactive;
+       u32 setup, hold, inactive;
+       u32 reg_val;
+       int delay;
+
+       delay = spi_delay_to_ns(cs_setup, NULL);
+       if (delay < 0)
+               return delay;
+       setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       delay = spi_delay_to_ns(cs_hold, NULL);
+       if (delay < 0)
+               return delay;
+       hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       delay = spi_delay_to_ns(cs_inactive, NULL);
+       if (delay < 0)
+               return delay;
+       inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
+
+       setup    = setup ? setup : 1;
+       hold     = hold ? hold : 1;
+       inactive = inactive ? inactive : 1;
+
+       reg_val = readl(mdata->base + SPI_CFG0_REG);
+       if (mdata->dev_comp->enhance_timing) {
+               hold = min_t(u32, hold, 0x10000);
+               setup = min_t(u32, setup, 0x10000);
+               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+               reg_val |= (((hold - 1) & 0xffff)
+                          << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+               reg_val |= (((setup - 1) & 0xffff)
+                          << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+       } else {
+               hold = min_t(u32, hold, 0x100);
+               setup = min_t(u32, setup, 0x100);
+               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
+               reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
+               reg_val |= (((setup - 1) & 0xff)
+                           << SPI_CFG0_CS_SETUP_OFFSET);
+       }
+       writel(reg_val, mdata->base + SPI_CFG0_REG);
+
+       inactive = min_t(u32, inactive, 0x100);
+       reg_val = readl(mdata->base + SPI_CFG1_REG);
+       reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+       reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+       writel(reg_val, mdata->base + SPI_CFG1_REG);
+
+       return 0;
+}
+
 static int mtk_spi_prepare_message(struct spi_master *master,
                                   struct spi_message *msg)
 {
@@ -261,6 +336,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
                writel(mdata->pad_sel[spi->chip_select],
                       mdata->base + SPI_PAD_SEL_REG);
 
+       /* tick delay */
+       reg_val = readl(mdata->base + SPI_CFG1_REG);
+       reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+       reg_val |= ((chip_config->tick_delay & 0x7)
+               << SPI_CFG1_GET_TICK_DLY_OFFSET);
+       writel(reg_val, mdata->base + SPI_CFG1_REG);
+
+       /* set hw cs timing */
+       mtk_spi_set_hw_cs_timing(spi);
        return 0;
 }
 
@@ -287,12 +371,11 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
 static void mtk_spi_prepare_transfer(struct spi_master *master,
                                     struct spi_transfer *xfer)
 {
-       u32 spi_clk_hz, div, sck_time, reg_val;
+       u32 div, sck_time, reg_val;
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-       spi_clk_hz = clk_get_rate(mdata->spi_clk);
-       if (xfer->speed_hz < spi_clk_hz / 2)
-               div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
+       if (xfer->speed_hz < mdata->spi_clk_hz / 2)
+               div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
        else
                div = 1;
 
@@ -507,52 +590,6 @@ static bool mtk_spi_can_dma(struct spi_master *master,
                (unsigned long)xfer->rx_buf % 4 == 0);
 }
 
-static int mtk_spi_set_hw_cs_timing(struct spi_device *spi,
-                                   struct spi_delay *setup,
-                                   struct spi_delay *hold,
-                                   struct spi_delay *inactive)
-{
-       struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
-       u16 setup_dly, hold_dly, inactive_dly;
-       u32 reg_val;
-
-       if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
-           (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
-           (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
-               dev_err(&spi->dev,
-                       "Invalid delay unit, should be SPI_DELAY_UNIT_SCK\n");
-               return -EINVAL;
-       }
-
-       setup_dly = setup ? setup->value : 1;
-       hold_dly = hold ? hold->value : 1;
-       inactive_dly = inactive ? inactive->value : 1;
-
-       reg_val = readl(mdata->base + SPI_CFG0_REG);
-       if (mdata->dev_comp->enhance_timing) {
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold_dly - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup_dly - 1) & 0xffff)
-                          << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
-       } else {
-               reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val |= (((hold_dly - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
-               reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
-               reg_val |= (((setup_dly - 1) & 0xff)
-                           << SPI_CFG0_CS_SETUP_OFFSET);
-       }
-       writel(reg_val, mdata->base + SPI_CFG0_REG);
-
-       reg_val = readl(mdata->base + SPI_CFG1_REG);
-       reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
-       reg_val |= (((inactive_dly - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
-       writel(reg_val, mdata->base + SPI_CFG1_REG);
-
-       return 0;
-}
-
 static int mtk_spi_setup(struct spi_device *spi)
 {
        struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
@@ -790,7 +827,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
                goto err_put_master;
        }
 
-       clk_disable_unprepare(mdata->spi_clk);
+       mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
+
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_disable(mdata->spi_clk);
+       else
+               clk_disable_unprepare(mdata->spi_clk);
 
        pm_runtime_enable(&pdev->dev);
 
@@ -858,6 +900,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
 
        mtk_spi_reset(mdata);
 
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_unprepare(mdata->spi_clk);
+
        return 0;
 }
 
@@ -906,7 +951,10 @@ static int mtk_spi_runtime_suspend(struct device *dev)
        struct spi_master *master = dev_get_drvdata(dev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-       clk_disable_unprepare(mdata->spi_clk);
+       if (mdata->dev_comp->no_need_unprepare)
+               clk_disable(mdata->spi_clk);
+       else
+               clk_disable_unprepare(mdata->spi_clk);
 
        return 0;
 }
@@ -917,7 +965,10 @@ static int mtk_spi_runtime_resume(struct device *dev)
        struct mtk_spi *mdata = spi_master_get_devdata(master);
        int ret;
 
-       ret = clk_prepare_enable(mdata->spi_clk);
+       if (mdata->dev_comp->no_need_unprepare)
+               ret = clk_enable(mdata->spi_clk);
+       else
+               ret = clk_prepare_enable(mdata->spi_clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
                return ret;
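
Caching spi_clk_hz at probe also feeds the new mtk_spi_set_hw_cs_timing(), which converts nanosecond delays into clock ticks. A worked example assuming a 109.2 MHz source clock (the rate itself is illustrative):

	/* ticks per us = DIV_ROUND_UP(109200000, 1000000) = 110,
	 * so a 50 ns cs_setup -> (50 * 110) / 1000 = 5 ticks;
	 * 0 is bumped to the 1-tick minimum, and the register
	 * stores ticks - 1. */
	u32 ticks_per_us = DIV_ROUND_UP(mdata->spi_clk_hz, 1000000);
	u32 setup_ticks  = max_t(u32, (delay_ns * ticks_per_us) / 1000, 1);
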
index 96b418293bf2a493dca92f016e7dc342120db141..45889947afed8aa97799e10b1a66d7cf45606f86 100644 (file)
@@ -335,8 +335,10 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
 static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
                                     const struct spi_mem_op *op)
 {
-       if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
-           op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
+       bool all_false;
+
+       if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
+           op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
                return false;
 
        if (op->data.nbytes && op->dummy.nbytes &&
@@ -346,7 +348,13 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
        if (op->addr.nbytes > 7)
                return false;
 
-       return spi_mem_default_supports_op(mem, op);
+       all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
+                   !op->data.dtr;
+
+       if (all_false)
+               return spi_mem_default_supports_op(mem, op);
+       else
+               return spi_mem_dtr_supports_op(mem, op);
 }
 
 static int mxic_spi_mem_exec_op(struct spi_mem *mem,
@@ -355,14 +363,15 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
        struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
        int nio = 1, i, ret;
        u32 ss_ctrl;
-       u8 addr[8];
-       u8 opcode = op->cmd.opcode;
+       u8 addr[8], cmd[2];
 
        ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
        if (ret)
                return ret;
 
-       if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
+       if (mem->spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
+               nio = 8;
+       else if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
                nio = 4;
        else if (mem->spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
                nio = 2;
@@ -374,19 +383,26 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
               mxic->regs + HC_CFG);
        writel(HC_EN_BIT, mxic->regs + HC_EN);
 
-       ss_ctrl = OP_CMD_BYTES(1) | OP_CMD_BUSW(fls(op->cmd.buswidth) - 1);
+       ss_ctrl = OP_CMD_BYTES(op->cmd.nbytes) |
+                 OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
+                 (op->cmd.dtr ? OP_CMD_DDR : 0);
 
        if (op->addr.nbytes)
                ss_ctrl |= OP_ADDR_BYTES(op->addr.nbytes) |
-                          OP_ADDR_BUSW(fls(op->addr.buswidth) - 1);
+                          OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
+                          (op->addr.dtr ? OP_ADDR_DDR : 0);
 
        if (op->dummy.nbytes)
                ss_ctrl |= OP_DUMMY_CYC(op->dummy.nbytes);
 
        if (op->data.nbytes) {
-               ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1);
-               if (op->data.dir == SPI_MEM_DATA_IN)
+               ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
+                          (op->data.dtr ? OP_DATA_DDR : 0);
+               if (op->data.dir == SPI_MEM_DATA_IN) {
                        ss_ctrl |= OP_READ;
+                       if (op->data.dtr)
+                               ss_ctrl |= OP_DQS_EN;
+               }
        }
 
        writel(ss_ctrl, mxic->regs + SS_CTRL(mem->spi->chip_select));
@@ -394,7 +410,10 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
        writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
               mxic->regs + HC_CFG);
 
-       ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1);
+       for (i = 0; i < op->cmd.nbytes; i++)
+               cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));
+
+       ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
        if (ret)
                goto out;
 
@@ -567,7 +586,8 @@ static int mxic_spi_probe(struct platform_device *pdev)
        master->bits_per_word_mask = SPI_BPW_MASK(8);
        master->mode_bits = SPI_CPOL | SPI_CPHA |
                        SPI_RX_DUAL | SPI_TX_DUAL |
-                       SPI_RX_QUAD | SPI_TX_QUAD;
+                       SPI_RX_QUAD | SPI_TX_QUAD |
+                       SPI_RX_OCTAL | SPI_TX_OCTAL;
 
        mxic_spi_hw_init(mxic);
 
index 34b31aba3981711f679f19dd21ca3598db64906d..e8de3cbbfb2acdc6e676c84d2367ac1c1c980954 100644 (file)
@@ -328,8 +328,16 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 static void orion_spi_set_cs(struct spi_device *spi, bool enable)
 {
        struct orion_spi *orion_spi;
+       void __iomem *ctrl_reg;
+       u32 val;
 
        orion_spi = spi_master_get_devdata(spi->master);
+       ctrl_reg = spi_reg(orion_spi, ORION_SPI_IF_CTRL_REG);
+
+       val = readl(ctrl_reg);
+
+       /* Clear existing chip-select and assertion state */
+       val &= ~(ORION_SPI_CS_MASK | 0x1);
 
        /*
         * If this line is using a GPIO to control chip select, this internal
@@ -338,9 +346,7 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
         * as it is handled by a GPIO, but that doesn't matter. What we need
         * is to deassert the old chip select and assert some other chip select.
         */
-       orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
-       orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
-                         ORION_SPI_CS(spi->chip_select));
+       val |= ORION_SPI_CS(spi->chip_select);
 
        /*
         * Chip select logic is inverted from spi_set_cs(). For lines using a
@@ -350,9 +356,13 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
         * doesn't matter.
         */
        if (!enable)
-               orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
-       else
-               orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+               val |= 0x1;
+
+       /*
+        * To avoid toggling unwanted chip selects update the register
+        * with a single write.
+        */
+       writel(val, ctrl_reg);
 }
 
 static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
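
The orion rewrite collapses what used to be three separate read-modify-write cycles into a single register write, so no other chip-select bit can glitch through an intermediate state. Reduced to its essentials:

	val = readl(ctrl_reg);
	val &= ~(ORION_SPI_CS_MASK | 0x1);	/* clear CS field + assert bit */
	val |= ORION_SPI_CS(spi->chip_select);
	if (!enable)				/* inverted vs. spi_set_cs() */
		val |= 0x1;
	writel(val, ctrl_reg);			/* one write, no transients */
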
index 104bde153efd22deb6759d17229f4cc90636fd6a..5eb7b61bbb4d8dd1236b206a82b13590abdc86a4 100644 (file)
@@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
        struct dma_slave_config cfg;
        int ret;
 
+       memset(&cfg, 0, sizeof(cfg));
        cfg.device_fc = true;
        cfg.src_addr = pic32s->dma_base + buf_offset;
        cfg.dst_addr = pic32s->dma_base + buf_offset;
index 974e30744b83805c3dcb6d10b4cf511c9274bac2..1573f6d8eb48a0cd9e5547af2fd23baa25acfa31 100644 (file)
@@ -594,24 +594,29 @@ static int u32_reader(struct driver_data *drv_data)
 
 static void reset_sccr1(struct driver_data *drv_data)
 {
-       struct chip_data *chip =
-               spi_get_ctldata(drv_data->controller->cur_msg->spi);
-       u32 sccr1_reg;
+       u32 mask = drv_data->int_cr1 | drv_data->dma_cr1, threshold;
+       struct chip_data *chip;
+
+       if (drv_data->controller->cur_msg) {
+               chip = spi_get_ctldata(drv_data->controller->cur_msg->spi);
+               threshold = chip->threshold;
+       } else {
+               threshold = 0;
+       }
 
-       sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
-               sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
+               mask |= QUARK_X1000_SSCR1_RFT;
                break;
        case CE4100_SSP:
-               sccr1_reg &= ~CE4100_SSCR1_RFT;
+               mask |= CE4100_SSCR1_RFT;
                break;
        default:
-               sccr1_reg &= ~SSCR1_RFT;
+               mask |= SSCR1_RFT;
                break;
        }
-       sccr1_reg |= chip->threshold;
-       pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+
+       pxa2xx_spi_update(drv_data, SSCR1, mask, threshold);
 }
 
 static void int_stop_and_reset(struct driver_data *drv_data)
@@ -724,11 +729,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 
 static void handle_bad_msg(struct driver_data *drv_data)
 {
+       int_stop_and_reset(drv_data);
        pxa2xx_spi_off(drv_data);
-       clear_SSCR1_bits(drv_data, drv_data->int_cr1);
-       if (!pxa25x_ssp_comp(drv_data))
-               pxa2xx_spi_write(drv_data, SSTO, 0);
-       write_SSSR_CS(drv_data, drv_data->clear_sr);
 
        dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
 }
@@ -1156,13 +1158,10 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
 {
        struct driver_data *drv_data = spi_controller_get_devdata(controller);
 
+       int_stop_and_reset(drv_data);
+
        /* Disable the SSP */
        pxa2xx_spi_off(drv_data);
-       /* Clear and disable interrupts and service requests */
-       write_SSSR_CS(drv_data, drv_data->clear_sr);
-       clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
-       if (!pxa25x_ssp_comp(drv_data))
-               pxa2xx_spi_write(drv_data, SSTO, 0);
 
        /*
         * Stop the DMA if running. Note DMA callback handler may have unset
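
reset_sccr1() now routes through pxa2xx_spi_update(), a masked read-modify-write helper. Assuming it has the usual shape of such helpers (this sketch is an inference, not the driver's verbatim code), the semantics are:

	/* Only the bits in 'mask' change, taking their value from 'value'. */
	static void pxa2xx_spi_update_sketch(struct driver_data *drv_data,
					     u32 reg, u32 mask, u32 value)
	{
		u32 v = pxa2xx_spi_read(drv_data, reg);

		pxa2xx_spi_write(drv_data, reg, (v & ~mask) | (value & mask));
	}
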
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
new file mode 100644 (file)
index 0000000..a46b385
--- /dev/null
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip Serial Flash Controller Driver
+ *
+ * Copyright (c) 2017-2021, Rockchip Inc.
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *        Chris Morgan <macroalpha82@gmail.com>
+ *        Jon Lin <Jon.lin@rock-chips.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi-mem.h>
+
+/* System control */
+#define SFC_CTRL                       0x0
+#define  SFC_CTRL_PHASE_SEL_NEGETIVE   BIT(1)
+#define  SFC_CTRL_CMD_BITS_SHIFT       8
+#define  SFC_CTRL_ADDR_BITS_SHIFT      10
+#define  SFC_CTRL_DATA_BITS_SHIFT      12
+
+/* Interrupt mask */
+#define SFC_IMR                                0x4
+#define  SFC_IMR_RX_FULL               BIT(0)
+#define  SFC_IMR_RX_UFLOW              BIT(1)
+#define  SFC_IMR_TX_OFLOW              BIT(2)
+#define  SFC_IMR_TX_EMPTY              BIT(3)
+#define  SFC_IMR_TRAN_FINISH           BIT(4)
+#define  SFC_IMR_BUS_ERR               BIT(5)
+#define  SFC_IMR_NSPI_ERR              BIT(6)
+#define  SFC_IMR_DMA                   BIT(7)
+
+/* Interrupt clear */
+#define SFC_ICLR                       0x8
+#define  SFC_ICLR_RX_FULL              BIT(0)
+#define  SFC_ICLR_RX_UFLOW             BIT(1)
+#define  SFC_ICLR_TX_OFLOW             BIT(2)
+#define  SFC_ICLR_TX_EMPTY             BIT(3)
+#define  SFC_ICLR_TRAN_FINISH          BIT(4)
+#define  SFC_ICLR_BUS_ERR              BIT(5)
+#define  SFC_ICLR_NSPI_ERR             BIT(6)
+#define  SFC_ICLR_DMA                  BIT(7)
+
+/* FIFO threshold level */
+#define SFC_FTLR                       0xc
+#define  SFC_FTLR_TX_SHIFT             0
+#define  SFC_FTLR_TX_MASK              0x1f
+#define  SFC_FTLR_RX_SHIFT             8
+#define  SFC_FTLR_RX_MASK              0x1f
+
+/* Reset FSM and FIFO */
+#define SFC_RCVR                       0x10
+#define  SFC_RCVR_RESET                        BIT(0)
+
+/* Enhanced mode */
+#define SFC_AX                         0x14
+
+/* Address Bit number */
+#define SFC_ABIT                       0x18
+
+/* Interrupt status */
+#define SFC_ISR                                0x1c
+#define  SFC_ISR_RX_FULL_SHIFT         BIT(0)
+#define  SFC_ISR_RX_UFLOW_SHIFT                BIT(1)
+#define  SFC_ISR_TX_OFLOW_SHIFT                BIT(2)
+#define  SFC_ISR_TX_EMPTY_SHIFT                BIT(3)
+#define  SFC_ISR_TX_FINISH_SHIFT       BIT(4)
+#define  SFC_ISR_BUS_ERR_SHIFT         BIT(5)
+#define  SFC_ISR_NSPI_ERR_SHIFT                BIT(6)
+#define  SFC_ISR_DMA_SHIFT             BIT(7)
+
+/* FIFO status */
+#define SFC_FSR                                0x20
+#define  SFC_FSR_TX_IS_FULL            BIT(0)
+#define  SFC_FSR_TX_IS_EMPTY           BIT(1)
+#define  SFC_FSR_RX_IS_EMPTY           BIT(2)
+#define  SFC_FSR_RX_IS_FULL            BIT(3)
+#define  SFC_FSR_TXLV_MASK             GENMASK(12, 8)
+#define  SFC_FSR_TXLV_SHIFT            8
+#define  SFC_FSR_RXLV_MASK             GENMASK(20, 16)
+#define  SFC_FSR_RXLV_SHIFT            16
+
+/* FSM status */
+#define SFC_SR                         0x24
+#define  SFC_SR_IS_IDLE                        0x0
+#define  SFC_SR_IS_BUSY                        0x1
+
+/* Raw interrupt status */
+#define SFC_RISR                       0x28
+#define  SFC_RISR_RX_FULL              BIT(0)
+#define  SFC_RISR_RX_UNDERFLOW         BIT(1)
+#define  SFC_RISR_TX_OVERFLOW          BIT(2)
+#define  SFC_RISR_TX_EMPTY             BIT(3)
+#define  SFC_RISR_TRAN_FINISH          BIT(4)
+#define  SFC_RISR_BUS_ERR              BIT(5)
+#define  SFC_RISR_NSPI_ERR             BIT(6)
+#define  SFC_RISR_DMA                  BIT(7)
+
+/* Version */
+#define SFC_VER                                0x2C
+#define  SFC_VER_3                     0x3
+#define  SFC_VER_4                     0x4
+#define  SFC_VER_5                     0x5
+
+/* Delay line controller register */
+#define SFC_DLL_CTRL0                  0x3C
+#define SFC_DLL_CTRL0_SCLK_SMP_DLL     BIT(15)
+#define SFC_DLL_CTRL0_DLL_MAX_VER4     0xFFU
+#define SFC_DLL_CTRL0_DLL_MAX_VER5     0x1FFU
+
+/* Master trigger */
+#define SFC_DMA_TRIGGER                        0x80
+#define SFC_DMA_TRIGGER_START          1
+
+/* Src or Dst addr for master */
+#define SFC_DMA_ADDR                   0x84
+
+/* Length control register extension 32GB */
+#define SFC_LEN_CTRL                   0x88
+#define SFC_LEN_CTRL_TRB_SEL           1
+#define SFC_LEN_EXT                    0x8C
+
+/* Command */
+#define SFC_CMD                                0x100
+#define  SFC_CMD_IDX_SHIFT             0
+#define  SFC_CMD_DUMMY_SHIFT           8
+#define  SFC_CMD_DIR_SHIFT             12
+#define  SFC_CMD_DIR_RD                        0
+#define  SFC_CMD_DIR_WR                        1
+#define  SFC_CMD_ADDR_SHIFT            14
+#define  SFC_CMD_ADDR_0BITS            0
+#define  SFC_CMD_ADDR_24BITS           1
+#define  SFC_CMD_ADDR_32BITS           2
+#define  SFC_CMD_ADDR_XBITS            3
+#define  SFC_CMD_TRAN_BYTES_SHIFT      16
+#define  SFC_CMD_CS_SHIFT              30
+
+/* Address */
+#define SFC_ADDR                       0x104
+
+/* Data */
+#define SFC_DATA                       0x108
+
+/* The controller and documentation report support for up to 4 CS
+ * devices (0-3); however, I have only been able to test a single CS
+ * (CS 0) due to the configuration of my device.
+ */
+#define SFC_MAX_CHIPSELECT_NUM         4
+
+/* The SFC can transfer at most 16KB - 1 at one time;
+ * we set it to 15.5KB here for alignment.
+ */
+#define SFC_MAX_IOSIZE_VER3            (512 * 31)
+
+/* DMA is only enabled for large data transmission */
+#define SFC_DMA_TRANS_THRETHOLD                (0x40)
+
+/* The datasheet suggests keeping the clock under 150MHz; no minimum
+ * or typical value is given.
+ */
+#define SFC_MAX_SPEED          (150 * 1000 * 1000)
+
+struct rockchip_sfc {
+       struct device *dev;
+       void __iomem *regbase;
+       struct clk *hclk;
+       struct clk *clk;
+       u32 frequency;
+       /* CPU virtual address for dma_buffer (the DMA bounce buffer) */
+       void *buffer;
+       dma_addr_t dma_buffer;
+       struct completion cp;
+       bool use_dma;
+       u32 max_iosize;
+       u16 version;
+};
+
+static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
+{
+       int err;
+       u32 status;
+
+       writel_relaxed(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
+
+       err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
+                                !(status & SFC_RCVR_RESET), 20,
+                                jiffies_to_usecs(HZ));
+       if (err)
+               dev_err(sfc->dev, "SFC reset never finished\n");
+
+       /* Still need to clear the masked interrupt from RISR */
+       writel_relaxed(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+
+       dev_dbg(sfc->dev, "reset\n");
+
+       return err;
+}
+
+static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
+{
+       return  (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
+}
+
+static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
+{
+       return SFC_MAX_IOSIZE_VER3;
+}
+
+static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
+{
+       u32 reg;
+
+       /* Clear mask bits to enable the requested interrupts */
+       reg = readl(sfc->regbase + SFC_IMR);
+       reg &= ~mask;
+       writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static void rockchip_sfc_irq_mask(struct rockchip_sfc *sfc, u32 mask)
+{
+       u32 reg;
+
+       /* Set mask bits to disable the requested interrupts */
+       reg = readl(sfc->regbase + SFC_IMR);
+       reg |= mask;
+       writel(reg, sfc->regbase + SFC_IMR);
+}
+
+static int rockchip_sfc_init(struct rockchip_sfc *sfc)
+{
+       writel(0, sfc->regbase + SFC_CTRL);
+       writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+       rockchip_sfc_irq_mask(sfc, 0xFFFFFFFF);
+       if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
+               writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
+
+       return 0;
+}
+
+static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+                                status & SFC_FSR_TXLV_MASK, 0,
+                                timeout_us);
+       if (ret) {
+               dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
+
+               return -ETIMEDOUT;
+       }
+
+       return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
+}
+
+static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
+                                status & SFC_FSR_RXLV_MASK, 0,
+                                timeout_us);
+       if (ret) {
+               dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
+
+               return -ETIMEDOUT;
+       }
+
+       return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
+}
+
+static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
+{
+       if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
+               /*
+                * The SFC cannot output DUMMY cycles right after CMD cycles,
+                * so treat them as ADDR cycles.
+                */
+               op->addr.nbytes = op->dummy.nbytes;
+               op->addr.buswidth = op->dummy.buswidth;
+               op->addr.val = 0xFFFFFFFFF;
+
+               op->dummy.nbytes = 0;
+       }
+}
+
+static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
+                                  struct spi_mem *mem,
+                                  const struct spi_mem_op *op,
+                                  u32 len)
+{
+       u32 ctrl = 0, cmd = 0;
+
+       /* set CMD */
+       cmd = op->cmd.opcode;
+       ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
+
+       /* set ADDR */
+       if (op->addr.nbytes) {
+               if (op->addr.nbytes == 4) {
+                       cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
+               } else if (op->addr.nbytes == 3) {
+                       cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
+               } else {
+                       cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
+                       writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT);
+               }
+
+               ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
+       }
+
+       /* set DUMMY */
+       if (op->dummy.nbytes) {
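+               /* Dummy is programmed in bus-clock cycles: nbytes * 8 / buswidth */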
+               if (op->dummy.buswidth == 4)
+                       cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
+               else if (op->dummy.buswidth == 2)
+                       cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
+               else
+                       cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
+       }
+
+       /* set DATA */
+       if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
+               writel(len, sfc->regbase + SFC_LEN_EXT);
+       else
+               cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
+       if (len) {
+               if (op->data.dir == SPI_MEM_DATA_OUT)
+                       cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+               ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
+       }
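+       /* Ops with an address but no data phase (e.g. erase) go out as writes */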
+       if (!len && op->addr.nbytes)
+               cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;
+
+       /* set the Controller */
+       ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
+       cmd |= mem->spi->chip_select << SFC_CMD_CS_SHIFT;
+
+       dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
+               op->addr.nbytes, op->addr.buswidth,
+               op->dummy.nbytes, op->dummy.buswidth);
+       dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
+               ctrl, cmd, op->addr.val, len);
+
+       writel(ctrl, sfc->regbase + SFC_CTRL);
+       writel(cmd, sfc->regbase + SFC_CMD);
+       if (op->addr.nbytes)
+               writel(op->addr.val, sfc->regbase + SFC_ADDR);
+
+       return 0;
+}
+
+static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
+{
+       u8 bytes = len & 0x3;
+       u32 dwords;
+       int tx_level;
+       u32 write_words;
+       u32 tmp = 0;
+
+       dwords = len >> 2;
+       while (dwords) {
+               tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+               if (tx_level < 0)
+                       return tx_level;
+               write_words = min_t(u32, tx_level, dwords);
+               iowrite32_rep(sfc->regbase + SFC_DATA, buf, write_words);
+               buf += write_words << 2;
+               dwords -= write_words;
+       }
+
+       /* write the rest non word aligned bytes */
+       if (bytes) {
+               tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
+               if (tx_level < 0)
+                       return tx_level;
+               memcpy(&tmp, buf, bytes);
+               writel(tmp, sfc->regbase + SFC_DATA);
+       }
+
+       return len;
+}
+
+static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
+{
+       u8 bytes = len & 0x3;
+       u32 dwords;
+       u8 read_words;
+       int rx_level;
+       int tmp;
+
+       /* word aligned access only */
+       dwords = len >> 2;
+       while (dwords) {
+               rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+               if (rx_level < 0)
+                       return rx_level;
+               read_words = min_t(u32, rx_level, dwords);
+               ioread32_rep(sfc->regbase + SFC_DATA, buf, read_words);
+               buf += read_words << 2;
+               dwords -= read_words;
+       }
+
+       /* read the remaining non-word-aligned bytes */
+       if (bytes) {
+               rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
+               if (rx_level < 0)
+                       return rx_level;
+               tmp = readl(sfc->regbase + SFC_DATA);
+               memcpy(buf, &tmp, bytes);
+       }
+
+       return len;
+}
+
+static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
+{
+       writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
+       writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
+       writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);
+
+       return len;
+}
+
+static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
+                                      const struct spi_mem_op *op, u32 len)
+{
+       dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
+       else
+               return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
+}
+
+static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
+                                     const struct spi_mem_op *op, u32 len)
+{
+       int ret;
+
+       dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
+
+       if (op->data.dir == SPI_MEM_DATA_OUT)
+               memcpy(sfc->buffer, op->data.buf.out, len);
+
+       ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
+       if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
+               dev_err(sfc->dev, "DMA wait for transfer finish timeout\n");
+               ret = -ETIMEDOUT;
+       }
+       rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
+       if (op->data.dir == SPI_MEM_DATA_IN)
+               memcpy(op->data.buf.in, sfc->buffer, len);
+
+       return ret;
+}
+
+static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
+{
+       int ret = 0;
+       u32 status;
+
+       ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
+                                !(status & SFC_SR_IS_BUSY),
+                                20, timeout_us);
+       if (ret) {
+               dev_err(sfc->dev, "wait sfc idle timeout\n");
+               rockchip_sfc_reset(sfc);
+
+               ret = -EIO;
+       }
+
+       return ret;
+}
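
rockchip_sfc_xfer_done() leans on readl_poll_timeout() from <linux/iopoll.h>: re-read a register every sleep_us microseconds until the condition holds, or fail with -ETIMEDOUT after timeout_us. The general shape, with hypothetical register and bit names:

#include <linux/iopoll.h>

static int wait_idle(void __iomem *base)
{
        u32 sr;

        /* poll SR every 20 us; give up after 100 ms */
        return readl_poll_timeout(base + REG_SR, sr, !(sr & SR_BUSY),
                                  20, 100000);
}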
+
+static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+       struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+       u32 len = op->data.nbytes;
+       int ret;
+
+       if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) {
+               ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz);
+               if (ret)
+                       return ret;
+               sfc->frequency = mem->spi->max_speed_hz;
+               dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
+                       sfc->frequency, clk_get_rate(sfc->clk));
+       }
+
+       rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
+       rockchip_sfc_xfer_setup(sfc, mem, op, len);
+       if (len) {
+               if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
+                       init_completion(&sfc->cp);
+                       rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA);
+                       ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
+               } else {
+                       ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
+               }
+
+               if (ret != len) {
+                       dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
+
+                       return -EIO;
+               }
+       }
+
+       return rockchip_sfc_xfer_done(sfc, 100000);
+}
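
For context, exec_op receives fully described operations from the spi-mem layer. A sketch of the kind of op this path handles, a quad-output fast read with a 3-byte address and 8 dummy cycles (opcode and values are illustrative, not taken from this driver):

#include <linux/spi/spi-mem.h>

u8 buf[256];
struct spi_mem_op op =
        SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),             /* 1-bit opcode    */
                   SPI_MEM_OP_ADDR(3, 0x1000, 1),       /* 3-byte address  */
                   SPI_MEM_OP_DUMMY(1, 1),              /* 1 byte = 8 clks */
                   SPI_MEM_OP_DATA_IN(sizeof(buf), buf, 4)); /* 4-bit data */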
+
+static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+       struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+
+       op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
+
+       return 0;
+}
+
+static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
+       .exec_op = rockchip_sfc_exec_mem_op,
+       .adjust_op_size = rockchip_sfc_adjust_op_size,
+};
+
+static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
+{
+       struct rockchip_sfc *sfc = dev_id;
+       u32 reg;
+
+       reg = readl(sfc->regbase + SFC_RISR);
+
+       /* Clear interrupt */
+       writel_relaxed(reg, sfc->regbase + SFC_ICLR);
+
+       if (reg & SFC_RISR_DMA) {
+               complete(&sfc->cp);
+
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+static int rockchip_sfc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct spi_master *master;
+       struct resource *res;
+       struct rockchip_sfc *sfc;
+       int ret;
+
+       master = devm_spi_alloc_master(&pdev->dev, sizeof(*sfc));
+       if (!master)
+               return -ENOMEM;
+
+       master->flags = SPI_MASTER_HALF_DUPLEX;
+       master->mem_ops = &rockchip_sfc_mem_ops;
+       master->dev.of_node = pdev->dev.of_node;
+       master->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
+       master->max_speed_hz = SFC_MAX_SPEED;
+       master->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
+
+       sfc = spi_master_get_devdata(master);
+       sfc->dev = dev;
+       platform_set_drvdata(pdev, master);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       sfc->regbase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(sfc->regbase))
+               return PTR_ERR(sfc->regbase);
+
+       sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
+       if (IS_ERR(sfc->clk)) {
+               dev_err(&pdev->dev, "Failed to get sfc interface clk\n");
+               return PTR_ERR(sfc->clk);
+       }
+
+       sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
+       if (IS_ERR(sfc->hclk)) {
+               dev_err(&pdev->dev, "Failed to get sfc ahb clk\n");
+               return PTR_ERR(sfc->hclk);
+       }
+
+       sfc->use_dma = !of_property_read_bool(sfc->dev->of_node,
+                                             "rockchip,sfc-no-dma");
+
+       if (sfc->use_dma) {
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+               if (ret) {
+                       dev_warn(dev, "Unable to set dma mask\n");
+                       return ret;
+               }
+
+               sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3,
+                                                 &sfc->dma_buffer,
+                                                 GFP_KERNEL);
+               if (!sfc->buffer)
+                       return -ENOMEM;
+       }
+
+       ret = clk_prepare_enable(sfc->hclk);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to enable ahb clk\n");
+               goto err_hclk;
+       }
+
+       ret = clk_prepare_enable(sfc->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to enable interface clk\n");
+               goto err_clk;
+       }
+
+       /* Find the irq */
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(dev, "Failed to get the irq\n");
+               goto err_irq;
+       }
+
+       ret = devm_request_irq(dev, ret, rockchip_sfc_irq_handler,
+                              0, pdev->name, sfc);
+       if (ret) {
+               dev_err(dev, "Failed to request irq\n");
+               goto err_irq;
+       }
+
+       ret = rockchip_sfc_init(sfc);
+       if (ret)
+               goto err_irq;
+
+       sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
+       sfc->version = rockchip_sfc_get_version(sfc);
+
+       ret = spi_register_master(master);
+       if (ret)
+               goto err_irq;
+
+       return 0;
+
+err_irq:
+       clk_disable_unprepare(sfc->clk);
+err_clk:
+       clk_disable_unprepare(sfc->hclk);
+err_hclk:
+       return ret;
+}
+
+static int rockchip_sfc_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct rockchip_sfc *sfc = spi_master_get_devdata(master);
+
+       spi_unregister_master(master);
+
+       clk_disable_unprepare(sfc->clk);
+       clk_disable_unprepare(sfc->hclk);
+
+       return 0;
+}
+
+static const struct of_device_id rockchip_sfc_dt_ids[] = {
+       { .compatible = "rockchip,sfc"},
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rockchip_sfc_dt_ids);
+
+static struct platform_driver rockchip_sfc_driver = {
+       .driver = {
+               .name   = "rockchip-sfc",
+               .of_match_table = rockchip_sfc_dt_ids,
+       },
+       .probe  = rockchip_sfc_probe,
+       .remove = rockchip_sfc_remove,
+};
+module_platform_driver(rockchip_sfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Rockchip Serial Flash Controller Driver");
+MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_AUTHOR("Jon Lin <Jon.lin@rock-chips.com>");
index ab19068be8675561f2529d5271b77cb2f8d06dfe..1edbf44c05a72e713d03b6ed08f940ff43704abf 100644 (file)
 
 /*
  * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
- * The slave devices address offset is always 0x8000 and size is 4K.
+ * ADI supports 12/14-bit addresses on r2p0, and an additional 17-bit
+ * mode on r3p0 and later versions. Since bits [1:0] are always zero,
+ * the spec describes these as 10/12/15-bit address modes.
+ * The 10-bit mode supports a single slave; the 12/15-bit modes support
+ * 3 slaves, with the two high bits selecting the slave_id.
+ * The slave device address offset is 0x8000 in the 10/12-bit address
+ * modes, and 0x20000 in the 15-bit mode.
  */
-#define ADI_SLAVE_ADDR_SIZE            SZ_4K
-#define ADI_SLAVE_OFFSET               0x8000
+#define ADI_10BIT_SLAVE_ADDR_SIZE      SZ_4K
+#define ADI_10BIT_SLAVE_OFFSET         0x8000
+#define ADI_12BIT_SLAVE_ADDR_SIZE      SZ_16K
+#define ADI_12BIT_SLAVE_OFFSET         0x8000
+#define ADI_15BIT_SLAVE_ADDR_SIZE      SZ_128K
+#define ADI_15BIT_SLAVE_OFFSET         0x20000
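
The three sizes follow from the comment above: an N-bit mode address plus the two implicit zero bits spans 2^(N+2) bytes, i.e. 4 KiB, 16 KiB and 128 KiB for the 10/12/15-bit modes. As a worked check (hypothetical helper):

static u32 adi_addr_span(unsigned int mode_bits)
{
        /* bits [1:0] of the full address are always zero */
        return 1U << (mode_bits + 2);   /* 10 -> 4K, 12 -> 16K, 15 -> 128K */
}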
 
 /* Timeout (ms) for the trylock of hardware spinlocks */
 #define ADI_HWSPINLOCK_TIMEOUT         5000
 
 #define ADI_FIFO_DRAIN_TIMEOUT         1000
 #define ADI_READ_TIMEOUT               2000
-#define REG_ADDR_LOW_MASK              GENMASK(11, 0)
+
+/*
+ * The address is read back from REG_ADI_RD_DATA bits [30:16], which map to:
+ * REG_ADI_RD_CMD bit[14:0] for r2p0
+ * REG_ADI_RD_CMD bit[16:2] for r3p0
+ */
+#define RDBACK_ADDR_MASK_R2            GENMASK(14, 0)
+#define RDBACK_ADDR_MASK_R3            GENMASK(16, 2)
+#define RDBACK_ADDR_SHIFT_R3           2
 
 /* Registers definitions for PMIC watchdog controller */
-#define REG_WDG_LOAD_LOW               0x80
-#define REG_WDG_LOAD_HIGH              0x84
-#define REG_WDG_CTRL                   0x88
-#define REG_WDG_LOCK                   0xa0
+#define REG_WDG_LOAD_LOW               0x0
+#define REG_WDG_LOAD_HIGH              0x4
+#define REG_WDG_CTRL                   0x8
+#define REG_WDG_LOCK                   0x20
 
 /* Bits definitions for register REG_WDG_CTRL */
 #define BIT_WDG_RUN                    BIT(1)
 #define BIT_WDG_NEW                    BIT(2)
 #define BIT_WDG_RST                    BIT(3)
 
+/* Bits definitions for register REG_MODULE_EN */
+#define BIT_WDG_EN                     BIT(2)
+
 /* Registers definitions for PMIC */
 #define PMIC_RST_STATUS                        0xee8
 #define PMIC_MODULE_EN                 0xc08
 #define PMIC_CLK_EN                    0xc18
-#define BIT_WDG_EN                     BIT(2)
+#define PMIC_WDG_BASE                  0x80
 
 /* Definition of PMIC reset status register */
 #define HWRST_STATUS_SECURITY          0x02
 #define HWRST_STATUS_WATCHDOG          0xf0
 
 /* Use default timeout 50 ms that converts to watchdog values */
-#define WDG_LOAD_VAL                   ((50 * 1000) / 32768)
+#define WDG_LOAD_VAL                   ((50 * 32768) / 1000)
 #define WDG_LOAD_MASK                  GENMASK(15, 0)
 #define WDG_UNLOCK_KEY                 0xe551
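
The corrected WDG_LOAD_VAL converts milliseconds into ticks of the watchdog's 32.768 kHz clock: 50 * 32768 / 1000 = 1638 ticks. The old formula divided the other way and truncated to a single tick, so the watchdog fired almost immediately. A sketch of the conversion:

/* ms -> ticks for a 32768 Hz watchdog clock */
static unsigned int wdg_ticks(unsigned int ms)
{
        return ms * 32768 / 1000;       /* wdg_ticks(50) == 1638 */
}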
 
+struct sprd_adi_wdg {
+       u32 base;
+       u32 rst_sts;
+       u32 wdg_en;
+       u32 wdg_clk;
+};
+
+struct sprd_adi_data {
+       u32 slave_offset;
+       u32 slave_addr_size;
+       int (*read_check)(u32 val, u32 reg);
+       int (*restart)(struct notifier_block *this,
+                      unsigned long mode, void *cmd);
+       void (*wdg_rst)(void *p);
+};
+
 struct sprd_adi {
        struct spi_controller   *ctlr;
        struct device           *dev;
@@ -115,26 +152,21 @@ struct sprd_adi {
        unsigned long           slave_vbase;
        unsigned long           slave_pbase;
        struct notifier_block   restart_handler;
+       const struct sprd_adi_data *data;
 };
 
-static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
+static int sprd_adi_check_addr(struct sprd_adi *sadi, u32 reg)
 {
-       if (paddr < sadi->slave_pbase || paddr >
-           (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
+       if (reg >= sadi->data->slave_addr_size) {
                dev_err(sadi->dev,
-                       "slave physical address is incorrect, addr = 0x%x\n",
-                       paddr);
+                       "slave address offset is incorrect, reg = 0x%x\n",
+                       reg);
                return -EINVAL;
        }
 
        return 0;
 }
 
-static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
-{
-       return (paddr - sadi->slave_pbase + sadi->slave_vbase);
-}
-
 static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
 {
        u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
@@ -161,11 +193,35 @@ static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
        return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
 }
 
-static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
+static int sprd_adi_read_check(u32 val, u32 addr)
+{
+       u32 rd_addr;
+
+       rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
+
+       if (rd_addr != addr) {
+               pr_err("ADI read error, addr = 0x%x, val = 0x%x\n", addr, val);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int sprd_adi_read_check_r2(u32 val, u32 reg)
+{
+       return sprd_adi_read_check(val, reg & RDBACK_ADDR_MASK_R2);
+}
+
+static int sprd_adi_read_check_r3(u32 val, u32 reg)
+{
+       return sprd_adi_read_check(val, (reg & RDBACK_ADDR_MASK_R3) >> RDBACK_ADDR_SHIFT_R3);
+}
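
The two helpers compare the address echoed by the controller against what was written to RD_CMD: on r2p0 the echo matches bits [14:0] directly, while on r3p0 the echo drops the two implicit zero bits, hence the extra shift. A sketch of the comparison, assuming the echoed address sits in bits [30:16] of the returned word:

#include <linux/bits.h>

static bool rdback_matches(u32 val, u32 reg, bool r3p0)
{
        u32 echoed = (val & GENMASK(30, 16)) >> 16;
        u32 expect = r3p0 ? (reg & GENMASK(16, 2)) >> 2
                          : reg & GENMASK(14, 0);

        return echoed == expect;
}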
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg, u32 *read_val)
 {
        int read_timeout = ADI_READ_TIMEOUT;
        unsigned long flags;
-       u32 val, rd_addr;
+       u32 val;
        int ret = 0;
 
        if (sadi->hwlock) {
@@ -178,11 +234,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
                }
        }
 
+       ret = sprd_adi_check_addr(sadi, reg);
+       if (ret)
+               goto out;
+
        /*
-        * Set the physical register address need to read into RD_CMD register,
+        * Set the slave address offset need to read into RD_CMD register,
         * then ADI controller will start to transfer automatically.
         */
-       writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
+       writel_relaxed(reg, sadi->base + REG_ADI_RD_CMD);
 
        /*
         * Wait read operation complete, the BIT_RD_CMD_BUSY will be set
@@ -205,18 +265,15 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
        }
 
        /*
-        * The return value includes data and read register address, from bit 0
-        * to bit 15 are data, and from bit 16 to bit 30 are read register
-        * address. Then we can check the returned register address to validate
-        * data.
+        * On ADI versions before r5p0, the return value includes both the
+        * data and the read register address: bits 0 to 15 are data, and
+        * bits 16 to 30 are the read register address. The returned register
+        * address can therefore be checked to validate the data.
         */
-       rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
-
-       if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
-               dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
-                       reg_paddr, val);
-               ret = -EIO;
-               goto out;
+       if (sadi->data->read_check) {
+               ret = sadi->data->read_check(val, reg);
+               if (ret < 0)
+                       goto out;
        }
 
        *read_val = val & RD_VALUE_MASK;
@@ -227,9 +284,8 @@ out:
        return ret;
 }
 
-static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
+static int sprd_adi_write(struct sprd_adi *sadi, u32 reg, u32 val)
 {
-       unsigned long reg = sprd_adi_to_vaddr(sadi, reg_paddr);
        u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
        unsigned long flags;
        int ret;
@@ -244,6 +300,10 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
                }
        }
 
+       ret = sprd_adi_check_addr(sadi, reg);
+       if (ret)
+               goto out;
+
        ret = sprd_adi_drain_fifo(sadi);
        if (ret < 0)
                goto out;
@@ -254,7 +314,8 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
         */
        do {
                if (!sprd_adi_fifo_is_full(sadi)) {
-                       writel_relaxed(val, (void __iomem *)reg);
+                       /* the write must go through the virtual register address */
+                       writel_relaxed(val, (void __iomem *)(sadi->slave_vbase + reg));
                        break;
                }
 
@@ -277,60 +338,41 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
                                 struct spi_transfer *t)
 {
        struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
-       u32 phy_reg, val;
+       u32 reg, val;
        int ret;
 
        if (t->rx_buf) {
-               phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
-
-               ret = sprd_adi_check_paddr(sadi, phy_reg);
-               if (ret)
-                       return ret;
-
-               ret = sprd_adi_read(sadi, phy_reg, &val);
-               if (ret)
-                       return ret;
-
+               reg = *(u32 *)t->rx_buf;
+               ret = sprd_adi_read(sadi, reg, &val);
                *(u32 *)t->rx_buf = val;
        } else if (t->tx_buf) {
                u32 *p = (u32 *)t->tx_buf;
-
-               /*
-                * Get the physical register address need to write and convert
-                * the physical address to virtual address. Since we need
-                * virtual register address to write.
-                */
-               phy_reg = *p++ + sadi->slave_pbase;
-               ret = sprd_adi_check_paddr(sadi, phy_reg);
-               if (ret)
-                       return ret;
-
+               reg = *p++;
                val = *p;
-               ret = sprd_adi_write(sadi, phy_reg, val);
-               if (ret)
-                       return ret;
+               ret = sprd_adi_write(sadi, reg, val);
        } else {
                dev_err(sadi->dev, "no buffer for transfer\n");
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       return 0;
+       return ret;
 }
 
-static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
+static void sprd_adi_set_wdt_rst_mode(void *p)
 {
 #if IS_ENABLED(CONFIG_SPRD_WATCHDOG)
        u32 val;
+       struct sprd_adi *sadi = (struct sprd_adi *)p;
 
-       /* Set default watchdog reboot mode */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       /* Init watchdog reset mode */
+       sprd_adi_read(sadi, PMIC_RST_STATUS, &val);
        val |= HWRST_STATUS_WATCHDOG;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+       sprd_adi_write(sadi, PMIC_RST_STATUS, val);
 #endif
 }
 
-static int sprd_adi_restart_handler(struct notifier_block *this,
-                                   unsigned long mode, void *cmd)
+static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
+                                 void *cmd, struct sprd_adi_wdg *wdg)
 {
        struct sprd_adi *sadi = container_of(this, struct sprd_adi,
                                             restart_handler);
@@ -366,40 +408,40 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
                reboot_mode = HWRST_STATUS_NORMAL;
 
        /* Record the reboot mode */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
+       sprd_adi_read(sadi, wdg->rst_sts, &val);
        val &= ~HWRST_STATUS_WATCHDOG;
        val |= reboot_mode;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
+       sprd_adi_write(sadi, wdg->rst_sts, val);
 
        /* Enable the interface clock of the watchdog */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_MODULE_EN, &val);
+       sprd_adi_read(sadi, wdg->wdg_en, &val);
        val |= BIT_WDG_EN;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_MODULE_EN, val);
+       sprd_adi_write(sadi, wdg->wdg_en, val);
 
        /* Enable the work clock of the watchdog */
-       sprd_adi_read(sadi, sadi->slave_pbase + PMIC_CLK_EN, &val);
+       sprd_adi_read(sadi, wdg->wdg_clk, &val);
        val |= BIT_WDG_EN;
-       sprd_adi_write(sadi, sadi->slave_pbase + PMIC_CLK_EN, val);
+       sprd_adi_write(sadi, wdg->wdg_clk, val);
 
        /* Unlock the watchdog */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, WDG_UNLOCK_KEY);
 
-       sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+       sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
        val |= BIT_WDG_NEW;
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
 
        /* Load the watchdog timeout value, 50ms is always enough. */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_HIGH, 0);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOAD_LOW,
                       WDG_LOAD_VAL & WDG_LOAD_MASK);
 
        /* Start the watchdog to reset system */
-       sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
+       sprd_adi_read(sadi, wdg->base + REG_WDG_CTRL, &val);
        val |= BIT_WDG_RUN | BIT_WDG_RST;
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_CTRL, val);
 
        /* Lock the watchdog */
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
+       sprd_adi_write(sadi, wdg->base + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
 
        mdelay(1000);
 
@@ -407,6 +449,19 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
+static int sprd_adi_restart_sc9860(struct notifier_block *this,
+                                          unsigned long mode, void *cmd)
+{
+       struct sprd_adi_wdg wdg = {
+               .base = PMIC_WDG_BASE,
+               .rst_sts = PMIC_RST_STATUS,
+               .wdg_en = PMIC_MODULE_EN,
+               .wdg_clk = PMIC_CLK_EN,
+       };
+
+       return sprd_adi_restart(this, mode, cmd, &wdg);
+}
+
 static void sprd_adi_hw_init(struct sprd_adi *sadi)
 {
        struct device_node *np = sadi->dev->of_node;
@@ -458,10 +513,11 @@ static void sprd_adi_hw_init(struct sprd_adi *sadi)
 static int sprd_adi_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
+       const struct sprd_adi_data *data;
        struct spi_controller *ctlr;
        struct sprd_adi *sadi;
        struct resource *res;
-       u32 num_chipselect;
+       u16 num_chipselect;
        int ret;
 
        if (!np) {
@@ -469,6 +525,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "no matching driver data found\n");
+               return -EINVAL;
+       }
+
        pdev->id = of_alias_get_id(np, "spi");
        num_chipselect = of_get_child_count(np);
 
@@ -486,10 +548,12 @@ static int sprd_adi_probe(struct platform_device *pdev)
                goto put_ctlr;
        }
 
-       sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
-       sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
+       sadi->slave_vbase = (unsigned long)sadi->base +
+                           data->slave_offset;
+       sadi->slave_pbase = res->start + data->slave_offset;
        sadi->ctlr = ctlr;
        sadi->dev = &pdev->dev;
+       sadi->data = data;
        ret = of_hwspin_lock_get_id(np, 0);
        if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
                sadi->hwlock =
@@ -510,7 +574,9 @@ static int sprd_adi_probe(struct platform_device *pdev)
        }
 
        sprd_adi_hw_init(sadi);
-       sprd_adi_set_wdt_rst_mode(sadi);
+
+       if (sadi->data->wdg_rst)
+               sadi->data->wdg_rst(sadi);
 
        ctlr->dev.of_node = pdev->dev.of_node;
        ctlr->bus_num = pdev->id;
@@ -525,12 +591,14 @@ static int sprd_adi_probe(struct platform_device *pdev)
                goto put_ctlr;
        }
 
-       sadi->restart_handler.notifier_call = sprd_adi_restart_handler;
-       sadi->restart_handler.priority = 128;
-       ret = register_restart_handler(&sadi->restart_handler);
-       if (ret) {
-               dev_err(&pdev->dev, "can not register restart handler\n");
-               goto put_ctlr;
+       if (sadi->data->restart) {
+               sadi->restart_handler.notifier_call = sadi->data->restart;
+               sadi->restart_handler.priority = 128;
+               ret = register_restart_handler(&sadi->restart_handler);
+               if (ret) {
+                       dev_err(&pdev->dev, "can not register restart handler\n");
+                       goto put_ctlr;
+               }
        }
 
        return 0;
@@ -549,9 +617,38 @@ static int sprd_adi_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct sprd_adi_data sc9860_data = {
+       .slave_offset = ADI_10BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r2,
+       .restart = sprd_adi_restart_sc9860,
+       .wdg_rst = sprd_adi_set_wdt_rst_mode,
+};
+
+static struct sprd_adi_data sc9863_data = {
+       .slave_offset = ADI_12BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_12BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r3,
+};
+
+static struct sprd_adi_data ums512_data = {
+       .slave_offset = ADI_15BIT_SLAVE_OFFSET,
+       .slave_addr_size = ADI_15BIT_SLAVE_ADDR_SIZE,
+       .read_check = sprd_adi_read_check_r3,
+};
+
 static const struct of_device_id sprd_adi_of_match[] = {
        {
                .compatible = "sprd,sc9860-adi",
+               .data = &sc9860_data,
+       },
+       {
+               .compatible = "sprd,sc9863-adi",
+               .data = &sc9863_data,
+       },
+       {
+               .compatible = "sprd,ums512-adi",
+               .data = &ums512_data,
        },
        { },
 };
index 05618a618939c0114094aa0ee7340c93abcb9eef..9bd3fd1652f744bc6132198414a2e790f3866a81 100644 (file)
 #define SPI_3WIRE_TX           3
 #define SPI_3WIRE_RX           4
 
+#define STM32_SPI_AUTOSUSPEND_DELAY            1       /* 1 ms */
+
 /*
  * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
  * without fifo buffers.
@@ -568,29 +570,30 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
 /**
  * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
  * @spi: pointer to the spi controller data structure
- * @flush: boolean indicating that FIFO should be flushed
  *
  * Write in rx_buf depends on remaining bytes to avoid to write beyond
  * rx_buf end.
  */
-static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
+static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi)
 {
        u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
        u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
 
        while ((spi->rx_len > 0) &&
               ((sr & STM32H7_SPI_SR_RXP) ||
-               (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
+               ((sr & STM32H7_SPI_SR_EOT) &&
+                ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
                u32 offs = spi->cur_xferlen - spi->rx_len;
 
                if ((spi->rx_len >= sizeof(u32)) ||
-                   (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
+                   (sr & STM32H7_SPI_SR_RXWNE)) {
                        u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
 
                        *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
                        spi->rx_len -= sizeof(u32);
                } else if ((spi->rx_len >= sizeof(u16)) ||
-                          (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
+                          (!(sr & STM32H7_SPI_SR_RXWNE) &&
+                           (rxplvl >= 2 || spi->cur_bpw > 8))) {
                        u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
 
                        *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
@@ -606,8 +609,8 @@ static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
                rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
        }
 
-       dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
-               flush ? "(flush)" : "", spi->rx_len);
+       dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
+               __func__, spi->rx_len, sr);
 }
 
 /**
@@ -674,18 +677,12 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
  * stm32h7_spi_disable - Disable SPI controller
  * @spi: pointer to the spi controller data structure
  *
- * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
- * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
- * RX-Fifo.
- * Normally, if TSIZE has been configured, we should relax the hardware at the
- * reception of the EOT interrupt. But in case of error, EOT will not be
- * raised. So the subsystem unprepare_message call allows us to properly
- * complete the transfer from an hardware point of view.
+ * RX-Fifo is flushed when SPI controller is disabled.
  */
 static void stm32h7_spi_disable(struct stm32_spi *spi)
 {
        unsigned long flags;
-       u32 cr1, sr;
+       u32 cr1;
 
        dev_dbg(spi->dev, "disable controller\n");
 
@@ -698,25 +695,6 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
                return;
        }
 
-       /* Wait on EOT or suspend the flow */
-       if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
-                                             sr, !(sr & STM32H7_SPI_SR_EOT),
-                                             10, 100000) < 0) {
-               if (cr1 & STM32H7_SPI_CR1_CSTART) {
-                       writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
-                                      spi->base + STM32H7_SPI_CR1);
-                       if (readl_relaxed_poll_timeout_atomic(
-                                               spi->base + STM32H7_SPI_SR,
-                                               sr, !(sr & STM32H7_SPI_SR_SUSP),
-                                               10, 100000) < 0)
-                               dev_warn(spi->dev,
-                                        "Suspend request timeout\n");
-               }
-       }
-
-       if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
-               stm32h7_spi_read_rxfifo(spi, true);
-
        if (spi->cur_usedma && spi->dma_tx)
                dmaengine_terminate_all(spi->dma_tx);
        if (spi->cur_usedma && spi->dma_rx)
@@ -911,7 +889,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
                if (__ratelimit(&rs))
                        dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, false);
+                       stm32h7_spi_read_rxfifo(spi);
                /*
                 * If communication is suspended while using DMA, it means
                 * that something went wrong, so stop the current transfer
@@ -932,8 +910,10 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 
        if (sr & STM32H7_SPI_SR_EOT) {
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, true);
-               end = true;
+                       stm32h7_spi_read_rxfifo(spi);
+               if (!spi->cur_usedma ||
+                   (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX))
+                       end = true;
        }
 
        if (sr & STM32H7_SPI_SR_TXP)
@@ -942,7 +922,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 
        if (sr & STM32H7_SPI_SR_RXP)
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
-                       stm32h7_spi_read_rxfifo(spi, false);
+                       stm32h7_spi_read_rxfifo(spi);
 
        writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
 
@@ -1041,42 +1021,17 @@ static void stm32f4_spi_dma_tx_cb(void *data)
 }
 
 /**
- * stm32f4_spi_dma_rx_cb - dma callback
+ * stm32_spi_dma_rx_cb - dma callback
  * @data: pointer to the spi controller data structure
  *
  * DMA callback is called when the transfer is complete for DMA RX channel.
  */
-static void stm32f4_spi_dma_rx_cb(void *data)
+static void stm32_spi_dma_rx_cb(void *data)
 {
        struct stm32_spi *spi = data;
 
        spi_finalize_current_transfer(spi->master);
-       stm32f4_spi_disable(spi);
-}
-
-/**
- * stm32h7_spi_dma_cb - dma callback
- * @data: pointer to the spi controller data structure
- *
- * DMA callback is called when the transfer is complete or when an error
- * occurs. If the transfer is complete, EOT flag is raised.
- */
-static void stm32h7_spi_dma_cb(void *data)
-{
-       struct stm32_spi *spi = data;
-       unsigned long flags;
-       u32 sr;
-
-       spin_lock_irqsave(&spi->lock, flags);
-
-       sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
-
-       spin_unlock_irqrestore(&spi->lock, flags);
-
-       if (!(sr & STM32H7_SPI_SR_EOT))
-               dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
-
-       /* Now wait for EOT, or SUSP or OVR in case of error */
+       spi->cfg->disable(spi);
 }
 
 /**
@@ -1242,11 +1197,13 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
  */
 static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
 {
-       /* Enable the interrupts relative to the end of transfer */
-       stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
-                                                STM32H7_SPI_IER_TXTFIE |
-                                                STM32H7_SPI_IER_OVRIE |
-                                                STM32H7_SPI_IER_MODFIE);
+       uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
+
+       /* Enable the interrupts */
+       if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)
+               ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE;
+
+       stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
 
        stm32_spi_enable(spi);
 
@@ -1645,10 +1602,6 @@ static int stm32_spi_transfer_one(struct spi_master *master,
        struct stm32_spi *spi = spi_master_get_devdata(master);
        int ret;
 
-       /* Don't do anything on 0 bytes transfers */
-       if (transfer->len == 0)
-               return 0;
-
        spi->tx_buf = transfer->tx_buf;
        spi->rx_buf = transfer->rx_buf;
        spi->tx_len = spi->tx_buf ? transfer->len : 0;
@@ -1762,7 +1715,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
        .set_mode = stm32f4_spi_set_mode,
        .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
        .dma_tx_cb = stm32f4_spi_dma_tx_cb,
-       .dma_rx_cb = stm32f4_spi_dma_rx_cb,
+       .dma_rx_cb = stm32_spi_dma_rx_cb,
        .transfer_one_irq = stm32f4_spi_transfer_one_irq,
        .irq_handler_event = stm32f4_spi_irq_event,
        .irq_handler_thread = stm32f4_spi_irq_thread,
@@ -1782,8 +1735,11 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
        .set_data_idleness = stm32h7_spi_data_idleness,
        .set_number_of_data = stm32h7_spi_number_of_data,
        .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
-       .dma_rx_cb = stm32h7_spi_dma_cb,
-       .dma_tx_cb = stm32h7_spi_dma_cb,
+       .dma_rx_cb = stm32_spi_dma_rx_cb,
+       /*
+        * dma_tx_cb is not needed: in the TX case the DMA transfer is
+        * followed by an SPI access, so completion is handled from the
+        * SPI interrupt.
+        */
        .transfer_one_irq = stm32h7_spi_transfer_one_irq,
        .irq_handler_thread = stm32h7_spi_irq_thread,
        .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
@@ -1927,6 +1883,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
        if (spi->dma_tx || spi->dma_rx)
                master->can_dma = stm32_spi_can_dma;
 
+       pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        STM32_SPI_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
@@ -1938,6 +1897,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
                goto err_pm_disable;
        }
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
        dev_info(&pdev->dev, "driver initialized\n");
 
        return 0;
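
The probe now follows the usual runtime-PM autosuspend idiom: mark the device active while it is being set up, then hand the initial reference back with an autosuspend grace period. In general form (the 1 ms delay is this driver's choice):

#include <linux/pm_runtime.h>

static int probe_pm_setup(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 1);  /* ms of idle grace   */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);                /* hw already powered */
        pm_runtime_get_noresume(dev);
        pm_runtime_enable(dev);

        /* ... touch the hardware, register the controller ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);           /* may suspend later  */
        return 0;
}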
@@ -1946,6 +1908,7 @@ err_pm_disable:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
 err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
@@ -1970,6 +1933,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
+
        if (master->dma_tx)
                dma_release_channel(master->dma_tx);
        if (master->dma_rx)
index 5131141bbf0d484d5faf89bb3827a730c9b5b4b8..e9de1d958bbd2a0cb595450b5d3ad3fae23e30c8 100644 (file)
@@ -717,12 +717,12 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
        dma_release_channel(dma_chan);
 }
 
-static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
-                                     struct spi_delay *setup,
-                                     struct spi_delay *hold,
-                                     struct spi_delay *inactive)
+static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
 {
        struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+       struct spi_delay *setup = &spi->cs_setup;
+       struct spi_delay *hold = &spi->cs_hold;
+       struct spi_delay *inactive = &spi->cs_inactive;
        u8 setup_dly, hold_dly, inactive_dly;
        u32 setup_hold;
        u32 spi_cs_timing;
index 6a726c95ac7a89a523a73ca10e1f3249e6852756..ebd27f88303315db415e077e75a14a34170337b6 100644 (file)
@@ -1061,33 +1061,12 @@ static int tegra_slink_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Can not get clock %d\n", ret);
                goto exit_free_master;
        }
-       ret = clk_prepare(tspi->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
-               goto exit_free_master;
-       }
-       ret = clk_enable(tspi->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
-               goto exit_clk_unprepare;
-       }
-
-       spi_irq = platform_get_irq(pdev, 0);
-       tspi->irq = spi_irq;
-       ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
-                       tegra_slink_isr_thread, IRQF_ONESHOT,
-                       dev_name(&pdev->dev), tspi);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
-                                       tspi->irq);
-               goto exit_clk_disable;
-       }
 
        tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
        if (IS_ERR(tspi->rst)) {
                dev_err(&pdev->dev, "can not get reset\n");
                ret = PTR_ERR(tspi->rst);
-               goto exit_free_irq;
+               goto exit_free_master;
        }
 
        tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
@@ -1095,7 +1074,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
 
        ret = tegra_slink_init_dma_param(tspi, true);
        if (ret < 0)
-               goto exit_free_irq;
+               goto exit_free_master;
        ret = tegra_slink_init_dma_param(tspi, false);
        if (ret < 0)
                goto exit_rx_dma_free;
@@ -1106,16 +1085,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
        init_completion(&tspi->xfer_completion);
 
        pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev)) {
-               ret = tegra_slink_runtime_resume(&pdev->dev);
-               if (ret)
-                       goto exit_pm_disable;
-       }
-
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0) {
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret) {
                dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
-               pm_runtime_put_noidle(&pdev->dev);
                goto exit_pm_disable;
        }
 
@@ -1123,33 +1095,43 @@ static int tegra_slink_probe(struct platform_device *pdev)
        udelay(2);
        reset_control_deassert(tspi->rst);
 
+       spi_irq = platform_get_irq(pdev, 0);
+       tspi->irq = spi_irq;
+       ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+                                  tegra_slink_isr_thread, IRQF_ONESHOT,
+                                  dev_name(&pdev->dev), tspi);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+                       tspi->irq);
+               goto exit_pm_put;
+       }
+
        tspi->def_command_reg  = SLINK_M_S;
        tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
        tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
        tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
-       pm_runtime_put(&pdev->dev);
 
        master->dev.of_node = pdev->dev.of_node;
-       ret = devm_spi_register_master(&pdev->dev, master);
+       ret = spi_register_master(master);
        if (ret < 0) {
                dev_err(&pdev->dev, "can not register to master err %d\n", ret);
-               goto exit_pm_disable;
+               goto exit_free_irq;
        }
+
+       pm_runtime_put(&pdev->dev);
+
        return ret;
 
+exit_free_irq:
+       free_irq(spi_irq, tspi);
+exit_pm_put:
+       pm_runtime_put(&pdev->dev);
 exit_pm_disable:
        pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               tegra_slink_runtime_suspend(&pdev->dev);
+
        tegra_slink_deinit_dma_param(tspi, false);
 exit_rx_dma_free:
        tegra_slink_deinit_dma_param(tspi, true);
-exit_free_irq:
-       free_irq(spi_irq, tspi);
-exit_clk_disable:
-       clk_disable(tspi->clk);
-exit_clk_unprepare:
-       clk_unprepare(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1160,10 +1142,11 @@ static int tegra_slink_remove(struct platform_device *pdev)
        struct spi_master *master = platform_get_drvdata(pdev);
        struct tegra_slink_data *tspi = spi_master_get_devdata(master);
 
+       spi_unregister_master(master);
+
        free_irq(tspi->irq, tspi);
 
-       clk_disable(tspi->clk);
-       clk_unprepare(tspi->clk);
+       pm_runtime_disable(&pdev->dev);
 
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
@@ -1171,10 +1154,6 @@ static int tegra_slink_remove(struct platform_device *pdev)
        if (tspi->rx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, true);
 
-       pm_runtime_disable(&pdev->dev);
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               tegra_slink_runtime_suspend(&pdev->dev);
-
        return 0;
 }
 
index 9262c6418463b6914aabad1bb0cfea00ce4f48e7..cfa222c9bd5e74f7794e74621cfc85359ccac477 100644 (file)
@@ -545,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
@@ -563,7 +563,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
@@ -579,7 +579,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
 
@@ -603,7 +603,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
                zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
                zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
                                ZYNQ_QSPI_IXR_RXTX_MASK);
-               if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
+               if (!wait_for_completion_timeout(&xqspi->data_completion,
                                                               msecs_to_jiffies(1000)))
                        err = -ETIMEDOUT;
        }
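
All four waits drop the interruptible variant. Its three-way return value is easy to mishandle: the !ret test above only caught the timeout case, so a pending signal (negative return) looked like success while the hardware was still busy. For reference, the return conventions of the two variants:

long ret;

ret = wait_for_completion_interruptible_timeout(&done, timeout);
/* ret < 0: interrupted by a signal; ret == 0: timed out; ret > 0: done */

ret = wait_for_completion_timeout(&done, timeout);
/* ret == 0: timed out; ret > 0: jiffies left; no signal case to handle */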
index e4dc593b1f32af91d8dda29723cb1a95423e6678..65d14af9c01529d1bc3d2f7dc00fb69c2165621d 100644 (file)
@@ -846,9 +846,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (activate)
-                       spi_delay_exec(&spi->controller->cs_setup, NULL);
+                       spi_delay_exec(&spi->cs_setup, NULL);
                else
-                       spi_delay_exec(&spi->controller->cs_hold, NULL);
+                       spi_delay_exec(&spi->cs_hold, NULL);
        }
 
        if (spi->mode & SPI_CS_HIGH)
@@ -891,7 +891,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
        if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
            !spi->controller->set_cs_timing) {
                if (!activate)
-                       spi_delay_exec(&spi->controller->cs_inactive, NULL);
+                       spi_delay_exec(&spi->cs_inactive, NULL);
        }
 }
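
With this change the CS setup/hold/inactive delays come from struct spi_device rather than the controller, so each client carries its own timing. A peripheral driver could request, for instance (values are illustrative):

spi->cs_setup.value = 10;
spi->cs_setup.unit  = SPI_DELAY_UNIT_NSECS;
spi->cs_hold.value  = 1;
spi->cs_hold.unit   = SPI_DELAY_UNIT_USECS;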
 
index b8e8fc8ddbe9cbedf4501b57c830ffaf3524d253..809d938ae1667f1c4c8fad802cd3592a8697609d 100644 (file)
@@ -9,12 +9,11 @@
 #include <linux/input.h>
 #include <linux/time.h>
 
-#include "video.h"
-#include "audio.h"
-#include "osd.h"
-
+#include <linux/dvb/video.h>
+#include <linux/dvb/audio.h>
 #include <linux/dvb/dmx.h>
 #include <linux/dvb/ca.h>
+#include <linux/dvb/osd.h>
 #include <linux/dvb/net.h>
 #include <linux/mutex.h>
 
index ef981d3b7bb49516dafe9ad96e2592756b6d85cc..cb72393f92d3a20034cde41a181f8118cdd2b98d 100644 (file)
@@ -2059,7 +2059,7 @@ static void restore_cur(struct vc_data *vc)
 
 enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
        EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
-       ESpalette, ESosc };
+       ESpalette, ESosc, ESapc, ESpm, ESdcs };
 
 /* console_lock is held (except via vc_init()) */
 static void reset_terminal(struct vc_data *vc, int do_clear)
@@ -2133,20 +2133,28 @@ static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
                vc->vc_translate = set_translate(*charset, vc);
 }
 
+/* is this state an ANSI control string? */
+static bool ansi_control_string(unsigned int state)
+{
+       if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
+               return true;
+       return false;
+}
+
 /* console_lock is held */
 static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
 {
        /*
         *  Control characters can be used in the _middle_
-        *  of an escape sequence.
+        *  of an escape sequence, aside from ANSI control strings.
         */
-       if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... except for OSC */
+       if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
                return;
        switch (c) {
        case 0:
                return;
        case 7:
-               if (vc->vc_state == ESosc)
+               if (ansi_control_string(vc->vc_state))
                        vc->vc_state = ESnormal;
                else if (vc->vc_bell_duration)
                        kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
@@ -2207,6 +2215,12 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                case ']':
                        vc->vc_state = ESnonstd;
                        return;
+               case '_':
+                       vc->vc_state = ESapc;
+                       return;
+               case '^':
+                       vc->vc_state = ESpm;
+                       return;
                case '%':
                        vc->vc_state = ESpercent;
                        return;
@@ -2224,6 +2238,9 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                        if (vc->state.x < VC_TABSTOPS_COUNT)
                                set_bit(vc->state.x, vc->vc_tab_stop);
                        return;
+               case 'P':
+                       vc->vc_state = ESdcs;
+                       return;
                case 'Z':
                        respond_ID(tty);
                        return;
@@ -2520,8 +2537,14 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
                vc_setGx(vc, 1, c);
                vc->vc_state = ESnormal;
                return;
+       case ESapc:
+               return;
        case ESosc:
                return;
+       case ESpm:
+               return;
+       case ESdcs:
+               return;
        default:
                vc->vc_state = ESnormal;
        }
index 0e0cd9e9e589ecd1d9139a0e42d7ced5d4caab42..3639bb6dc372e288db75960253de4663a0dede06 100644 (file)
@@ -246,6 +246,8 @@ int vt_waitactive(int n)
  *
  * XXX It should at least call into the driver, fbdev's definitely need to
  * restore their engine state. --BenH
+ *
+ * Called with the console lock held.
  */
 static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
 {
@@ -262,7 +264,6 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
                return -EINVAL;
        }
 
-       /* FIXME: this needs the console lock extending */
        if (vc->vc_mode == mode)
                return 0;
 
@@ -271,12 +272,10 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
                return 0;
 
        /* explicitly blank/unblank the screen if switching modes */
-       console_lock();
        if (mode == KD_TEXT)
                do_unblank_screen(1);
        else
                do_blank_screen(1);
-       console_unlock();
 
        return 0;
 }
@@ -378,7 +377,10 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
                if (!perm)
                        return -EPERM;
 
-               return vt_kdsetmode(vc, arg);
+               console_lock();
+               ret = vt_kdsetmode(vc, arg);
+               console_unlock();
+               return ret;
 
        case KDGETMODE:
                return put_user(vc->vc_mode, (int __user *)arg);
index b8d4b2d327b23fc6d7f35d7ae249535aa1f56b13..ccb68fe6202e263303bd96227de477083db0da29 100644 (file)
@@ -940,19 +940,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
 
 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 {
-       struct dwc3_trb         *tmp;
        u8                      trbs_left;
 
        /*
-        * If enqueue & dequeue are equal than it is either full or empty.
-        *
-        * One way to know for sure is if the TRB right before us has HWO bit
-        * set or not. If it has, then we're definitely full and can't fit any
-        * more transfers in our ring.
+        * If the enqueue and dequeue pointers are equal, the TRB ring is
+        * either full or empty. It is considered full when DWC3_TRB_NUM - 1
+        * TRBs are pending to be processed by the driver.
         */
        if (dep->trb_enqueue == dep->trb_dequeue) {
-               tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
-               if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+               /*
+                * If any request remains on the started_list at this point,
+                * there is no TRB available.
+                */
+               if (!list_empty(&dep->started_list))
                        return 0;
 
                return DWC3_TRB_NUM - 1;
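
The ambiguity resolved here is the classic ring-buffer one: equal enqueue and dequeue pointers can mean empty or full, and the driver now disambiguates via the started_list instead of peeking at the previous TRB's HWO bit. A generic sketch of such a ring (illustrative, not the driver's exact arithmetic; size must be a power of two):

static u32 ring_slots_free(u32 enq, u32 deq, u32 size, bool in_use)
{
        if (enq == deq)
                return in_use ? 0 : size - 1;   /* full vs. empty */
        /* one slot is kept unused so full and empty stay distinct */
        return ((deq - enq) & (size - 1)) - 1;
}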
@@ -1741,13 +1741,9 @@ static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
 {
        struct dwc3_request             *req;
        struct dwc3_request             *tmp;
-       struct list_head                local;
        struct dwc3                     *dwc = dep->dwc;
 
-restart:
-       list_replace_init(&dep->cancelled_list, &local);
-
-       list_for_each_entry_safe(req, tmp, &local, list) {
+       list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
                dwc3_gadget_ep_skip_trbs(dep, req);
                switch (req->status) {
                case DWC3_REQUEST_STATUS_DISCONNECTED:
@@ -1765,9 +1761,6 @@ restart:
                        break;
                }
        }
-
-       if (!list_empty(&dep->cancelled_list))
-               goto restart;
 }
 
 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
@@ -2250,10 +2243,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
                ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
                                msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
-               if (ret == 0) {
-                       dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
-                       return -ETIMEDOUT;
-               }
+               if (ret == 0)
+                       dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
        }
 
        /*
@@ -2465,6 +2456,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
        /* begin to receive SETUP packets */
        dwc->ep0state = EP0_SETUP_PHASE;
        dwc->link_state = DWC3_LINK_STATE_SS_DIS;
+       dwc->delayed_status = false;
        dwc3_ep0_out_start(dwc);
 
        dwc3_gadget_enable_irq(dwc);
@@ -2976,12 +2968,8 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
 {
        struct dwc3_request     *req;
        struct dwc3_request     *tmp;
-       struct list_head        local;
 
-restart:
-       list_replace_init(&dep->started_list, &local);
-
-       list_for_each_entry_safe(req, tmp, &local, list) {
+       list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
                int ret;
 
                ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
@@ -2989,9 +2977,6 @@ restart:
                if (ret)
                        break;
        }
-
-       if (!list_empty(&dep->started_list))
-               goto restart;
 }
 
 static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
index 018dd097899508e3ab5d96d6c20c68c53712e8a9..9e5c950612d066245d70c1e6e159364bd3ec950d 100644 (file)
@@ -230,7 +230,13 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
        int status = req->status;
 
        /* i/f shutting down */
-       if (!prm->fb_ep_enabled || req->status == -ESHUTDOWN)
+       if (!prm->fb_ep_enabled) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+               return;
+       }
+
+       if (req->status == -ESHUTDOWN)
                return;
 
        /*
@@ -388,8 +394,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
        if (!prm->ep_enabled)
                return;
 
-       prm->ep_enabled = false;
-
        audio_dev = uac->audio_dev;
        params = &audio_dev->params;
 
@@ -407,6 +411,8 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
                }
        }
 
+       prm->ep_enabled = false;
+
        if (usb_ep_disable(ep))
                dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
 }
@@ -418,15 +424,16 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
        if (!prm->fb_ep_enabled)
                return;
 
-       prm->fb_ep_enabled = false;
-
        if (prm->req_fback) {
-               usb_ep_dequeue(ep, prm->req_fback);
-               kfree(prm->req_fback->buf);
-               usb_ep_free_request(ep, prm->req_fback);
+               if (usb_ep_dequeue(ep, prm->req_fback)) {
+                       kfree(prm->req_fback->buf);
+                       usb_ep_free_request(ep, prm->req_fback);
+               }
                prm->req_fback = NULL;
        }
 
+       prm->fb_ep_enabled = false;
+
        if (usb_ep_disable(ep))
                dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
 }
index 5923844ed8218d3df99a55dc40298784dce761a9..ef5e91a5542d99b8eb99966c4c8a543f3d9a0a28 100644 (file)
@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
                        return 0;
 
                case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
-                       return 0;
+                       dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
+                       return -ENOENT;
 
                case RENESAS_ROM_STATUS_ERROR: /* Error State */
                default: /* All other states are marked as "Reserved states" */
@@ -224,14 +225,6 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
        u8 fw_state;
        int err;
 
-       /* Check if device has ROM and loaded, if so skip everything */
-       err = renesas_check_rom(pdev);
-       if (err) { /* we have rom */
-               err = renesas_check_rom_state(pdev);
-               if (!err)
-                       return err;
-       }
-
        /*
         * Test if the device is actually needing the firmware. As most
         * BIOSes will initialize the device for us. If the device is
@@ -591,21 +584,39 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev,
                        (struct xhci_driver_data *)id->driver_data;
        const char *fw_name = driver_data->firmware;
        const struct firmware *fw;
+       bool has_rom;
        int err;
 
+       /* Check if the device has a ROM with firmware loaded; if so, skip everything */
+       has_rom = renesas_check_rom(pdev);
+       if (has_rom) {
+               err = renesas_check_rom_state(pdev);
+               if (!err)
+                       return 0;
+               else if (err != -ENOENT)
+                       has_rom = false;
+       }
+
        err = renesas_fw_check_running(pdev);
        /* Continue ahead, if the firmware is already running. */
        if (err == 0)
                return 0;
 
+       /* no firmware interface available */
        if (err != 1)
-               return err;
+               return has_rom ? 0 : err;
 
        pci_dev_get(pdev);
-       err = request_firmware(&fw, fw_name, &pdev->dev);
+       err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
        pci_dev_put(pdev);
        if (err) {
-               dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
+               if (has_rom) {
+                       dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
+                                fw_name);
+                       return 0;
+               }
+               dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
+                       fw_name, err);
                return err;
        }
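
Taken together, the renesas hunks reorder the probe flow so that a ROM already holding working firmware short-circuits everything, and a present-but-unprogrammed ROM is remembered as a fallback for when the firmware file cannot be loaded. A compact sketch of that decision flow, with hypothetical stand-ins (rom_present, rom_state_ok, load_firmware) in place of the driver's helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the probe-time checks. */
    static bool rom_present(void)   { return true;  }
    static bool rom_state_ok(void)  { return false; } /* e.g. "no result yet" */
    static bool load_firmware(void) { return false; } /* request failed */

    /* Prefer a ROM that already holds working firmware; otherwise try the
     * firmware file; if that fails, fall back to the ROM when one exists. */
    static int check_request_fw(void)
    {
        bool has_rom = rom_present();

        if (has_rom && rom_state_ok())
            return 0;                  /* ROM firmware is already usable */

        if (!load_firmware())
            return has_rom ? 0 : -1;   /* fall back to ROM if we can */

        return 0;                      /* firmware file loaded fine */
    }

    int main(void)
    {
        printf("probe result: %d\n", check_request_fw());
        return 0;
    }
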
 
index 8a521b5ea769e9b93159c3b95a9832299441af80..2db917eab79952b3ae8e9d55a7a84c080c2e1881 100644 (file)
@@ -851,7 +851,6 @@ static struct usb_serial_driver ch341_device = {
                .owner  = THIS_MODULE,
                .name   = "ch341-uart",
        },
-       .bulk_in_size      = 512,
        .id_table          = id_table,
        .num_ports         = 1,
        .open              = ch341_open,
index 039450069ca45cbf75d90baa0bdedb1ee481ea8c..29c765cc849573eac29b2c071eb0479c63fee7c9 100644 (file)
@@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) | RSVD(5) },
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     /* Fibocom NL678 series */
          .driver_info = RSVD(6) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
index b9bb63d749ec9611a790674cc047524584626225..5d05de6665974fc503050fcaab0a1821e8b26a89 100644 (file)
@@ -341,6 +341,7 @@ struct tcpm_port {
        bool vbus_source;
        bool vbus_charge;
 
+       /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
        bool send_discover;
        bool op_vsafe5v;
 
@@ -370,6 +371,7 @@ struct tcpm_port {
        struct hrtimer send_discover_timer;
        struct kthread_work send_discover_work;
        bool state_machine_running;
+       /* Set to true while the VDM state machine still has actions to run. */
        bool vdm_sm_running;
 
        struct completion tx_complete;
@@ -1431,6 +1433,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
        /* Set ready, vdm state machine will actually send */
        port->vdm_retries = 0;
        port->vdm_state = VDM_STATE_READY;
+       port->vdm_sm_running = true;
 
        mod_vdm_delayed_work(port, 0);
 }
@@ -1673,7 +1676,6 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                                rlen = 1;
                        } else {
                                tcpm_register_partner_altmodes(port);
-                               port->vdm_sm_running = false;
                        }
                        break;
                case CMD_ENTER_MODE:
@@ -1721,14 +1723,12 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                                      (VDO_SVDM_VERS(svdm_version));
                        break;
                }
-               port->vdm_sm_running = false;
                break;
        default:
                response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
                rlen = 1;
                response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
                              (VDO_SVDM_VERS(svdm_version));
-               port->vdm_sm_running = false;
                break;
        }
 
@@ -1737,6 +1737,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
        return rlen;
 }
 
+static void tcpm_pd_handle_msg(struct tcpm_port *port,
+                              enum pd_msg_request message,
+                              enum tcpm_ams ams);
+
 static void tcpm_handle_vdm_request(struct tcpm_port *port,
                                    const __le32 *payload, int cnt)
 {
@@ -1764,11 +1768,25 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
                port->vdm_state = VDM_STATE_DONE;
        }
 
-       if (PD_VDO_SVDM(p[0])) {
+       if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
+               /*
+                * An SVDM was received (INIT, RSP, or unknown). Set vdm_sm_running in
+                * advance because we are about to drop the lock but may send VDMs soon.
+                * When an INIT is received:
+                *  - If there is no response to send, the flag is cleared later in this
+                *    function.
+                *  - If there are responses to send, it is cleared in the state machine.
+                * When an RSP is received:
+                *  - If there is no further INIT to send, the flag is cleared later in
+                *    this function.
+                *  - Otherwise it is cleared in the state machine on timeout, or control
+                *    returns here until there is no further INIT to send.
+                * When an unknown type is received:
+                *  - We send a NAK and the flag is cleared in the state machine.
+                */
+               port->vdm_sm_running = true;
                rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
        } else {
                if (port->negotiated_rev >= PD_REV30)
-                       tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
        }
 
        /*
@@ -1833,6 +1851,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
 
        if (rlen > 0)
                tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+       else
+               port->vdm_sm_running = false;
 }
 
 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
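
The long comment in the hunk above boils down to a flag discipline: claim vdm_sm_running before the port lock is dropped, and make sure exactly one exit path clears it, whether that is this function (nothing to send) or the state machine (response queued or NAKed). A generic userspace sketch of that claim-early, clear-once shape, with an illustrative sm_running flag and a pthread mutex rather than tcpm's types:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool sm_running;

    /* Claim the state machine before dropping the lock; every exit path
     * must clear the flag exactly once, either here or in the machine. */
    static void handle_message(bool have_response)
    {
        pthread_mutex_lock(&lock);
        sm_running = true;           /* set in advance: we unlock below */
        pthread_mutex_unlock(&lock);

        /* ... message processing that runs without the lock ... */

        pthread_mutex_lock(&lock);
        if (!have_response)
            sm_running = false;      /* nothing queued: clear it here */
        /* else: the state machine clears it once the response is sent */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        handle_message(false);
        return sm_running;           /* 0: the flag was cleared */
    }
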
@@ -1898,8 +1918,10 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                 * if there's traffic or we're not in PDO ready state don't send
                 * a VDM.
                 */
-               if (port->state != SRC_READY && port->state != SNK_READY)
+               if (port->state != SRC_READY && port->state != SNK_READY) {
+                       port->vdm_sm_running = false;
                        break;
+               }
 
                /* TODO: AMS operation for Unstructured VDM */
                if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
@@ -2471,10 +2493,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                                           NONE_AMS);
                break;
        case PD_DATA_VENDOR_DEF:
-               if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
-                       tcpm_handle_vdm_request(port, msg->payload, cnt);
-               else if (port->negotiated_rev > PD_REV20)
-                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+               tcpm_handle_vdm_request(port, msg->payload, cnt);
                break;
        case PD_DATA_BIST:
                port->bist_request = le32_to_cpu(msg->payload[0]);
@@ -2555,10 +2574,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                                                       TYPEC_PWR_MODE_PD,
                                                                       port->pps_data.active,
                                                                       port->supply_voltage);
-                               /* Set VDM running flag ASAP */
-                               if (port->data_role == TYPEC_HOST &&
-                                   port->send_discover)
-                                       port->vdm_sm_running = true;
                                tcpm_set_state(port, SNK_READY, 0);
                        } else {
                                /*
@@ -2596,14 +2611,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                switch (port->state) {
                case SNK_NEGOTIATE_CAPABILITIES:
                        /* USB PD specification, Figure 8-43 */
-                       if (port->explicit_contract) {
+                       if (port->explicit_contract)
                                next_state = SNK_READY;
-                               if (port->data_role == TYPEC_HOST &&
-                                   port->send_discover)
-                                       port->vdm_sm_running = true;
-                       } else {
+                       else
                                next_state = SNK_WAIT_CAPABILITIES;
-                       }
 
                        /* Threshold was relaxed before sending Request. Restore it back. */
                        tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
@@ -2618,10 +2629,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                        port->pps_status = (type == PD_CTRL_WAIT ?
                                            -EAGAIN : -EOPNOTSUPP);
 
-                       if (port->data_role == TYPEC_HOST &&
-                           port->send_discover)
-                               port->vdm_sm_running = true;
-
                        /* Threshold was relaxed before sending Request. Restore it back. */
                        tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
                                                               port->pps_data.active,
@@ -2697,10 +2704,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                        }
                        break;
                case DR_SWAP_SEND:
-                       if (port->data_role == TYPEC_DEVICE &&
-                           port->send_discover)
-                               port->vdm_sm_running = true;
-
                        tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
                        break;
                case PR_SWAP_SEND:
@@ -2738,7 +2741,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->vdm_sm_running) {
+                       if (port->send_discover) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2754,7 +2757,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->vdm_sm_running) {
+                       if (port->send_discover) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2763,7 +2766,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                }
                break;
        case PD_CTRL_VCONN_SWAP:
-               if (port->vdm_sm_running) {
+               if (port->send_discover) {
                        tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                        break;
                }
@@ -4479,18 +4482,20 @@ static void run_state_machine(struct tcpm_port *port)
        /* DR_Swap states */
        case DR_SWAP_SEND:
                tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
+               if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+                       port->send_discover = true;
                tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
                                    PD_T_SENDER_RESPONSE);
                break;
        case DR_SWAP_ACCEPT:
                tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
-               /* Set VDM state machine running flag ASAP */
-               if (port->data_role == TYPEC_DEVICE && port->send_discover)
-                       port->vdm_sm_running = true;
+               if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+                       port->send_discover = true;
                tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
                break;
        case DR_SWAP_SEND_TIMEOUT:
                tcpm_swap_complete(port, -ETIMEDOUT);
+               port->send_discover = false;
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
                break;
@@ -4502,7 +4507,6 @@ static void run_state_machine(struct tcpm_port *port)
                } else {
                        tcpm_set_roles(port, true, port->pwr_role,
                                       TYPEC_HOST);
-                       port->send_discover = true;
                }
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
@@ -4645,8 +4649,6 @@ static void run_state_machine(struct tcpm_port *port)
                break;
        case VCONN_SWAP_SEND_TIMEOUT:
                tcpm_swap_complete(port, -ETIMEDOUT);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case VCONN_SWAP_START:
@@ -4662,14 +4664,10 @@ static void run_state_machine(struct tcpm_port *port)
        case VCONN_SWAP_TURN_ON_VCONN:
                tcpm_set_vconn(port, true);
                tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case VCONN_SWAP_TURN_OFF_VCONN:
                tcpm_set_vconn(port, false);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, ready_state(port), 0);
                break;
 
@@ -4677,8 +4675,6 @@ static void run_state_machine(struct tcpm_port *port)
        case PR_SWAP_CANCEL:
        case VCONN_SWAP_CANCEL:
                tcpm_swap_complete(port, port->swap_status);
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                if (port->pwr_role == TYPEC_SOURCE)
                        tcpm_set_state(port, SRC_READY, 0);
                else
@@ -5028,9 +5024,6 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
        switch (port->state) {
        case SNK_TRANSITION_SINK_VBUS:
                port->explicit_contract = true;
-               /* Set the VDM flag ASAP */
-               if (port->data_role == TYPEC_HOST && port->send_discover)
-                       port->vdm_sm_running = true;
                tcpm_set_state(port, SNK_READY, 0);
                break;
        case SNK_DISCOVERY:
@@ -5425,15 +5418,18 @@ static void tcpm_send_discover_work(struct kthread_work *work)
        if (!port->send_discover)
                goto unlock;
 
+       if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
+               port->send_discover = false;
+               goto unlock;
+       }
+
        /* Retry if the port is not idle */
        if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
                mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
                goto unlock;
        }
 
-       /* Only send the Message if the port is host for PD rev2.0 */
-       if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
-               tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+       tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
 
 unlock:
        mutex_unlock(&port->lock);
index 21b78f1cd521ac9b533b9340059e1ddeffb49d9b..351c6cfb24c3a36c5bc2c045d397bbc5a68d0aa1 100644 (file)
@@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, NULL);
-       if (adapter == NULL) {
+       if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
-               return -ENOMEM;
+               return PTR_ERR(adapter);
        }
 
        pci_set_master(pdev);
index dcee6039e966829aa0948ac5e7c1d5327b10eb2c..e59135fa867eae1d15379f6ae011c75ec312f580 100644 (file)
@@ -512,11 +512,6 @@ out:
        mutex_unlock(&mr->mkey_mtx);
 }
 
-static bool map_empty(struct vhost_iotlb *iotlb)
-{
-       return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
-}
-
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
                             bool *change_map)
 {
@@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
        int err = 0;
 
        *change_map = false;
-       if (map_empty(iotlb)) {
-               mlx5_vdpa_destroy_mr(mvdev);
-               return 0;
-       }
        mutex_lock(&mr->mkey_mtx);
        if (mr->initialized) {
                mlx5_vdpa_info(mvdev, "memory map update\n");
index 2a31467f7ac5a0639a3d430c7551b9339c428f68..3cc12fcab08d13faabb86bcab5719e9123f43902 100644 (file)
@@ -526,7 +526,6 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
        void __iomem *uar_page = ndev->mvdev.res.uar->map;
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        struct mlx5_vdpa_cq *vcq = &mvq->cq;
-       unsigned int irqn;
        __be64 *pas;
        int inlen;
        void *cqc;
@@ -566,7 +565,7 @@ static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
        /* Use vector 0 by default. Consider adding code to choose least used
         * vector.
         */
-       err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+       err = mlx5_vector2eqn(mdev, 0, &eqn);
        if (err)
                goto err_vec;
 
@@ -753,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
        type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
 
        /* prefer split queue */
-       if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
-               return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+       if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
+               return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
 
-       WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
+       WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
 
-       return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+       return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
 }
 
 static bool vq_is_tx(u16 idx)
@@ -2030,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
                return -ENOSPC;
 
        mdev = mgtdev->madev->mdev;
+       if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
+           MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
+               dev_warn(mdev->device, "missing support for split virtqueues\n");
+               return -EOPNOTSUPP;
+       }
+
        /* we save one virtqueue for control virtqueue should we require it */
        max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
        max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
index 14e024de5cbf2d7d437bab32fb245e8abc5a8d09..c621cf7feec023cf60bcb5cc2a37b10feffe6834 100644 (file)
@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
 
        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
                                    dev_attr->name);
-       if (!vdpasim)
+       if (IS_ERR(vdpasim)) {
+               ret = PTR_ERR(vdpasim);
                goto err_alloc;
+       }
 
        vdpasim->dev_attr = *dev_attr;
        INIT_WORK(&vdpasim->work, dev_attr->work_fn);
index 7b4a6396c5535da485ae0755444bd2e775462ab0..fe05273298579901f35e303399ba38c57305cc21 100644 (file)
@@ -436,9 +436,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
                                    dev, &vp_vdpa_ops, NULL);
-       if (vp_vdpa == NULL) {
+       if (IS_ERR(vp_vdpa)) {
                dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
-               return -ENOMEM;
+               return PTR_ERR(vp_vdpa);
        }
 
        mdev = &vp_vdpa->mdev;
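
The three vdpa probe fixes above follow from one convention change: vdpa_alloc_device() now reports failure as a negative errno encoded in the returned pointer, so callers must test with IS_ERR() and recover the code with PTR_ERR() rather than comparing against NULL. A self-contained sketch of the encoding, mirroring the kernel's definitions (alloc_device here is a hypothetical stand-in):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095  /* errno values live in the top page of the address space */

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline bool IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* A hypothetical allocator in the new style: never returns NULL on error. */
    static void *alloc_device(int fail)
    {
        static int dummy;

        if (fail)
            return ERR_PTR(-ENOMEM);
        return &dummy;
    }

    int main(void)
    {
        void *dev = alloc_device(1);

        if (IS_ERR(dev))
            printf("alloc failed: %ld\n", PTR_ERR(dev));  /* -12 */
        return 0;
    }
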
index 210ab35a7ebfe9f94297b57ef676e4bfbb8f745a..9479f7f792173a44cdd27bc039cf7b9abc7681c2 100644 (file)
@@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
        long pinned;
        int ret = 0;
 
-       if (msg->iova < v->range.first ||
+       if (msg->iova < v->range.first || !msg->size ||
+           msg->iova > U64_MAX - msg->size + 1 ||
            msg->iova + msg->size - 1 > v->range.last)
                return -EINVAL;
 
index b9e853e6094de7de58222fa8ea2d0a31fd95d415..59edb5a1ffe28a934574c33cef5f861467b02970 100644 (file)
@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 }
 
+/* Make sure 64 bit math will not overflow. */
 static bool vhost_overflow(u64 uaddr, u64 size)
 {
-       /* Make sure 64 bit math will not overflow. */
-       return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+       if (uaddr > ULONG_MAX || size > ULONG_MAX)
+               return true;
+
+       if (!size)
+               return false;
+
+       return uaddr > ULONG_MAX - size + 1;
 }
 
 /* Caller should have vq mutex and device mutex. */
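
This hunk and the vhost_vdpa range check just above guard the same trap: computing uaddr + size directly can wrap. Rewriting the bound as uaddr > ULONG_MAX - size + 1 asks, without overflow, whether the last byte uaddr + size - 1 still fits, with size == 0 split out as its own case. A small sketch with boundary tests (the kernel version additionally rejects u64 values above ULONG_MAX on 32-bit builds):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Overflow-safe check that [addr, addr + size) fits in a 64-bit space.
     * Rewriting "addr + size - 1 > UINT64_MAX" as a subtraction avoids the
     * wraparound the naive addition would introduce. */
    static bool range_overflows(uint64_t addr, uint64_t size)
    {
        if (size == 0)
            return false;              /* empty range never overflows */
        return addr > UINT64_MAX - size + 1;
    }

    int main(void)
    {
        assert(!range_overflows(UINT64_MAX, 1));  /* last byte exactly fits */
        assert(range_overflows(UINT64_MAX, 2));   /* one byte past the end */
        assert(!range_overflows(0, 0));           /* empty range is fine */
        return 0;
    }
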
index 4af8fa259d65f83ccda6fa2104807f685d075599..14e2043d7685216c9a26adaf007228e2c47f276e 100644 (file)
@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
                        iov = wiov;
                else {
                        iov = riov;
-                       if (unlikely(wiov && wiov->i)) {
+                       if (unlikely(wiov && wiov->used)) {
                                vringh_bad("Readable desc %p after writable",
                                           &descs[i]);
                                err = -EINVAL;
index 4b15c00c0a0afc5de99b1af79cd277553a9322e1..49984d2cba2467ba6855c49e394a7b653cd3ddbe 100644 (file)
@@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
        INIT_LIST_HEAD(&dev->vqs);
+       spin_lock_init(&dev->vqs_list_lock);
 
        /*
         * device_add() causes the bus infrastructure to look for a matching
index 09ed55de07d7d4f25f4d26e35b6a1d1a6f04e3fc..b91bc810a87e166a79a1808f37794b1a94499c4c 100644 (file)
@@ -1242,12 +1242,19 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
                        do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
                                    VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
                }
+
+               /*
+                * virtio_mem_set_fake_offline() might sleep and we don't need
+                * the device anymore. See virtio_mem_remove() for how races
+                * between memory onlining and device removal are handled.
+                */
+               rcu_read_unlock();
+
                if (do_online)
                        generic_online_page(page, order);
                else
                        virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
                                                    false);
-               rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
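
The virtio-mem fix above is an instance of a general rule: a function that might sleep must not run inside an RCU read-side critical section, so the decision is computed while the lock is held and the blocking call is made after rcu_read_unlock(). The same shape sketched with a pthread rwlock standing in for the RCU reader; the names are illustrative, not virtio-mem's:

    #include <pthread.h>
    #include <unistd.h>

    static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int want_online;  /* shared state consulted by the handler */

    static void blocking_work(void) { sleep(1); }  /* may block for a while */

    static void handler(void)
    {
        pthread_rwlock_rdlock(&state_lock);
        int decision = want_online;          /* snapshot what we need ... */
        pthread_rwlock_unlock(&state_lock);  /* ... then drop the read lock */

        if (decision)
            blocking_work();                 /* safe: no read-side lock held */
    }

    int main(void)
    {
        handler();
        return 0;
    }
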
index 222d630c41fc9b5485a562be3b34bffce4142530..b35bb2d57f62c15b7a45a7a78e2322e0c2748c40 100644 (file)
@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);
 
+       /*
+        * Device is marked broken on surprise removal so that virtio upper
+        * layers can abort any ongoing operation.
+        */
+       if (!pci_device_is_present(pci_dev))
+               virtio_break_device(&vp_dev->vdev);
+
        pci_disable_sriov(pci_dev);
 
        unregister_virtio_device(&vp_dev->vdev);
index 89bfe46a8a7fafe61c77d47b36f3a5643bd0c091..dd95dfd85e9808575aeb65a04b86a49332d5db14 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
 #include <xen/xen.h>
 
 #ifdef DEBUG
@@ -1755,7 +1756,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
                        cpu_to_le16(vq->packed.event_flags_shadow);
        }
 
+       spin_lock(&vdev->vqs_list_lock);
        list_add_tail(&vq->vq.list, &vdev->vqs);
+       spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
 err_desc_extra:
@@ -2229,7 +2232,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
        memset(vq->split.desc_state, 0, vring.num *
                        sizeof(struct vring_desc_state_split));
 
+       spin_lock(&vdev->vqs_list_lock);
        list_add_tail(&vq->vq.list, &vdev->vqs);
+       spin_unlock(&vdev->vqs_list_lock);
        return &vq->vq;
 
 err_extra:
@@ -2291,6 +2296,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
+       spin_lock(&vq->vq.vdev->vqs_list_lock);
+       list_del(&_vq->list);
+       spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
        if (vq->we_own_ring) {
                if (vq->packed_ring) {
                        vring_free_queue(vq->vq.vdev,
@@ -2321,7 +2330,6 @@ void vring_del_virtqueue(struct virtqueue *_vq)
                kfree(vq->split.desc_state);
                kfree(vq->split.desc_extra);
        }
-       list_del(&_vq->list);
        kfree(vq);
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2373,7 +2381,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
-       return vq->broken;
+       return READ_ONCE(vq->broken);
 }
 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
 
@@ -2385,10 +2393,14 @@ void virtio_break_device(struct virtio_device *dev)
 {
        struct virtqueue *_vq;
 
+       spin_lock(&dev->vqs_list_lock);
        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);
-               vq->broken = true;
+
+               /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+               WRITE_ONCE(vq->broken, true);
        }
+       spin_unlock(&dev->vqs_list_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
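
Two mechanisms cooperate in the vring hunks above: the new vqs_list_lock serializes everyone who walks or edits the per-device vq list, and the broken flag moves to READ_ONCE()/WRITE_ONCE() so a concurrent reader sees one whole, untorn store that the compiler cannot elide or fuse. A userspace sketch of the marked-access half, using simplified forms of the kernel macros (relies on GNU C typeof):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified forms of the kernel's ONCE macros (GNU C typeof). */
    #define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

    static bool broken;

    /* Writer side: mark the device broken; pairs with the reader below. */
    static void break_device(void) { WRITE_ONCE(broken, true); }

    /* Reader side: the compiler must reload the flag on every call and may
     * not tear, fuse, or invent the access. */
    static bool is_broken(void) { return READ_ONCE(broken); }

    int main(void)
    {
        break_device();
        printf("broken: %d\n", is_broken());
        return 0;
    }
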
 
index e1a141135992f37a889849c3f4c0f29419088138..72eaef2caeb14e7e6e254b979ef37a5c71e03693 100644 (file)
@@ -151,6 +151,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
        if (!name)
                return NULL;
 
+       if (index >= vdpa->nvqs)
+               return ERR_PTR(-ENOENT);
+
        /* Queue shouldn't already be set up. */
        if (ops->get_vq_ready(vdpa, index))
                return ERR_PTR(-ENOENT);
index d7e361fb05482d43765243d222072a52441be0cf..a78704ae36186649ff2d0805cacc53700d339da0 100644 (file)
@@ -198,12 +198,12 @@ static void disable_dynirq(struct irq_data *data);
 
 static DEFINE_PER_CPU(unsigned int, irq_epoch);
 
-static void clear_evtchn_to_irq_row(unsigned row)
+static void clear_evtchn_to_irq_row(int *evtchn_row)
 {
        unsigned col;
 
        for (col = 0; col < EVTCHN_PER_ROW; col++)
-               WRITE_ONCE(evtchn_to_irq[row][col], -1);
+               WRITE_ONCE(evtchn_row[col], -1);
 }
 
 static void clear_evtchn_to_irq_all(void)
@@ -213,7 +213,7 @@ static void clear_evtchn_to_irq_all(void)
        for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
                if (evtchn_to_irq[row] == NULL)
                        continue;
-               clear_evtchn_to_irq_row(row);
+               clear_evtchn_to_irq_row(evtchn_to_irq[row]);
        }
 }
 
@@ -221,6 +221,7 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
 {
        unsigned row;
        unsigned col;
+       int *evtchn_row;
 
        if (evtchn >= xen_evtchn_max_channels())
                return -EINVAL;
@@ -233,11 +234,18 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
                if (irq == -1)
                        return 0;
 
-               evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
-               if (evtchn_to_irq[row] == NULL)
+               evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
+               if (evtchn_row == NULL)
                        return -ENOMEM;
 
-               clear_evtchn_to_irq_row(row);
+               clear_evtchn_to_irq_row(evtchn_row);
+
+               /*
+                * We've prepared an empty row for the mapping. If a different
+                * thread was faster at inserting one, we can drop ours.
+                */
+               if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
+                       free_page((unsigned long) evtchn_row);
        }
 
        WRITE_ONCE(evtchn_to_irq[row][col], irq);
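
The event-channel fix closes an allocate-then-publish race: each contender prepares a private, fully initialized row, a single cmpxchg() decides whose row is installed, and losers free their copy and proceed with the winner's. A self-contained C11 rendition of that once-only publication, with an illustrative single-row table:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ROW_LEN 64

    static _Atomic(int *) row_slot;   /* NULL until someone installs a row */

    static int *get_row(void)
    {
        int *row = atomic_load(&row_slot);
        if (row)
            return row;                        /* fast path: already there */

        int *fresh = calloc(ROW_LEN, sizeof(*fresh));
        if (!fresh)
            return NULL;

        int *expected = NULL;
        if (!atomic_compare_exchange_strong(&row_slot, &expected, fresh)) {
            free(fresh);      /* another thread was faster; drop our copy */
            return expected;  /* ... and use the row it installed */
        }
        return fresh;         /* we won: our row is now published */
    }

    int main(void)
    {
        printf("row at %p\n", (void *)get_row());
        return 0;
    }
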
@@ -1009,7 +1017,7 @@ static void __unbind_from_irq(unsigned int irq)
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
 {
-       int irq = -1;
+       int irq;
        struct physdev_irq irq_op;
        int ret;
 
index 59c32c9b799fcccc5bc77a84153c7a62e432141e..c4a2dc41beacc239a92e0b611336b38f1fb5c70b 100644 (file)
@@ -121,10 +121,6 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
@@ -312,10 +308,6 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
                filemap_write_and_wait(inode->i_mapping);
                invalidate_mapping_pages(&inode->i_data, 0, -1);
@@ -327,7 +319,6 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
                ret = v9fs_file_getlock(filp, fl);
        else
                ret = -EINVAL;
-out_err:
        return ret;
 }
 
@@ -348,10 +339,6 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd,
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
                 filp, cmd, fl, filp);
 
-       /* No mandatory locks */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if (!(fl->fl_flags & FL_FLOCK))
                goto out_err;
 
index a7749c126b8e2eb05bda028d83d2eb9c2aa27257..949128bf86c95bf23155d1024c258140f213525e 100644 (file)
@@ -101,16 +101,6 @@ config FILE_LOCKING
           for filesystems like NFS and for the flock() system
           call. Disabling this option saves about 11k.
 
-config MANDATORY_FILE_LOCKING
-       bool "Enable Mandatory file locking"
-       depends on FILE_LOCKING
-       default y
-       help
-         This option enables files appropriately marked files on appropriely
-         mounted filesystems to support mandatory locking.
-
-         To the best of my knowledge this is dead code that no one cares about.
-
 source "fs/crypto/Kconfig"
 
 source "fs/verity/Kconfig"
index cb3054c7843ea49355409eec8acb68de82ef4679..c4210a3964d8b3df6b5ed38feb2270e5387d95ad 100644 (file)
@@ -772,10 +772,6 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
               fl->fl_type, fl->fl_flags,
               (long long) fl->fl_start, (long long) fl->fl_end);
 
-       /* AFS doesn't support mandatory locks */
-       if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if (IS_GETLK(cmd))
                return afs_do_getlk(file, fl);
 
index 0117d867ecf87655e54ccd4cfcb20dfe09f39c90..bd5689fa290e78c22191d1e93b5e972b6f6ca88e 100644 (file)
@@ -629,7 +629,7 @@ again:
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
-       if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
+       if (inode_need_compress(BTRFS_I(inode), start, end)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
@@ -9226,8 +9226,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        bool dest_log_pinned = false;
        bool need_abort = false;
 
-       /* we only allow rename subvolume link between subvolumes */
-       if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+       /*
+        * For non-subvolumes, allow an exchange only within one subvolume, in
+        * the same inode namespace. Two subvolumes (represented as directories)
+        * can be exchanged, as they're logical links and have fixed inode
+        * numbers.
+        */
+       if (root != dest &&
+           (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
+            new_ino != BTRFS_FIRST_FREE_OBJECTID))
                return -EXDEV;
 
        /* close the race window with snapshot create/destroy ioctl */
index a1e2813731d14ff27cdb7cdd1c8e22562ea9693e..7e7a897ae0d3f5ac58fc9042e6b44925b5dbbceb 100644 (file)
@@ -1395,9 +1395,11 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                ret = VM_FAULT_SIGBUS;
        } else {
                struct address_space *mapping = inode->i_mapping;
-               struct page *page = find_or_create_page(mapping, 0,
-                                               mapping_gfp_constraint(mapping,
-                                               ~__GFP_FS));
+               struct page *page;
+
+               filemap_invalidate_lock_shared(mapping);
+               page = find_or_create_page(mapping, 0,
+                               mapping_gfp_constraint(mapping, ~__GFP_FS));
                if (!page) {
                        ret = VM_FAULT_OOM;
                        goto out_inline;
@@ -1418,6 +1420,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                vmf->page = page;
                ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
 out_inline:
+               filemap_invalidate_unlock_shared(mapping);
                dout("filemap_fault %p %llu read inline data ret %x\n",
                     inode, off, ret);
        }
index 7bdefd0c789a6604a4cd43a2f9652a19feb5c4c7..39db97f149b9ba7e066074ebe1b593e16e2eec97 100644 (file)
@@ -1743,7 +1743,11 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
 
 struct ceph_cap_flush *ceph_alloc_cap_flush(void)
 {
-       return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+       struct ceph_cap_flush *cf;
+
+       cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
+       cf->is_capsnap = false;
+       return cf;
 }
 
 void ceph_free_cap_flush(struct ceph_cap_flush *cf)
@@ -1778,7 +1782,7 @@ static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
                prev->wake = true;
                wake = false;
        }
-       list_del(&cf->g_list);
+       list_del_init(&cf->g_list);
        return wake;
 }
 
@@ -1793,7 +1797,7 @@ static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
                prev->wake = true;
                wake = false;
        }
-       list_del(&cf->i_list);
+       list_del_init(&cf->i_list);
        return wake;
 }
 
@@ -2352,7 +2356,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
        ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
 
        list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
-               if (!cf->caps) {
+               if (cf->is_capsnap) {
                        last_snap_flush = cf->tid;
                        break;
                }
@@ -2371,7 +2375,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
 
                first_tid = cf->tid + 1;
 
-               if (cf->caps) {
+               if (!cf->is_capsnap) {
                        struct cap_msg_args arg;
 
                        dout("kick_flushing_caps %p cap %p tid %llu %s\n",
@@ -3516,7 +3520,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                        cleaned = cf->caps;
 
                /* Is this a capsnap? */
-               if (cf->caps == 0)
+               if (cf->is_capsnap)
                        continue;
 
                if (cf->tid <= flush_tid) {
@@ -3589,8 +3593,9 @@ out:
        while (!list_empty(&to_remove)) {
                cf = list_first_entry(&to_remove,
                                      struct ceph_cap_flush, i_list);
-               list_del(&cf->i_list);
-               ceph_free_cap_flush(cf);
+               list_del_init(&cf->i_list);
+               if (!cf->is_capsnap)
+                       ceph_free_cap_flush(cf);
        }
 
        if (wake_ci)
@@ -4150,11 +4155,19 @@ bad:
 
 /*
  * Delayed work handler to process end of delayed cap release LRU list.
+ *
+ * If new caps are added to the list while it is being processed, they won't
+ * get processed in this run.  In that case, ci->i_hold_caps_max is returned
+ * so that the work can be rescheduled accordingly.
  */
-void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
+unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
 {
        struct inode *inode;
        struct ceph_inode_info *ci;
+       struct ceph_mount_options *opt = mdsc->fsc->mount_options;
+       unsigned long delay_max = opt->caps_wanted_delay_max * HZ;
+       unsigned long loop_start = jiffies;
+       unsigned long delay = 0;
 
        dout("check_delayed_caps\n");
        spin_lock(&mdsc->cap_delay_lock);
@@ -4162,6 +4175,11 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                ci = list_first_entry(&mdsc->cap_delay_list,
                                      struct ceph_inode_info,
                                      i_cap_delay_list);
+               if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
+                       dout("%s caps added recently.  Exiting loop", __func__);
+                       delay = ci->i_hold_caps_max;
+                       break;
+               }
                if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
                    time_before(jiffies, ci->i_hold_caps_max))
                        break;
@@ -4177,6 +4195,8 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                }
        }
        spin_unlock(&mdsc->cap_delay_lock);
+
+       return delay;
 }
 
 /*
index d1755ac1d964aadbafc0693d31f2b6438c9c823e..e1d605a02d4a39047836e27b81960cb2592dba90 100644 (file)
@@ -2088,6 +2088,7 @@ static long ceph_fallocate(struct file *file, int mode,
        if (ret < 0)
                goto unlock;
 
+       filemap_invalidate_lock(inode->i_mapping);
        ceph_zero_pagecache_range(inode, offset, length);
        ret = ceph_zero_objects(inode, offset, length);
 
@@ -2100,6 +2101,7 @@ static long ceph_fallocate(struct file *file, int mode,
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
+       filemap_invalidate_unlock(inode->i_mapping);
 
        ceph_put_cap_refs(ci, got);
 unlock:
index fa8a847743d08c07ef6c7dc0406722359042a537..bdeb271f47d9526263dbf8d2089246212ad0bc2c 100644 (file)
@@ -240,9 +240,6 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       /* No mandatory locks */
-       if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
 
        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
 
index 9db1b39df7737fb37284fe2a681e3559b411b653..0b69aec23e5c4846ba47cad4faaa70f78973db09 100644 (file)
@@ -1616,7 +1616,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                spin_lock(&mdsc->cap_dirty_lock);
 
                list_for_each_entry(cf, &to_remove, i_list)
-                       list_del(&cf->g_list);
+                       list_del_init(&cf->g_list);
 
                if (!list_empty(&ci->i_dirty_item)) {
                        pr_warn_ratelimited(
@@ -1668,8 +1668,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                struct ceph_cap_flush *cf;
                cf = list_first_entry(&to_remove,
                                      struct ceph_cap_flush, i_list);
-               list_del(&cf->i_list);
-               ceph_free_cap_flush(cf);
+               list_del_init(&cf->i_list);
+               if (!cf->is_capsnap)
+                       ceph_free_cap_flush(cf);
        }
 
        wake_up_all(&ci->i_cap_wq);
@@ -4490,22 +4491,29 @@ void inc_session_sequence(struct ceph_mds_session *s)
 }
 
 /*
- * delayed work -- periodically trim expired leases, renew caps with mds
+ * delayed work -- periodically trim expired leases, renew caps with mds.  If
+ * the @delay parameter is set to 0 or if it's more than 5 secs, the default
+ * workqueue delay value of 5 secs will be used.
  */
-static void schedule_delayed(struct ceph_mds_client *mdsc)
+static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
 {
-       int delay = 5;
-       unsigned hz = round_jiffies_relative(HZ * delay);
-       schedule_delayed_work(&mdsc->delayed_work, hz);
+       unsigned long max_delay = HZ * 5;
+
+       /* 5 secs default delay */
+       if (!delay || (delay > max_delay))
+               delay = max_delay;
+       schedule_delayed_work(&mdsc->delayed_work,
+                             round_jiffies_relative(delay));
 }
 
 static void delayed_work(struct work_struct *work)
 {
-       int i;
        struct ceph_mds_client *mdsc =
                container_of(work, struct ceph_mds_client, delayed_work.work);
+       unsigned long delay;
        int renew_interval;
        int renew_caps;
+       int i;
 
        dout("mdsc delayed_work\n");
 
@@ -4545,7 +4553,7 @@ static void delayed_work(struct work_struct *work)
        }
        mutex_unlock(&mdsc->mutex);
 
-       ceph_check_delayed_caps(mdsc);
+       delay = ceph_check_delayed_caps(mdsc);
 
        ceph_queue_cap_reclaim_work(mdsc);
 
@@ -4553,7 +4561,7 @@ static void delayed_work(struct work_struct *work)
 
        maybe_recover_session(mdsc);
 
-       schedule_delayed(mdsc);
+       schedule_delayed(mdsc, delay);
 }
 
 int ceph_mdsc_init(struct ceph_fs_client *fsc)
@@ -5030,7 +5038,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
                          mdsc->mdsmap->m_epoch);
 
        mutex_unlock(&mdsc->mutex);
-       schedule_delayed(mdsc);
+       schedule_delayed(mdsc, 0);
        return;
 
 bad_unlock:
index abd9af7727ad33b43e5e4fce8949209a0e2a0b19..3c444b9cb17b8ce3ce4238cf863cbbb2989d5277 100644 (file)
@@ -394,9 +394,11 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
 {
        int i;
 
-       for (i = 0; i < m->possible_max_rank; i++)
-               kfree(m->m_info[i].export_targets);
-       kfree(m->m_info);
+       if (m->m_info) {
+               for (i = 0; i < m->possible_max_rank; i++)
+                       kfree(m->m_info[i].export_targets);
+               kfree(m->m_info);
+       }
        kfree(m->m_data_pg_pools);
        kfree(m);
 }
index 4ac0606dcbd41e0b6266f31c83587f24973e3667..15105f9da3fd2ad9790fa4e5110c83cd6eb8ab70 100644 (file)
@@ -67,19 +67,19 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 {
        lockdep_assert_held(&mdsc->snap_rwsem);
 
-       dout("get_realm %p %d -> %d\n", realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
        /*
-        * since we _only_ increment realm refs or empty the empty
-        * list with snap_rwsem held, adjusting the empty list here is
-        * safe.  we do need to protect against concurrent empty list
-        * additions, however.
+        * The 0->1 and 1->0 transitions must take the snap_empty_lock
+        * atomically with the refcount change. Go ahead and bump the
+        * nref here, unless it's 0, in which case we take the spinlock
+        * and then do the increment and remove it from the list.
         */
-       if (atomic_inc_return(&realm->nref) == 1) {
-               spin_lock(&mdsc->snap_empty_lock);
+       if (atomic_inc_not_zero(&realm->nref))
+               return;
+
+       spin_lock(&mdsc->snap_empty_lock);
+       if (atomic_inc_return(&realm->nref) == 1)
                list_del_init(&realm->empty_item);
-               spin_unlock(&mdsc->snap_empty_lock);
-       }
+       spin_unlock(&mdsc->snap_empty_lock);
 }
 
 static void __insert_snap_realm(struct rb_root *root,
@@ -208,28 +208,28 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
 {
        lockdep_assert_held_write(&mdsc->snap_rwsem);
 
-       dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
+       /*
+        * We do not require the snap_empty_lock here, as any caller that
+        * increments the value must hold the snap_rwsem.
+        */
        if (atomic_dec_and_test(&realm->nref))
                __destroy_snap_realm(mdsc, realm);
 }
 
 /*
- * caller needn't hold any locks
+ * See comments in ceph_get_snap_realm. Caller needn't hold any locks.
  */
 void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
                         struct ceph_snap_realm *realm)
 {
-       dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
-            atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
-       if (!atomic_dec_and_test(&realm->nref))
+       if (!atomic_dec_and_lock(&realm->nref, &mdsc->snap_empty_lock))
                return;
 
        if (down_write_trylock(&mdsc->snap_rwsem)) {
+               spin_unlock(&mdsc->snap_empty_lock);
                __destroy_snap_realm(mdsc, realm);
                up_write(&mdsc->snap_rwsem);
        } else {
-               spin_lock(&mdsc->snap_empty_lock);
                list_add(&realm->empty_item, &mdsc->snap_empty);
                spin_unlock(&mdsc->snap_empty_lock);
        }
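
The snap-realm refcounting above is the dec-and-lock pattern: only the 0->1 and 1->0 transitions touch the empty list and therefore need snap_empty_lock, so every other get/put takes a lockless fast path. A userspace sketch with C11 atomics and a pthread mutex, mirroring what atomic_inc_not_zero() and atomic_dec_and_lock() provide (names here are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static pthread_mutex_t empty_lock = PTHREAD_MUTEX_INITIALIZER;

    struct realm { atomic_int nref; };

    /* inc_not_zero: bump the count only if it is already nonzero. */
    static bool realm_get_fast(struct realm *r)
    {
        int old = atomic_load(&r->nref);
        while (old != 0)
            if (atomic_compare_exchange_weak(&r->nref, &old, old + 1))
                return true;
        return false;  /* 0 -> 1 must be done under empty_lock instead */
    }

    /* dec_and_lock: returns true holding empty_lock iff the count hit 0. */
    static bool realm_put(struct realm *r)
    {
        int old = atomic_load(&r->nref);
        while (old > 1)                       /* fast path: not the last ref */
            if (atomic_compare_exchange_weak(&r->nref, &old, old - 1))
                return false;

        pthread_mutex_lock(&empty_lock);      /* might be the last ref */
        if (atomic_fetch_sub(&r->nref, 1) == 1)
            return true;                      /* caller cleans up, then unlocks */
        pthread_mutex_unlock(&empty_lock);
        return false;
    }

    int main(void)
    {
        struct realm r;

        atomic_init(&r.nref, 1);
        (void)realm_get_fast(&r);             /* 1 -> 2 without the lock */
        (void)realm_put(&r);                  /* 2 -> 1 without the lock */
        if (realm_put(&r))                    /* last ref: destroy under lock */
            pthread_mutex_unlock(&empty_lock);
        return 0;
    }
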
@@ -487,6 +487,9 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
                return;
        }
+       capsnap->cap_flush.is_capsnap = true;
+       INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
+       INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
 
        spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
index 6b6332a5c113cff2e7315fe9f1023d7a0dbdb1cd..b1a363641beb61bde14d99f5d0c7e69ea1c15eff 100644 (file)
@@ -182,8 +182,9 @@ struct ceph_cap {
 
 struct ceph_cap_flush {
        u64 tid;
-       int caps; /* 0 means capsnap */
+       int caps;
        bool wake; /* wake up flush waiters when finish ? */
+       bool is_capsnap; /* true means capsnap */
        struct list_head g_list; // global
        struct list_head i_list; // per inode
 };
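
The struct change retires a sentinel: caps == 0 used to secretly mean "this flush is a capsnap", an in-band signal every reader had to know about. The new is_capsnap flag makes the special case explicit, as in this isolated before/after sketch:

    #include <stdbool.h>

    /* Before: a magic value of an existing field carries a second meaning. */
    struct flush_old {
        int caps;              /* 0 secretly means "this is a capsnap" */
    };

    /* After: the special case gets its own flag, so caps == 0 is no longer
     * overloaded and every test reads as what it means. */
    struct flush_new {
        int  caps;
        bool is_capsnap;
    };

    static bool is_snap(const struct flush_new *f) { return f->is_capsnap; }

    int main(void)
    {
        struct flush_new f = { .caps = 0, .is_capsnap = true };
        return is_snap(&f) ? 0 : 1;
    }
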
@@ -1167,7 +1168,7 @@ extern void ceph_flush_snaps(struct ceph_inode_info *ci,
 extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
 extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                            struct ceph_mds_session *session);
-extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
+extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
 extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
 extern int  ceph_drop_caps_for_unlink(struct inode *inode);
 extern int ceph_encode_inode_release(void **p, struct inode *inode,
index c0bfc2f01030df4d1615f4866d4ad9a0d782cddf..c6a9542ca281b4129b08a2ae2b34f50f9c87c204 100644 (file)
@@ -1611,6 +1611,11 @@ struct dfs_info3_param {
        int ttl;
 };
 
+struct file_list {
+       struct list_head list;
+       struct cifsFileInfo *cfile;
+};
+
 /*
  * common struct for holding inode info when searching for or updating an
  * inode with new info
index 79402ca0ddfab6735ac366fad8872cb6a88d455b..5f8a302ffcb2fb3fcba6cd1b6ef814fc62be1806 100644 (file)
@@ -100,7 +100,7 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page,
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
                pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
 
-       s = dentry_path_raw(direntry, page, PAGE_SIZE);
+       s = dentry_path_raw(direntry, page, PATH_MAX);
        if (IS_ERR(s))
                return s;
        if (!s[1])      // for root we want "", not "/"
index 0a72840a88f1a014fcfe2db71734fc7a4f691fdc..bb98fbdd22a99aba28c3e22d7edf086ed9a19b25 100644 (file)
@@ -4847,17 +4847,6 @@ void cifs_oplock_break(struct work_struct *work)
                cifs_dbg(VFS, "Push locks rc = %d\n", rc);
 
 oplock_break_ack:
-       /*
-        * releasing stale oplock after recent reconnect of smb session using
-        * a now incorrect file handle is not a data integrity issue but do
-        * not bother sending an oplock release if session to server still is
-        * disconnected since oplock already released by the server
-        */
-       if (!cfile->oplock_break_cancelled) {
-               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
-                                                            cinode);
-               cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
-       }
        /*
         * When oplock break is received and there are no active
         * file handles but cached, then schedule deferred close immediately.
@@ -4865,17 +4854,27 @@ oplock_break_ack:
         */
        spin_lock(&CIFS_I(inode)->deferred_lock);
        is_deferred = cifs_is_deferred_close(cfile, &dclose);
+       spin_unlock(&CIFS_I(inode)->deferred_lock);
        if (is_deferred &&
            cfile->deferred_close_scheduled &&
            delayed_work_pending(&cfile->deferred)) {
-               /*
-                * If there is no pending work, mod_delayed_work queues new work.
-                * So, Increase the ref count to avoid use-after-free.
-                */
-               if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-                       cifsFileInfo_get(cfile);
+               if (cancel_delayed_work(&cfile->deferred)) {
+                       _cifsFileInfo_put(cfile, false, false);
+                       goto oplock_break_done;
+               }
        }
-       spin_unlock(&CIFS_I(inode)->deferred_lock);
+       /*
+        * releasing stale oplock after recent reconnect of smb session using
+        * a now incorrect file handle is not a data integrity issue but do
+        * not bother sending an oplock release if session to server still is
+        * disconnected since oplock already released by the server
+        */
+       if (!cfile->oplock_break_cancelled) {
+               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
+                                                            cinode);
+               cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+       }
+oplock_break_done:
        _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
        cifs_done_oplock_break(cinode);
 }
index b96b253e7635a8ce887d77b16f04a9a6d04e37fe..65f8a70cece33d3062afeb07c8c074b1da3980fc 100644 (file)
@@ -1625,7 +1625,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
                goto unlink_out;
        }
 
-       cifs_close_all_deferred_files(tcon);
+       cifs_close_deferred_file(CIFS_I(inode));
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2084,6 +2084,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
        FILE_UNIX_BASIC_INFO *info_buf_target;
        unsigned int xid;
        int rc, tmprc;
+       int retry_count = 0;
 
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;
@@ -2113,10 +2114,24 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
                goto cifs_rename_exit;
        }
 
-       cifs_close_all_deferred_files(tcon);
+       cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+       if (d_inode(target_dentry) != NULL)
+               cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+
        rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
                            to_name);
 
+       if (rc == -EACCES) {
+               while (retry_count < 3) {
+                       cifs_close_all_deferred_files(tcon);
+                       rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
+                                           to_name);
+                       if (rc != -EACCES)
+                               break;
+                       retry_count++;
+               }
+       }
+
        /*
         * No-replace is the natural behavior for CIFS, so skip unlink hacks.
         */
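
The -EACCES handling added above is a bounded-retry idiom: a rename can transiently fail while deferred closes elsewhere on the tcon still pin a handle, so close them all and retry a fixed number of times rather than looping forever. The generic shape, with do_rename() as a hypothetical stand-in for cifs_do_rename() and its arguments:

    /* Sketch: at most three retries; success, or any error other than
     * -EACCES, ends the loop immediately. */
    static int rename_with_retry(struct cifs_tcon *tcon)
    {
            int retries = 0;
            int rc = do_rename();                   /* hypothetical */

            while (rc == -EACCES && retries++ < 3) {
                    cifs_close_all_deferred_files(tcon);
                    rc = do_rename();               /* hypothetical */
            }
            return rc;
    }
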
index 844abeb2b48fe78ef5f46e85a0e6836cd2ce3e46..9469f1cf0b46a0eec11e21e1ca9cbec32b3f6b2b 100644 (file)
@@ -723,13 +723,31 @@ void
 cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
 {
        struct cifsFileInfo *cfile = NULL;
-       struct cifs_deferred_close *dclose;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
+
+       if (cifs_inode == NULL)
+               return;
 
+       INIT_LIST_HEAD(&file_head);
+       spin_lock(&cifs_inode->open_file_lock);
        list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
-               spin_lock(&cifs_inode->deferred_lock);
-               if (cifs_is_deferred_close(cfile, &dclose))
-                       mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
-               spin_unlock(&cifs_inode->deferred_lock);
+               if (delayed_work_pending(&cfile->deferred)) {
+                       if (cancel_delayed_work(&cfile->deferred)) {
+                               tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                               if (tmp_list == NULL)
+                                       continue;
+                               tmp_list->cfile = cfile;
+                               list_add_tail(&tmp_list->list, &file_head);
+                       }
+               }
+       }
+       spin_unlock(&cifs_inode->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
        }
 }
 
@@ -738,20 +756,30 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
 {
        struct cifsFileInfo *cfile;
        struct list_head *tmp;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
 
+       INIT_LIST_HEAD(&file_head);
        spin_lock(&tcon->open_file_lock);
        list_for_each(tmp, &tcon->openFileList) {
                cfile = list_entry(tmp, struct cifsFileInfo, tlist);
                if (delayed_work_pending(&cfile->deferred)) {
-                       /*
-                        * If there is no pending work, mod_delayed_work queues new work.
-                        * So, Increase the ref count to avoid use-after-free.
-                        */
-                       if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
-                               cifsFileInfo_get(cfile);
+                       if (cancel_delayed_work(&cfile->deferred)) {
+                               tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                               if (tmp_list == NULL)
+                                       continue;
+                               tmp_list->cfile = cfile;
+                               list_add_tail(&tmp_list->list, &file_head);
+                       }
                }
        }
        spin_unlock(&tcon->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
+       }
 }
 
 /* parses DFS referral V3 structure
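
Both helpers above now share one shape, worth spelling out: _cifsFileInfo_put() can sleep, so it must not run under open_file_lock. Files whose deferred work was successfully cancelled are therefore collected on a private list (GFP_ATOMIC, because the allocation happens under the spinlock) and the puts are issued after unlocking. A generic sketch of the pattern, with the lock and list passed in:

    static void put_cancelled_files(spinlock_t *lock, struct list_head *open_list)
    {
            struct cifsFileInfo *cfile;
            struct file_list *ent, *tmp;
            LIST_HEAD(to_put);

            spin_lock(lock);
            list_for_each_entry(cfile, open_list, flist) {
                    if (!cancel_delayed_work(&cfile->deferred))
                            continue;       /* work is running; it puts itself */
                    ent = kmalloc(sizeof(*ent), GFP_ATOMIC);
                    if (!ent)
                            continue;       /* best effort on allocation failure */
                    ent->cfile = cfile;
                    list_add_tail(&ent->list, &to_put);
            }
            spin_unlock(lock);

            list_for_each_entry_safe(ent, tmp, &to_put, list) {
                    _cifsFileInfo_put(ent->cfile, true, false);     /* may sleep */
                    list_del(&ent->list);
                    kfree(ent);
            }
    }
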
index 2dfd0d8297eb36c616bad312c99948ef1249da6d..ddc0e8f97872d26aceb823d2a65b0bb9f009e23c 100644 (file)
@@ -3590,6 +3590,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                return rc;
        }
 
+       filemap_invalidate_lock(inode->i_mapping);
        /*
         * We implement the punch hole through ioctl, so we need to remove the page
         * caches first, otherwise the data may be inconsistent with the server.
@@ -3607,6 +3608,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                        sizeof(struct file_zero_data_information),
                        CIFSMaxBufSize, NULL, NULL);
        free_xid(xid);
+       filemap_invalidate_unlock(inode->i_mapping);
        return rc;
 }
 
index 781d14e5f2afe282ef0195ce6c4c7efbff9695bd..b6d2e3591927825014a4fb1b6530f2e34dbd3285 100644 (file)
@@ -2426,7 +2426,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
        memcpy(aclptr, &acl, sizeof(struct cifs_acl));
 
        buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
-       *len = ptr - (__u8 *)buf;
+       *len = roundup(ptr - (__u8 *)buf, 8);
 
        return buf;
 }
index 5a0be9985bae1e7184a12a956581b3a2b6bf300d..0ad32150611ea6b26e7e7d1b36cec87d3a79758d 100644 (file)
@@ -177,28 +177,22 @@ out:
        return retval;
 }
 
-/* Fill [buffer, buffer + pos) with data coming from @from. */
-static int fill_write_buffer(struct configfs_buffer *buffer, loff_t pos,
+/* Fill @buffer with data coming from @from. */
+static int fill_write_buffer(struct configfs_buffer *buffer,
                             struct iov_iter *from)
 {
-       loff_t to_copy;
        int copied;
-       u8 *to;
 
        if (!buffer->page)
                buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
        if (!buffer->page)
                return -ENOMEM;
 
-       to_copy = SIMPLE_ATTR_SIZE - 1 - pos;
-       if (to_copy <= 0)
-               return 0;
-       to = buffer->page + pos;
-       copied = copy_from_iter(to, to_copy, from);
+       copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from);
        buffer->needs_read_fill = 1;
        /* if buf is assumed to contain a string, terminate it with \0
         * so that e.g. sscanf() can scan the string easily */
-       to[copied] = 0;
+       buffer->page[copied] = 0;
        return copied ? : -EFAULT;
 }
 
@@ -227,10 +221,10 @@ static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct configfs_buffer *buffer = file->private_data;
-       ssize_t len;
+       int len;
 
        mutex_lock(&buffer->mutex);
-       len = fill_write_buffer(buffer, iocb->ki_pos, from);
+       len = fill_write_buffer(buffer, from);
        if (len > 0)
                len = flush_write_buffer(file, buffer, len);
        if (len > 0)
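
The rewrite above drops the ki_pos-based offsetting and always fills the buffer from its start, capping the copy at SIMPLE_ATTR_SIZE - 1 so a terminating NUL always fits. The bounded copy-and-terminate idiom, sketched on its own:

    #include <linux/uio.h>

    /* Copy at most size - 1 bytes from the iterator and NUL-terminate so
     * sscanf()-style parsers can treat the buffer as a C string. */
    static int fill_buf(char *buf, size_t size, struct iov_iter *from)
    {
            size_t copied = copy_from_iter(buf, size - 1, from);

            buf[copied] = '\0';
            return copied ? copied : -EFAULT;
    }
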
index da41f9363568e06b012b486ef8063e269a9dcbb7..99b4e78d888f1c9775a9c5c8b3a5c0c9626ebe87 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -722,7 +722,7 @@ static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_d
                return rc;
 
        id = dax_read_lock();
-       rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
+       rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
index 54eec918562756007916f7e4182b90a204184c5e..1248ff4ef56254dd1fbe04e3bfd1d66ded0b3ba1 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config EXT2_FS
        tristate "Second extended fs support"
+       select FS_IOMAP
        help
          Ext2 is a standard Linux file system for hard disks.
 
index e512630cb63eda9858f5122af5977338be8194f4..3be9dd6412b789be147cc7c331124df964dfa481 100644 (file)
@@ -667,9 +667,6 @@ struct ext2_inode_info {
        struct rw_semaphore xattr_sem;
 #endif
        rwlock_t i_meta_lock;
-#ifdef CONFIG_FS_DAX
-       struct rw_semaphore dax_sem;
-#endif
 
        /*
         * truncate_mutex is for serialising ext2_truncate() against
@@ -685,14 +682,6 @@ struct ext2_inode_info {
 #endif
 };
 
-#ifdef CONFIG_FS_DAX
-#define dax_sem_down_write(ext2_inode) down_write(&(ext2_inode)->dax_sem)
-#define dax_sem_up_write(ext2_inode)   up_write(&(ext2_inode)->dax_sem)
-#else
-#define dax_sem_down_write(ext2_inode)
-#define dax_sem_up_write(ext2_inode)
-#endif
-
 /*
  * Inode dynamic state flags
  */
index f98466acc672a4dc7fb7ddf5a62642bcc3e09ab2..eb97aa3d700eb8c3af0be6372808587e64a4a55b 100644 (file)
@@ -81,7 +81,7 @@ out_unlock:
  *
  * mmap_lock (MM)
  *   sb_start_pagefault (vfs, freeze)
- *     ext2_inode_info->dax_sem
+ *     address_space->invalidate_lock
  *       address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
  *         ext2_inode_info->truncate_mutex
  *
@@ -91,7 +91,6 @@ out_unlock:
 static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
 {
        struct inode *inode = file_inode(vmf->vma->vm_file);
-       struct ext2_inode_info *ei = EXT2_I(inode);
        vm_fault_t ret;
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
@@ -100,11 +99,11 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
                sb_start_pagefault(inode->i_sb);
                file_update_time(vmf->vma->vm_file);
        }
-       down_read(&ei->dax_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
 
        ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
 
-       up_read(&ei->dax_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
        if (write)
                sb_end_pagefault(inode->i_sb);
        return ret;
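
The ext2 conversion above is the template followed by every filesystem in this series: faults take mapping->invalidate_lock shared, truncate and hole punch take it exclusive, giving the same fault-vs-truncate exclusion the private dax_sem provided, but through a lock the VFS itself knows about. The protocol in miniature:

    /* fault side: shared, so concurrent faults do not serialize */
    static vm_fault_t dax_fault_sketch(struct vm_fault *vmf)
    {
            struct inode *inode = file_inode(vmf->vma->vm_file);
            vm_fault_t ret;

            filemap_invalidate_lock_shared(inode->i_mapping);
            ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
            filemap_invalidate_unlock_shared(inode->i_mapping);
            return ret;
    }

    /* truncate side: exclusive, excluding all faults for the duration */
    static void truncate_sketch(struct inode *inode, loff_t offset)
    {
            filemap_invalidate_lock(inode->i_mapping);
            __ext2_truncate_blocks(inode, offset);
            filemap_invalidate_unlock(inode->i_mapping);
    }
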
index dadb121beb227c2f7a8f4c9c507799728afea517..333fa62661d56ea9d0135d19e1d6df8be39b4f53 100644 (file)
@@ -799,7 +799,6 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
 
 }
 
-#ifdef CONFIG_FS_DAX
 static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned flags, struct iomap *iomap, struct iomap *srcmap)
 {
@@ -852,16 +851,18 @@ const struct iomap_ops ext2_iomap_ops = {
        .iomap_begin            = ext2_iomap_begin,
        .iomap_end              = ext2_iomap_end,
 };
-#else
-/* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
-const struct iomap_ops ext2_iomap_ops;
-#endif /* CONFIG_FS_DAX */
 
 int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len,
-                                   ext2_get_block);
+       int ret;
+
+       inode_lock(inode);
+       len = min_t(u64, len, i_size_read(inode));
+       ret = iomap_fiemap(inode, fieinfo, start, len, &ext2_iomap_ops);
+       inode_unlock(inode);
+
+       return ret;
 }
 
 static int ext2_writepage(struct page *page, struct writeback_control *wbc)
@@ -1177,7 +1178,7 @@ static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int de
                ext2_free_data(inode, p, q);
 }
 
-/* dax_sem must be held when calling this function */
+/* mapping->invalidate_lock must be held when calling this function */
 static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
 {
        __le32 *i_data = EXT2_I(inode)->i_data;
@@ -1194,7 +1195,7 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
        iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
 
 #ifdef CONFIG_FS_DAX
-       WARN_ON(!rwsem_is_locked(&ei->dax_sem));
+       WARN_ON(!rwsem_is_locked(&inode->i_mapping->invalidate_lock));
 #endif
 
        n = ext2_block_to_path(inode, iblock, offsets, NULL);
@@ -1276,9 +1277,9 @@ static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
        if (ext2_inode_is_fast_symlink(inode))
                return;
 
-       dax_sem_down_write(EXT2_I(inode));
+       filemap_invalidate_lock(inode->i_mapping);
        __ext2_truncate_blocks(inode, offset);
-       dax_sem_up_write(EXT2_I(inode));
+       filemap_invalidate_unlock(inode->i_mapping);
 }
 
 static int ext2_setsize(struct inode *inode, loff_t newsize)
@@ -1308,10 +1309,10 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
        if (error)
                return error;
 
-       dax_sem_down_write(EXT2_I(inode));
+       filemap_invalidate_lock(inode->i_mapping);
        truncate_setsize(inode, newsize);
        __ext2_truncate_blocks(inode, newsize);
-       dax_sem_up_write(EXT2_I(inode));
+       filemap_invalidate_unlock(inode->i_mapping);
 
        inode->i_mtime = inode->i_ctime = current_time(inode);
        if (inode_needs_sync(inode)) {
index 21e09fbaa46f2ff04cb4aafbfd458aae984a742f..987bcf32ed469e9a8594bd4f2b70af1678ddd607 100644 (file)
@@ -206,9 +206,6 @@ static void init_once(void *foo)
        init_rwsem(&ei->xattr_sem);
 #endif
        mutex_init(&ei->truncate_mutex);
-#ifdef CONFIG_FS_DAX
-       init_rwsem(&ei->dax_sem);
-#endif
        inode_init_once(&ei->vfs_inode);
 }
 
index 3c51e243450db76494cc6f9f4dd8f713c27fc6f4..7ebaf66b6e313e2839266d9b436ef7a2a42047b8 100644 (file)
@@ -1086,15 +1086,6 @@ struct ext4_inode_info {
         * by other means, so we have i_data_sem.
         */
        struct rw_semaphore i_data_sem;
-       /*
-        * i_mmap_sem is for serializing page faults with truncate / punch hole
-        * operations. We have to make sure that new page cannot be faulted in
-        * a section of the inode that is being punched. We cannot easily use
-        * i_data_sem for this since we need protection for the whole punch
-        * operation and i_data_sem ranks below transaction start so we have
-        * to occasionally drop it.
-        */
-       struct rw_semaphore i_mmap_sem;
        struct inode vfs_inode;
        struct jbd2_inode *jinode;
 
@@ -2972,7 +2963,6 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
 extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
                             loff_t lstart, loff_t lend);
 extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
-extern vm_fault_t ext4_filemap_fault(struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern int ext4_get_projid(struct inode *inode, kprojid_t *projid);
 extern void ext4_da_release_space(struct inode *inode, int to_free);
index 92ad64b89d9b52e14a7d1295d5c69a59773953de..c33e0a2cb6c389558fe391ae654c81946e677ae5 100644 (file)
@@ -4474,6 +4474,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                            loff_t len, int mode)
 {
        struct inode *inode = file_inode(file);
+       struct address_space *mapping = file->f_mapping;
        handle_t *handle = NULL;
        unsigned int max_blocks;
        loff_t new_size = 0;
@@ -4560,17 +4561,17 @@ static long ext4_zero_range(struct file *file, loff_t offset,
                 * Prevent page faults from reinstantiating pages we have
                 * released from page cache.
                 */
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
 
                ret = ext4_break_layouts(inode);
                if (ret) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        goto out_mutex;
                }
 
                ret = ext4_update_disksize_before_punch(inode, offset, len);
                if (ret) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        goto out_mutex;
                }
                /* Now release the pages and zero block aligned part of pages */
@@ -4579,7 +4580,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
                ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
                                             flags);
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                if (ret)
                        goto out_mutex;
        }
@@ -5221,6 +5222,7 @@ out:
 static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct super_block *sb = inode->i_sb;
+       struct address_space *mapping = inode->i_mapping;
        ext4_lblk_t punch_start, punch_stop;
        handle_t *handle;
        unsigned int credits;
@@ -5274,7 +5276,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -5289,15 +5291,15 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
         * Write tail of the last page before removed range since it will get
         * removed from the page cache below.
         */
-       ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
+       ret = filemap_write_and_wait_range(mapping, ioffset, offset);
        if (ret)
                goto out_mmap;
        /*
         * Write data that will be shifted to preserve it when discarding
         * page cache below. We are also protected from pages becoming dirty
-        * by i_mmap_sem.
+        * by i_rwsem and invalidate_lock.
         */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
+       ret = filemap_write_and_wait_range(mapping, offset + len,
                                           LLONG_MAX);
        if (ret)
                goto out_mmap;
@@ -5350,7 +5352,7 @@ out_stop:
        ext4_journal_stop(handle);
        ext4_fc_stop_ineligible(sb);
 out_mmap:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
@@ -5367,6 +5369,7 @@ out_mutex:
 static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct super_block *sb = inode->i_sb;
+       struct address_space *mapping = inode->i_mapping;
        handle_t *handle;
        struct ext4_ext_path *path;
        struct ext4_extent *extent;
@@ -5425,7 +5428,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -5526,7 +5529,7 @@ out_stop:
        ext4_journal_stop(handle);
        ext4_fc_stop_ineligible(sb);
 out_mmap:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
index 816dedcbd541e025432ee5606bea242512397169..d3b4ed91aa6828b60ec351615148a157417955aa 100644 (file)
@@ -704,22 +704,23 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
+       struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pfn_t pfn;
 
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock_shared(mapping);
 retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
-                       up_read(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock_shared(mapping);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
-               down_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock_shared(mapping);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
@@ -731,10 +732,10 @@ retry:
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(mapping);
                sb_end_pagefault(sb);
        } else {
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(mapping);
        }
 
        return result;
@@ -756,7 +757,7 @@ static const struct vm_operations_struct ext4_dax_vm_ops = {
 #endif
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
-       .fault          = ext4_filemap_fault,
+       .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
 };
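
The vm_ops change above works because, as of this series, generic filemap_fault() takes invalidate_lock_shared internally whenever it has to instantiate page cache pages, so a wrapper whose only job was to take the per-inode lock around it becomes a pass-through (the f2fs hunks below make the same removal). What the deleted wrapper amounted to, for reference:

    static vm_fault_t old_fault_wrapper(struct vm_fault *vmf)
    {
            struct inode *inode = file_inode(vmf->vma->vm_file);
            vm_fault_t ret;

            /* Redundant now: filemap_fault() itself locks the mapping
             * shared when it needs to fill holes in the page cache. */
            filemap_invalidate_lock_shared(inode->i_mapping);
            ret = filemap_fault(vmf);
            filemap_invalidate_unlock_shared(inode->i_mapping);
            return ret;
    }
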
index d8de607849df3e20a8eae11f53dd019fbcb63185..325c038e7b232b3babf20171477f88d5dd7da420 100644 (file)
@@ -3950,20 +3950,19 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
        return ret;
 }
 
-static void ext4_wait_dax_page(struct ext4_inode_info *ei)
+static void ext4_wait_dax_page(struct inode *inode)
 {
-       up_write(&ei->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        schedule();
-       down_write(&ei->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 }
 
 int ext4_break_layouts(struct inode *inode)
 {
-       struct ext4_inode_info *ei = EXT4_I(inode);
        struct page *page;
        int error;
 
-       if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
+       if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
                return -EINVAL;
 
        do {
@@ -3974,7 +3973,7 @@ int ext4_break_layouts(struct inode *inode)
                error = ___wait_var_event(&page->_refcount,
                                atomic_read(&page->_refcount) == 1,
                                TASK_INTERRUPTIBLE, 0, 0,
-                               ext4_wait_dax_page(ei));
+                               ext4_wait_dax_page(inode));
        } while (error == 0);
 
        return error;
@@ -4005,9 +4004,9 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
        ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
        if (ext4_has_inline_data(inode)) {
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
                ret = ext4_convert_inline_data(inode);
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                if (ret)
                        return ret;
        }
@@ -4058,7 +4057,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = ext4_break_layouts(inode);
        if (ret)
@@ -4131,7 +4130,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 out_stop:
        ext4_journal_stop(handle);
 out_dio:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 out_mutex:
        inode_unlock(inode);
        return ret;
@@ -5426,11 +5425,11 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                        inode_dio_wait(inode);
                }
 
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
 
                rc = ext4_break_layouts(inode);
                if (rc) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        goto err_out;
                }
 
@@ -5506,7 +5505,7 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                                error = rc;
                }
 out_mmap_sem:
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
        }
 
        if (!error) {
@@ -5983,10 +5982,10 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
         * data (and journalled aops don't know how to handle these cases).
         */
        if (val) {
-               down_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
                err = filemap_write_and_wait(inode->i_mapping);
                if (err < 0) {
-                       up_write(&EXT4_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        return err;
                }
        }
@@ -6019,7 +6018,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
        percpu_up_write(&sbi->s_writepages_rwsem);
 
        if (val)
-               up_write(&EXT4_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        /* Finally we can mark the inode as dirty. */
 
@@ -6063,7 +6062,7 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
        sb_start_pagefault(inode->i_sb);
        file_update_time(vma->vm_file);
 
-       down_read(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(mapping);
 
        err = ext4_convert_inline_data(inode);
        if (err)
@@ -6176,7 +6175,7 @@ retry_alloc:
 out_ret:
        ret = block_page_mkwrite_return(err);
 out:
-       up_read(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(mapping);
        sb_end_pagefault(inode->i_sb);
        return ret;
 out_error:
@@ -6184,15 +6183,3 @@ out_error:
        ext4_journal_stop(handle);
        goto out;
 }
-
-vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
-{
-       struct inode *inode = file_inode(vmf->vma->vm_file);
-       vm_fault_t ret;
-
-       down_read(&EXT4_I(inode)->i_mmap_sem);
-       ret = filemap_fault(vmf);
-       up_read(&EXT4_I(inode)->i_mmap_sem);
-
-       return ret;
-}
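
One subtlety in the ext4_wait_dax_page() change above: the helper runs from ___wait_var_event() while the caller holds invalidate_lock exclusively, and whoever still holds the extra page reference may need that very lock to release it. The helper therefore has to drop the lock around schedule() and retake it before the loop re-checks the refcount, or the wait could never end. The shape of such a helper, generically:

    static void wait_page_idle(struct address_space *mapping)
    {
            /* Let the holder of the page reference make progress, sleep
             * until woken, then reacquire before the caller re-evaluates
             * its wait condition. */
            filemap_invalidate_unlock(mapping);
            schedule();
            filemap_invalidate_lock(mapping);
    }
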
index 6eed6170aded6739dee6f72fb2fdf0b5d1bb5395..4fb5fe083c2bcb75d5cd0c3a8ecd53b314a97729 100644 (file)
@@ -148,7 +148,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
                goto journal_err_out;
        }
 
-       down_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        err = filemap_write_and_wait(inode->i_mapping);
        if (err)
                goto err_out;
@@ -256,7 +256,7 @@ err_out1:
        ext4_double_up_write_data_sem(inode, inode_bl);
 
 err_out:
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 journal_err_out:
        unlock_two_nondirectories(inode, inode_bl);
        iput(inode_bl);
index dfa09a277b56fb4a1add7b5c28d8e75030c65023..d6df62fc810c886de1dcfbe375d6b57a4a576ae2 100644 (file)
@@ -90,12 +90,9 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
 /*
  * Lock ordering
  *
- * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
- * i_mmap_rwsem (inode->i_mmap_rwsem)!
- *
  * page fault path:
- * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
- *   page lock -> i_data_sem (rw)
+ * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
+ *   -> page lock -> i_data_sem (rw)
  *
  * buffered write path:
  * sb_start_write -> i_mutex -> mmap_lock
@@ -103,8 +100,9 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
  *   i_data_sem (rw)
  *
  * truncate:
- * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
- * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
+ * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
+ *   page lock
+ * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
  *   i_data_sem (rw)
  *
  * direct IO:
@@ -1360,7 +1358,6 @@ static void init_once(void *foo)
        INIT_LIST_HEAD(&ei->i_orphan);
        init_rwsem(&ei->xattr_sem);
        init_rwsem(&ei->i_data_sem);
-       init_rwsem(&ei->i_mmap_sem);
        inode_init_once(&ei->vfs_inode);
        ext4_fc_init_inode(&ei->vfs_inode);
 }
index bcbe3668c1d4e1bec174014b7fde38ce987155d6..ce84aa2786c7e631fc48fecdafe9a9ccbb2be26e 100644 (file)
  */
 static inline void ext4_truncate_failed_write(struct inode *inode)
 {
+       struct address_space *mapping = inode->i_mapping;
+
        /*
         * We don't need to call ext4_break_layouts() because the blocks we
         * are truncating were never visible to userspace.
         */
-       down_write(&EXT4_I(inode)->i_mmap_sem);
-       truncate_inode_pages(inode->i_mapping, inode->i_size);
+       filemap_invalidate_lock(mapping);
+       truncate_inode_pages(mapping, inode->i_size);
        ext4_truncate(inode);
-       up_write(&EXT4_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 }
 
 /*
index d2cf48c5a2e4968b2321c1a2c9b302b76e012221..eb222b35edefa5eccc9b98afab567094012fd3bf 100644 (file)
@@ -3187,12 +3187,12 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
        /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
        if (to > i_size && !f2fs_verity_in_progress(inode)) {
                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-               down_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
 
                truncate_pagecache(inode, i_size);
                f2fs_truncate_blocks(inode, i_size, true);
 
-               up_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        }
 }
@@ -3852,7 +3852,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
        int ret = 0;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        set_inode_flag(inode, FI_ALIGNED_WRITE);
 
@@ -3894,7 +3894,7 @@ done:
        clear_inode_flag(inode, FI_DO_DEFRAG);
        clear_inode_flag(inode, FI_ALIGNED_WRITE);
 
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
        return ret;
index ee8eb33e2c25ccb076a20269f7dabce513d496b9..906b2c4b50e78c0138c0de2d52c0036bf9ebd201 100644 (file)
@@ -754,7 +754,6 @@ struct f2fs_inode_info {
 
        /* avoid racing between foreground op and gc */
        struct rw_semaphore i_gc_rwsem[2];
-       struct rw_semaphore i_mmap_sem;
        struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
 
        int i_extra_isize;              /* size of extra space located in i_addr */
index 6afd4562335fc0a3e6beb67d9ed6d041fffa19d9..1ff333755721efbaa123fe40806d9936ce822cff 100644 (file)
@@ -38,10 +38,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;
 
-       down_read(&F2FS_I(inode)->i_mmap_sem);
        ret = filemap_fault(vmf);
-       up_read(&F2FS_I(inode)->i_mmap_sem);
-
        if (!ret)
                f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
                                                        F2FS_BLKSIZE);
@@ -101,7 +98,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
        f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
        file_update_time(vmf->vma->vm_file);
-       down_read(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                        page_offset(page) > i_size_read(inode) ||
@@ -159,7 +156,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
        trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
-       up_read(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
 err:
@@ -940,7 +937,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                }
 
                down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-               down_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
 
                truncate_setsize(inode, attr->ia_size);
 
@@ -950,7 +947,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                 * do not trim all blocks after i_size if target size is
                 * larger than i_size.
                 */
-               up_write(&F2FS_I(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                if (err)
                        return err;
@@ -1095,7 +1092,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
                        blk_end = (loff_t)pg_end << PAGE_SHIFT;
 
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(mapping);
 
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
@@ -1104,7 +1101,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
                        ret = f2fs_truncate_hole(inode, pg_start, pg_end);
                        f2fs_unlock_op(sbi);
 
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
        }
@@ -1339,7 +1336,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 
        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        f2fs_lock_op(sbi);
        f2fs_drop_extent_tree(inode);
@@ -1347,7 +1344,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
        ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
        f2fs_unlock_op(sbi);
 
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        return ret;
 }
@@ -1378,13 +1375,13 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                return ret;
 
        /* write out all moved pages, if possible */
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
 
        new_size = i_size_read(inode) - len;
        ret = f2fs_truncate_blocks(inode, new_size, true);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
        return ret;
@@ -1484,7 +1481,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        pgoff_t end;
 
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(mapping);
 
                        truncate_pagecache_range(inode,
                                (loff_t)index << PAGE_SHIFT,
@@ -1496,7 +1493,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
                        if (ret) {
                                f2fs_unlock_op(sbi);
-                               up_write(&F2FS_I(inode)->i_mmap_sem);
+                               filemap_invalidate_unlock(mapping);
                                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                                goto out;
                        }
@@ -1508,7 +1505,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
                        f2fs_put_dnode(&dn);
 
                        f2fs_unlock_op(sbi);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
                        f2fs_balance_fs(sbi, dn.node_changed);
@@ -1543,6 +1540,7 @@ out:
 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct address_space *mapping = inode->i_mapping;
        pgoff_t nr, pg_start, pg_end, delta, idx;
        loff_t new_size;
        int ret = 0;
@@ -1565,14 +1563,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        f2fs_balance_fs(sbi, true);
 
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
        ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        if (ret)
                return ret;
 
        /* write out all dirty pages from offset */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
        if (ret)
                return ret;
 
@@ -1583,7 +1581,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 
        /* avoid gc operation during block exchange */
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
        truncate_pagecache(inode, offset);
 
        while (!ret && idx > pg_start) {
@@ -1599,14 +1597,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
                                        idx + delta, nr, false);
                f2fs_unlock_op(sbi);
        }
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
        /* write out all moved pages, if possible */
-       down_write(&F2FS_I(inode)->i_mmap_sem);
-       filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       filemap_invalidate_lock(mapping);
+       filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
        truncate_pagecache(inode, offset);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
 
        if (!ret)
                f2fs_i_size_write(inode, new_size);
@@ -3440,7 +3438,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
                goto out;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3476,7 +3474,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 out:
        inode_unlock(inode);
 
@@ -3593,7 +3591,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
@@ -3629,7 +3627,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        }
 
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 
        if (ret >= 0) {
                clear_inode_flag(inode, FI_COMPRESS_RELEASED);
@@ -3748,7 +3746,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
                goto err;
 
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-       down_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_lock(mapping);
 
        ret = filemap_write_and_wait_range(mapping, range.start,
                        to_end ? LLONG_MAX : end_addr - 1);
@@ -3835,7 +3833,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
                ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
                                prev_block, len, range.flags);
 out:
-       up_write(&F2FS_I(inode)->i_mmap_sem);
+       filemap_invalidate_unlock(mapping);
        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 err:
        inode_unlock(inode);
@@ -4313,9 +4311,9 @@ write:
                /* if we couldn't write data, we should deallocate blocks. */
                if (preallocated && i_size_read(inode) < target_size) {
                        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-                       down_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_lock(inode->i_mapping);
                        f2fs_truncate(inode);
-                       up_write(&F2FS_I(inode)->i_mmap_sem);
+                       filemap_invalidate_unlock(inode->i_mapping);
                        up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
                }
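
Across all the f2fs hunks the nesting is deliberately uniform: i_gc_rwsem[WRITE] first, then the mapping's invalidate_lock, released in reverse. Keeping one documented order is what makes the conversion safe; an inverted pair anywhere would be a lock-ordering bug waiting for lockdep. The pairing, as a sketch:

    static void locked_region_sketch(struct inode *inode)
    {
            down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);  /* outer: vs GC */
            filemap_invalidate_lock(inode->i_mapping);      /* inner: vs faults */

            /* ... truncate / exchange blocks / punch hole ... */

            filemap_invalidate_unlock(inode->i_mapping);
            up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
    }
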
 
index 8fecd3050ccd46aa7cf477545e91e939a7b4fbbf..ce2ab1b85c11dbef1d91d577ae527a25f44a0013 100644 (file)
@@ -1289,7 +1289,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
        mutex_init(&fi->inmem_lock);
        init_rwsem(&fi->i_gc_rwsem[READ]);
        init_rwsem(&fi->i_gc_rwsem[WRITE]);
-       init_rwsem(&fi->i_mmap_sem);
        init_rwsem(&fi->i_xattr_sem);
 
        /* Will be used by directory only */
index f946bec8f1f1b92fed4b851be2053ab7b0cc84d6..68added37c15fe769d5dc95dc1190229651ecb24 100644 (file)
@@ -150,7 +150,8 @@ void f_delown(struct file *filp)
 pid_t f_getown(struct file *filp)
 {
        pid_t pid = 0;
-       read_lock(&filp->f_owner.lock);
+
+       read_lock_irq(&filp->f_owner.lock);
        rcu_read_lock();
        if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
                pid = pid_vnr(filp->f_owner.pid);
@@ -158,7 +159,7 @@ pid_t f_getown(struct file *filp)
                        pid = -pid;
        }
        rcu_read_unlock();
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
        return pid;
 }
 
@@ -208,7 +209,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
        struct f_owner_ex owner = {};
        int ret = 0;
 
-       read_lock(&filp->f_owner.lock);
+       read_lock_irq(&filp->f_owner.lock);
        rcu_read_lock();
        if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
                owner.pid = pid_vnr(filp->f_owner.pid);
@@ -231,7 +232,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
                ret = -EINVAL;
                break;
        }
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
 
        if (!ret) {
                ret = copy_to_user(owner_p, &owner, sizeof(owner));
@@ -249,10 +250,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
        uid_t src[2];
        int err;
 
-       read_lock(&filp->f_owner.lock);
+       read_lock_irq(&filp->f_owner.lock);
        src[0] = from_kuid(user_ns, filp->f_owner.uid);
        src[1] = from_kuid(user_ns, filp->f_owner.euid);
-       read_unlock(&filp->f_owner.lock);
+       read_unlock_irq(&filp->f_owner.lock);
 
        err  = put_user(src[0], &dst[0]);
        err |= put_user(src[1], &dst[1]);
@@ -1003,13 +1004,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
 {
        while (fa) {
                struct fown_struct *fown;
+               unsigned long flags;
 
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
-               read_lock(&fa->fa_lock);
+               read_lock_irqsave(&fa->fa_lock, flags);
                if (fa->fa_file) {
                        fown = &fa->fa_file->f_owner;
                        /* Don't send SIGURG to processes which have not set a
@@ -1018,7 +1020,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
                        if (!(sig == SIGURG && fown->signum == 0))
                                send_sigio(fown, fa->fa_fd, band);
                }
-               read_unlock(&fa->fa_lock);
+               read_unlock_irqrestore(&fa->fa_lock, flags);
                fa = rcu_dereference(fa->fa_next);
        }
 }
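
Every fcntl.c change above enforces one rule: f_owner.lock and fa_lock are also taken from interrupt-driven paths (kill_fasync() runs in IRQ context and calls into send_sigio()), and mixing that with process-context acquisitions that leave interrupts enabled is flagged by lockdep as a potential deadlock. Hence read_lock_irq() where the prior IRQ state is known and read_lock_irqsave() where it is not. A sketch of the process-context side, with read_owner_pid() as a hypothetical caller:

    #include <linux/spinlock.h>

    static pid_t read_owner_pid(struct file *filp)
    {
            unsigned long flags;
            pid_t pid;

            /* IRQ-safe: the same rwlock is taken from interrupt context
             * elsewhere, so interrupts must be off while we hold it. */
            read_lock_irqsave(&filp->f_owner.lock, flags);
            rcu_read_lock();
            pid = pid_vnr(filp->f_owner.pid);
            rcu_read_unlock();
            read_unlock_irqrestore(&filp->f_owner.lock, flags);
            return pid;
    }
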
index e55723744f582991b9fdf75c77905bb4b0fa37ca..281d79f8b3d372a706d62609efd6cecf21c6657c 100644 (file)
@@ -444,12 +444,12 @@ static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
        /*
         * Can't do inline reclaim in fault path. We call
         * dax_layout_busy_page() before we free a range. And
-        * fuse_wait_dax_page() drops fi->i_mmap_sem lock and requires it.
-        * In fault path we enter with fi->i_mmap_sem held and can't drop
-        * it. Also in fault path we hold fi->i_mmap_sem shared and not
-        * exclusive, so that creates further issues with fuse_wait_dax_page().
-        * Hence return -EAGAIN and fuse_dax_fault() will wait for a memory
-        * range to become free and retry.
+        * fuse_wait_dax_page() drops mapping->invalidate_lock and reacquires
+        * it. In the fault path we enter with mapping->invalidate_lock held
+        * and can't drop it. Also, in the fault path we hold
+        * mapping->invalidate_lock shared rather than exclusive, which
+        * creates further issues with fuse_wait_dax_page(). Hence return
+        * -EAGAIN and fuse_dax_fault() will wait for a memory range to
+        * become free and retry.
         */
        if (flags & IOMAP_FAULT) {
                alloc_dmap = alloc_dax_mapping(fcd);
@@ -513,7 +513,7 @@ static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos,
        down_write(&fi->dax->sem);
        node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
 
-       /* We are holding either inode lock or i_mmap_sem, and that should
+       /* We are holding either inode lock or invalidate_lock, and that should
         * ensure that dmap can't be truncated. We are holding a reference
         * on dmap and that should make sure it can't be reclaimed. So dmap
         * should still be there in tree despite the fact we dropped and
@@ -660,14 +660,12 @@ static const struct iomap_ops fuse_iomap_ops = {
 
 static void fuse_wait_dax_page(struct inode *inode)
 {
-       struct fuse_inode *fi = get_fuse_inode(inode);
-
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        schedule();
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 }
 
-/* Should be called with fi->i_mmap_sem lock held exclusively */
+/* Should be called with mapping->invalidate_lock held exclusively */
 static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
                                    loff_t start, loff_t end)
 {
@@ -813,18 +811,18 @@ retry:
         * we do not want any read/write/mmap to make progress and try
         * to populate page cache or access memory we are trying to free.
         */
-       down_read(&get_fuse_inode(inode)->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        ret = dax_iomap_fault(vmf, pe_size, &pfn, &error, &fuse_iomap_ops);
        if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) {
                error = 0;
                retry = true;
-               up_read(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_unlock_shared(inode->i_mapping);
                goto retry;
        }
 
        if (ret & VM_FAULT_NEEDDSYNC)
                ret = dax_finish_sync_fault(vmf, pe_size, pfn);
-       up_read(&get_fuse_inode(inode)->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        if (write)
                sb_end_pagefault(sb);
@@ -960,7 +958,7 @@ inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
        int ret;
        struct interval_tree_node *node;
 
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        /* Lookup a dmap and corresponding file offset to reclaim. */
        down_read(&fi->dax->sem);
@@ -1021,7 +1019,7 @@ inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
 out_write_dmap_sem:
        up_write(&fi->dax->sem);
 out_mmap_sem:
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        return dmap;
 }
 
@@ -1050,10 +1048,10 @@ alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
                 * had a reference or some other temporary failure,
                 * Try again. We want to give up inline reclaim only
                 * if there is no range assigned to this node. Otherwise
-                * if a deadlock is possible if we sleep with fi->i_mmap_sem
-                * held and worker to free memory can't make progress due
-                * to unavailability of fi->i_mmap_sem lock. So sleep
-                * only if fi->dax->nr=0
+                * a deadlock is possible if we sleep with
+                * mapping->invalidate_lock held while the worker that frees
+                * memory can't make progress due to unavailability of
+                * mapping->invalidate_lock.  So sleep only if fi->dax->nr=0
                 */
                if (retry)
                        continue;
@@ -1061,8 +1059,8 @@ alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
                 * There are no mappings which can be reclaimed. Wait for one.
                 * We are not holding fi->dax->sem. So it is possible
                 * that range gets added now. But as we are not holding
-                * fi->i_mmap_sem, worker should still be able to free up
-                * a range and wake us up.
+                * mapping->invalidate_lock, the worker should still be able
+                * to free up a range and wake us up.
                 */
                if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
                        if (wait_event_killable_exclusive(fcd->range_waitq,
@@ -1108,7 +1106,7 @@ static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
 /*
  * Free a range of memory.
  * Locking:
- * 1. Take fi->i_mmap_sem to block dax faults.
+ * 1. Take mapping->invalidate_lock to block dax faults.
  * 2. Take fi->dax->sem to protect interval tree and also to make sure
  *    read/write can not reuse a dmap which we might be freeing.
  */
@@ -1122,7 +1120,7 @@ static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
        loff_t dmap_start = start_idx << FUSE_DAX_SHIFT;
        loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1;
 
-       down_write(&fi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
        ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end);
        if (ret) {
                pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n",
@@ -1134,7 +1132,7 @@ static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
        ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
        up_write(&fi->dax->sem);
 out_mmap_sem:
-       up_write(&fi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
        return ret;
 }
 
@@ -1235,8 +1233,6 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
 static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
 {
        long nr_pages, nr_ranges;
-       void *kaddr;
-       pfn_t pfn;
        struct fuse_dax_mapping *range;
        int ret, id;
        size_t dax_size = -1;
@@ -1248,8 +1244,8 @@ static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
        INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
 
        id = dax_read_lock();
-       nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
-                                    &pfn);
+       nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), NULL,
+                                    NULL);
        dax_read_unlock(id);
        if (nr_pages < 0) {
                pr_debug("dax_direct_access() returned %ld\n", nr_pages);
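
The last fuse/dax.c hunk works because dax_direct_access() treats its kaddr and pfn output pointers as optional: passing NULL for both turns the call into a pure size probe, returning how many pages are addressable from the given offset, which is all the range-init code needs. As a small helper, sketched:

    #include <linux/dax.h>

    /* Number of pages a DAX device exposes from offset 0, probed without
     * retrieving a kernel address or pfn. */
    static long dax_nr_pages(struct dax_device *dax_dev, long max_pages)
    {
            int id = dax_read_lock();
            long nr = dax_direct_access(dax_dev, 0, max_pages, NULL, NULL);

            dax_read_unlock(id);
            return nr;
    }
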
index eade6f965b2ee32988ecb5dcd34d037566375ada..d9b977c0f38dc0a482abbba365a9f1ca2141d1fd 100644 (file)
@@ -1556,6 +1556,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        struct fuse_mount *fm = get_fuse_mount(inode);
        struct fuse_conn *fc = fm->fc;
        struct fuse_inode *fi = get_fuse_inode(inode);
+       struct address_space *mapping = inode->i_mapping;
        FUSE_ARGS(args);
        struct fuse_setattr_in inarg;
        struct fuse_attr_out outarg;
@@ -1580,11 +1581,11 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        }
 
        if (FUSE_IS_DAX(inode) && is_truncate) {
-               down_write(&fi->i_mmap_sem);
+               filemap_invalidate_lock(mapping);
                fault_blocked = true;
                err = fuse_dax_break_layouts(inode, 0, 0);
                if (err) {
-                       up_write(&fi->i_mmap_sem);
+                       filemap_invalidate_unlock(mapping);
                        return err;
                }
        }
@@ -1694,13 +1695,13 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
        if ((is_truncate || !is_wb) &&
            S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
                truncate_pagecache(inode, outarg.attr.size);
-               invalidate_inode_pages2(inode->i_mapping);
+               invalidate_inode_pages2(mapping);
        }
 
        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 out:
        if (fault_blocked)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
 
        return 0;
 
@@ -1711,7 +1712,7 @@ error:
        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
        if (fault_blocked)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(mapping);
        return err;
 }
 
index 97f860cfc195fe4981751878c5a2505829380aa8..621a662c19fb8b3d6a81cf118d9ce4379218e693 100644 (file)
@@ -243,7 +243,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
        }
 
        if (dax_truncate) {
-               down_write(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_lock(inode->i_mapping);
                err = fuse_dax_break_layouts(inode, 0, 0);
                if (err)
                        goto out;
@@ -255,7 +255,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 
 out:
        if (dax_truncate)
-               up_write(&get_fuse_inode(inode)->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        if (is_wb_truncate | dax_truncate) {
                fuse_release_nowrite(inode);
@@ -2920,7 +2920,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        if (lock_inode) {
                inode_lock(inode);
                if (block_faults) {
-                       down_write(&fi->i_mmap_sem);
+                       filemap_invalidate_lock(inode->i_mapping);
                        err = fuse_dax_break_layouts(inode, 0, 0);
                        if (err)
                                goto out;
@@ -2976,7 +2976,7 @@ out:
                clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
 
        if (block_faults)
-               up_write(&fi->i_mmap_sem);
+               filemap_invalidate_unlock(inode->i_mapping);
 
        if (lock_inode)
                inode_unlock(inode);
@@ -3045,7 +3045,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
         * modifications.  Yet this gives fewer guarantees than if the
         * copying was performed with write(2).
         *
-        * To fix this a i_mmap_sem style lock could be used to prevent new
+        * To fix this, a mapping->invalidate_lock could be used to prevent new
         * faults while the copy is ongoing.
         */
        err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
index 07829ce78695b9a14c06c73d9491ff820be5dba2..6fb639b97ea8893109402837904189854182c254 100644 (file)
@@ -149,13 +149,6 @@ struct fuse_inode {
        /** Lock to protect write related fields */
        spinlock_t lock;
 
-       /**
-        * Can't take inode lock in fault path (leads to circular dependency).
-        * Introduce another semaphore which can be taken in fault path and
-        * then other filesystem paths can take this to block faults.
-        */
-       struct rw_semaphore i_mmap_sem;
-
 #ifdef CONFIG_FUSE_DAX
        /*
         * Dax specific inode data
index b9beb39a4a1811a14d55ebd1597580f6d8d0e313..e07e429f32e1d26a01cd610e01f7e021cd933c5c 100644 (file)
@@ -85,7 +85,6 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->orig_ino = 0;
        fi->state = 0;
        mutex_init(&fi->mutex);
-       init_rwsem(&fi->i_mmap_sem);
        spin_lock_init(&fi->lock);
        fi->forget = fuse_alloc_forget();
        if (!fi->forget)
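
The fuse hunks above replace the per-inode i_mmap_sem with the generic
mapping->invalidate_lock, initialized for every address_space in
inode_init_always() (see the fs/inode.c hunk below). A minimal sketch of the
intended pattern, with my_truncate(), my_fault() and my_break_layouts() as
hypothetical stand-ins for the filesystem-specific pieces: the write side
(truncate, hole punch) takes the lock exclusively, fault paths take it shared.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int my_break_layouts(struct inode *inode);      /* hypothetical */

/* Write side: serialize a size change against page faults. */
static int my_truncate(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        int err;

        filemap_invalidate_lock(mapping);       /* blocks new faults */
        err = my_break_layouts(inode);          /* e.g. wait for DAX references */
        if (!err) {
                truncate_setsize(inode, newsize);
                truncate_pagecache(inode, newsize);
        }
        filemap_invalidate_unlock(mapping);
        return err;
}

/* Fault side: faults may still run concurrently with each other. */
static vm_fault_t my_fault(struct vm_fault *vmf)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        vm_fault_t ret;

        filemap_invalidate_lock_shared(mapping);
        ret = filemap_fault(vmf);               /* or a DAX fault handler */
        filemap_invalidate_unlock_shared(mapping);
        return ret;
}
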
index 84ec053d43b46e1a2d23179d36d80577d286dde6..c559827cb6f915a505fa225fa9a84bea71901923 100644 (file)
@@ -1237,9 +1237,6 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
-
        if (cmd == F_CANCELLK) {
                /* Hack: */
                cmd = F_SETLK;
index 2b36dc6f0a108c4647b95625cc18c734e5498e24..ec975f4668775f7b6bfeb4f275b8a24fe4978d8f 100644 (file)
@@ -2,6 +2,7 @@
 config HPFS_FS
        tristate "OS/2 HPFS file system support"
        depends on BLOCK
+       select FS_IOMAP
        help
          OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
          is the file system used for organizing files on OS/2 hard disk
index c3a49aacf20affc935b4e24fdaa188f2f1df5793..fb37f57130aa6b099eb17c3fa03b4c61a49d99c2 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "hpfs_fn.h"
 #include <linux/mpage.h>
+#include <linux/iomap.h>
 #include <linux/fiemap.h>
 
 #define BLOCKS(size) (((size) + 511) >> 9)
@@ -116,6 +117,47 @@ static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_he
        return r;
 }
 
+static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+               unsigned flags, struct iomap *iomap, struct iomap *srcmap)
+{
+       struct super_block *sb = inode->i_sb;
+       unsigned int blkbits = inode->i_blkbits;
+       unsigned int n_secs;
+       secno s;
+
+       if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
+               return -EINVAL;
+
+       iomap->bdev = inode->i_sb->s_bdev;
+       iomap->offset = offset;
+
+       hpfs_lock(sb);
+       s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
+       if (s) {
+               n_secs = hpfs_search_hotfix_map_for_range(sb, s,
+                               min_t(loff_t, n_secs, length));
+               if (unlikely(!n_secs)) {
+                       s = hpfs_search_hotfix_map(sb, s);
+                       n_secs = 1;
+               }
+               iomap->type = IOMAP_MAPPED;
+               iomap->flags = IOMAP_F_MERGED;
+               iomap->addr = (u64)s << blkbits;
+               iomap->length = (u64)n_secs << blkbits;
+       } else {
+               iomap->type = IOMAP_HOLE;
+               iomap->addr = IOMAP_NULL_ADDR;
+               iomap->length = 1 << blkbits;
+       }
+
+       hpfs_unlock(sb);
+       return 0;
+}
+
+static const struct iomap_ops hpfs_iomap_ops = {
+       .iomap_begin            = hpfs_iomap_begin,
+};
+
 static int hpfs_readpage(struct file *file, struct page *page)
 {
        return mpage_readpage(page, hpfs_get_block);
@@ -192,7 +234,14 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 
 static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len, hpfs_get_block);
+       int ret;
+
+       inode_lock(inode);
+       len = min_t(u64, len, i_size_read(inode));
+       ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
+       inode_unlock(inode);
+
+       return ret;
 }
 
 const struct address_space_operations hpfs_aops = {
index c93500d84264d0ca5ddd2463b703cdb6509ba0a4..84c528cd19556c211a1772a69c06d96402998d07 100644 (file)
@@ -190,6 +190,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
+       __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock",
+                    &sb->s_type->invalidate_lock_key);
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
index 12fc19353bb04aae7c6a2f8682c41c14331a6c41..7d2ed8c7dd3128412138d2c26b6b364ca4870040 100644 (file)
@@ -129,7 +129,7 @@ struct io_cb_cancel_data {
        bool cancel_all;
 };
 
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first);
 static void io_wqe_dec_running(struct io_worker *worker);
 
 static bool io_worker_get(struct io_worker *worker)
@@ -248,18 +248,20 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
        rcu_read_unlock();
 
        if (!ret) {
-               bool do_create = false;
+               bool do_create = false, first = false;
 
                raw_spin_lock_irq(&wqe->lock);
                if (acct->nr_workers < acct->max_workers) {
                        atomic_inc(&acct->nr_running);
                        atomic_inc(&wqe->wq->worker_refs);
+                       if (!acct->nr_workers)
+                               first = true;
                        acct->nr_workers++;
                        do_create = true;
                }
                raw_spin_unlock_irq(&wqe->lock);
                if (do_create)
-                       create_io_worker(wqe->wq, wqe, acct->index);
+                       create_io_worker(wqe->wq, wqe, acct->index, first);
        }
 }
 
@@ -282,16 +284,26 @@ static void create_worker_cb(struct callback_head *cb)
        struct io_wq *wq;
        struct io_wqe *wqe;
        struct io_wqe_acct *acct;
+       bool do_create = false, first = false;
 
        cwd = container_of(cb, struct create_worker_data, work);
        wqe = cwd->wqe;
        wq = wqe->wq;
        acct = &wqe->acct[cwd->index];
        raw_spin_lock_irq(&wqe->lock);
-       if (acct->nr_workers < acct->max_workers)
+       if (acct->nr_workers < acct->max_workers) {
+               if (!acct->nr_workers)
+                       first = true;
                acct->nr_workers++;
+               do_create = true;
+       }
        raw_spin_unlock_irq(&wqe->lock);
-       create_io_worker(wq, cwd->wqe, cwd->index);
+       if (do_create) {
+               create_io_worker(wq, wqe, cwd->index, first);
+       } else {
+               atomic_dec(&acct->nr_running);
+               io_worker_ref_put(wq);
+       }
        kfree(cwd);
 }
 
@@ -629,7 +641,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
        raw_spin_unlock_irq(&worker->wqe->lock);
 }
 
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index, bool first)
 {
        struct io_wqe_acct *acct = &wqe->acct[index];
        struct io_worker *worker;
@@ -670,7 +682,7 @@ fail:
        worker->flags |= IO_WORKER_F_FREE;
        if (index == IO_WQ_ACCT_BOUND)
                worker->flags |= IO_WORKER_F_BOUND;
-       if ((acct->nr_workers == 1) && (worker->flags & IO_WORKER_F_BOUND))
+       if (first && (worker->flags & IO_WORKER_F_BOUND))
                worker->flags |= IO_WORKER_F_FIXED;
        raw_spin_unlock_irq(&wqe->lock);
        wake_up_new_task(tsk);
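
The new first parameter exists because IO_WORKER_F_FIXED has to be decided
where the worker slot is reserved: under wqe->lock, exactly one creator can
observe nr_workers == 0. The old acct->nr_workers == 1 test in
create_io_worker() ran after the lock was dropped, so two racing creators
could both mark their worker fixed. Condensed from the two call sites above
(undo_reservation() is a hypothetical stand-in for the nr_running and
worker_refs rollback done in create_worker_cb()):

        bool do_create = false, first = false;

        raw_spin_lock_irq(&wqe->lock);
        if (acct->nr_workers < acct->max_workers) {
                first = !acct->nr_workers;      /* decided while the count is stable */
                acct->nr_workers++;
                do_create = true;
        }
        raw_spin_unlock_irq(&wqe->lock);

        if (do_create)
                create_io_worker(wq, wqe, acct->index, first);
        else
                undo_reservation(wqe);          /* hypothetical rollback helper */
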
index bf548af0426ce5c00ac83d453e4cfc9d81549efc..a2e20a6fbfed8e85c05be4e22fff8201b6c5ac46 100644 (file)
@@ -78,6 +78,7 @@
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
+#include <linux/tracehook.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -1499,7 +1500,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
        all_flushed = list_empty(&ctx->cq_overflow_list);
        if (all_flushed) {
                clear_bit(0, &ctx->check_cq_overflow);
-               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
        }
 
        if (posted)
@@ -1578,7 +1580,9 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        }
        if (list_empty(&ctx->cq_overflow_list)) {
                set_bit(0, &ctx->check_cq_overflow);
-               ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
+               WRITE_ONCE(ctx->rings->sq_flags,
+                          ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
+
        }
        ocqe->cqe.user_data = user_data;
        ocqe->cqe.res = res;
@@ -2222,9 +2226,9 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
 
 static inline bool io_run_task_work(void)
 {
-       if (current->task_works) {
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
                __set_current_state(TASK_RUNNING);
-               task_work_run();
+               tracehook_notify_signal();
                return true;
        }
 
@@ -2473,8 +2477,10 @@ static void io_fallback_req_func(struct work_struct *work)
        struct llist_node *node = llist_del_all(&ctx->fallback_llist);
        struct io_kiocb *req, *tmp;
 
+       percpu_ref_get(&ctx->refs);
        llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
                req->io_task_work.func(req);
+       percpu_ref_put(&ctx->refs);
 }
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
@@ -6803,14 +6809,16 @@ static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
 {
        /* Tell userspace we may need a wakeup call */
        spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
        spin_unlock_irq(&ctx->completion_lock);
 }
 
 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
 {
        spin_lock_irq(&ctx->completion_lock);
-       ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+       WRITE_ONCE(ctx->rings->sq_flags,
+                  ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
        spin_unlock_irq(&ctx->completion_lock);
 }
 
@@ -7132,16 +7140,6 @@ static void **io_alloc_page_table(size_t size)
        return table;
 }
 
-static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
-{
-       spin_lock_bh(&ctx->rsrc_ref_lock);
-}
-
-static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
-{
-       spin_unlock_bh(&ctx->rsrc_ref_lock);
-}
-
 static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
 {
        percpu_ref_exit(&ref_node->refs);
@@ -7158,9 +7156,9 @@ static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
                struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
 
                rsrc_node->rsrc_data = data_to_kill;
-               io_rsrc_ref_lock(ctx);
+               spin_lock_irq(&ctx->rsrc_ref_lock);
                list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
-               io_rsrc_ref_unlock(ctx);
+               spin_unlock_irq(&ctx->rsrc_ref_lock);
 
                atomic_inc(&data_to_kill->refs);
                percpu_ref_kill(&rsrc_node->refs);
@@ -7199,17 +7197,19 @@ static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ct
                /* kill initial ref, already quiesced if zero */
                if (atomic_dec_and_test(&data->refs))
                        break;
+               mutex_unlock(&ctx->uring_lock);
                flush_delayed_work(&ctx->rsrc_put_work);
                ret = wait_for_completion_interruptible(&data->done);
-               if (!ret)
+               if (!ret) {
+                       mutex_lock(&ctx->uring_lock);
                        break;
+               }
 
                atomic_inc(&data->refs);
                /* wait for all works potentially completing data->done */
                flush_delayed_work(&ctx->rsrc_put_work);
                reinit_completion(&data->done);
 
-               mutex_unlock(&ctx->uring_lock);
                ret = io_run_task_work_sig();
                mutex_lock(&ctx->uring_lock);
        } while (ret >= 0);
@@ -7668,9 +7668,10 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 {
        struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
        struct io_ring_ctx *ctx = node->rsrc_data->ctx;
+       unsigned long flags;
        bool first_add = false;
 
-       io_rsrc_ref_lock(ctx);
+       spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
        node->done = true;
 
        while (!list_empty(&ctx->rsrc_ref_list)) {
@@ -7682,7 +7683,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
                list_del(&node->node);
                first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
        }
-       io_rsrc_ref_unlock(ctx);
+       spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
 
        if (first_add)
                mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
@@ -8653,13 +8654,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
        mutex_unlock(&ctx->uring_lock);
 }
 
-static bool io_wait_rsrc_data(struct io_rsrc_data *data)
+static void io_wait_rsrc_data(struct io_rsrc_data *data)
 {
-       if (!data)
-               return false;
-       if (!atomic_dec_and_test(&data->refs))
+       if (data && !atomic_dec_and_test(&data->refs))
                wait_for_completion(&data->done);
-       return true;
 }
 
 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
@@ -8671,10 +8669,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
                ctx->mm_account = NULL;
        }
 
+       /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
+       io_wait_rsrc_data(ctx->buf_data);
+       io_wait_rsrc_data(ctx->file_data);
+
        mutex_lock(&ctx->uring_lock);
-       if (io_wait_rsrc_data(ctx->buf_data))
+       if (ctx->buf_data)
                __io_sqe_buffers_unregister(ctx);
-       if (io_wait_rsrc_data(ctx->file_data))
+       if (ctx->file_data)
                __io_sqe_files_unregister(ctx);
        if (ctx->rings)
                __io_cqring_overflow_flush(ctx, true);
@@ -9370,9 +9372,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                io_cqring_overflow_flush(ctx, false);
 
-               ret = -EOWNERDEAD;
-               if (unlikely(ctx->sq_data->thread == NULL))
+               if (unlikely(ctx->sq_data->thread == NULL)) {
+                       ret = -EOWNERDEAD;
                        goto out;
+               }
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sq_data->wait);
                if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9840,10 +9843,11 @@ static int io_register_personality(struct io_ring_ctx *ctx)
 
        ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
                        XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
-       if (!ret)
-               return id;
-       put_cred(creds);
-       return ret;
+       if (ret < 0) {
+               put_cred(creds);
+               return ret;
+       }
+       return id;
 }
 
 static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
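
The WRITE_ONCE() conversions for sq_flags above reflect that the flags word
lives in the rings area mmap()ed into userspace: the submitter reads it
concurrently without any lock, so the kernel's stores must not be torn or
fused by the compiler. A hedged sketch of the userspace side, assuming
ring_fd and the sq_flags pointer were obtained via io_uring_setup() and
mmap() of the SQ ring:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* sq_flags points into the SQ ring at io_sqring_offsets.flags. */
static void sqpoll_submit(int ring_fd, unsigned *sq_flags, unsigned to_submit)
{
        /* volatile read: the kernel updates this word with WRITE_ONCE() */
        unsigned flags = *(volatile unsigned *)sq_flags;

        /* With IORING_SETUP_SQPOLL, enter the kernel only when the
         * poller thread went to sleep and asked for a wakeup. */
        if (flags & IORING_SQ_NEED_WAKEUP)
                syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
                        IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
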
index 1e2204fa9963e30102f801a39f1aa2137c5f86e8..eea8267ae1f2e718fdb39c31c1fb8363fc5ad1b3 100644 (file)
@@ -263,209 +263,6 @@ static long ioctl_file_clone_range(struct file *file,
                                args.src_length, args.dest_offset);
 }
 
-#ifdef CONFIG_BLOCK
-
-static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
-{
-       return (offset >> inode->i_blkbits);
-}
-
-static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
-{
-       return (blk << inode->i_blkbits);
-}
-
-/**
- * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
- * @inode: the inode to map
- * @fieinfo: the fiemap info struct that will be passed back to userspace
- * @start: where to start mapping in the inode
- * @len: how much space to map
- * @get_block: the fs's get_block function
- *
- * This does FIEMAP for block based inodes.  Basically it will just loop
- * through get_block until we hit the number of extents we want to map, or we
- * go past the end of the file and hit a hole.
- *
- * If it is possible to have data blocks beyond a hole past @inode->i_size, then
- * please do not use this function, it will stop at the first unmapped block
- * beyond i_size.
- *
- * If you use this function directly, you need to do your own locking. Use
- * generic_block_fiemap if you want the locking done for you.
- */
-static int __generic_block_fiemap(struct inode *inode,
-                          struct fiemap_extent_info *fieinfo, loff_t start,
-                          loff_t len, get_block_t *get_block)
-{
-       struct buffer_head map_bh;
-       sector_t start_blk, last_blk;
-       loff_t isize = i_size_read(inode);
-       u64 logical = 0, phys = 0, size = 0;
-       u32 flags = FIEMAP_EXTENT_MERGED;
-       bool past_eof = false, whole_file = false;
-       int ret = 0;
-
-       ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
-       if (ret)
-               return ret;
-
-       /*
-        * Either the i_mutex or other appropriate locking needs to be held
-        * since we expect isize to not change at all through the duration of
-        * this call.
-        */
-       if (len >= isize) {
-               whole_file = true;
-               len = isize;
-       }
-
-       /*
-        * Some filesystems can't deal with being asked to map less than
-        * blocksize, so make sure our len is at least block length.
-        */
-       if (logical_to_blk(inode, len) == 0)
-               len = blk_to_logical(inode, 1);
-
-       start_blk = logical_to_blk(inode, start);
-       last_blk = logical_to_blk(inode, start + len - 1);
-
-       do {
-               /*
-                * we set b_size to the total size we want so it will map as
-                * many contiguous blocks as possible at once
-                */
-               memset(&map_bh, 0, sizeof(struct buffer_head));
-               map_bh.b_size = len;
-
-               ret = get_block(inode, start_blk, &map_bh, 0);
-               if (ret)
-                       break;
-
-               /* HOLE */
-               if (!buffer_mapped(&map_bh)) {
-                       start_blk++;
-
-                       /*
-                        * We want to handle the case where there is an
-                        * allocated block at the front of the file, and then
-                        * nothing but holes up to the end of the file properly,
-                        * to make sure that extent at the front gets properly
-                        * marked with FIEMAP_EXTENT_LAST
-                        */
-                       if (!past_eof &&
-                           blk_to_logical(inode, start_blk) >= isize)
-                               past_eof = 1;
-
-                       /*
-                        * First hole after going past the EOF, this is our
-                        * last extent
-                        */
-                       if (past_eof && size) {
-                               flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                       } else if (size) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size, flags);
-                               size = 0;
-                       }
-
-                       /* if we have holes up to/past EOF then we're done */
-                       if (start_blk > last_blk || past_eof || ret)
-                               break;
-               } else {
-                       /*
-                        * We have gone over the length of what we wanted to
-                        * map, and it wasn't the entire file, so add the extent
-                        * we got last time and exit.
-                        *
-                        * This is for the case where say we want to map all the
-                        * way up to the second to the last block in a file, but
-                        * the last block is a hole, making the second to last
-                        * block FIEMAP_EXTENT_LAST.  In this case we want to
-                        * see if there is a hole after the second to last block
-                        * so we can mark it properly.  If we found data after
-                        * we exceeded the length we were requesting, then we
-                        * are good to go, just add the extent to the fieinfo
-                        * and break
-                        */
-                       if (start_blk > last_blk && !whole_file) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                               break;
-                       }
-
-                       /*
-                        * if size != 0 then we know we already have an extent
-                        * to add, so add it.
-                        */
-                       if (size) {
-                               ret = fiemap_fill_next_extent(fieinfo, logical,
-                                                             phys, size,
-                                                             flags);
-                               if (ret)
-                                       break;
-                       }
-
-                       logical = blk_to_logical(inode, start_blk);
-                       phys = blk_to_logical(inode, map_bh.b_blocknr);
-                       size = map_bh.b_size;
-                       flags = FIEMAP_EXTENT_MERGED;
-
-                       start_blk += logical_to_blk(inode, size);
-
-                       /*
-                        * If we are past the EOF, then we need to make sure as
-                        * soon as we find a hole that the last extent we found
-                        * is marked with FIEMAP_EXTENT_LAST
-                        */
-                       if (!past_eof && logical + size >= isize)
-                               past_eof = true;
-               }
-               cond_resched();
-               if (fatal_signal_pending(current)) {
-                       ret = -EINTR;
-                       break;
-               }
-
-       } while (1);
-
-       /* If ret is 1 then we just hit the end of the extent array */
-       if (ret == 1)
-               ret = 0;
-
-       return ret;
-}
-
-/**
- * generic_block_fiemap - FIEMAP for block based inodes
- * @inode: The inode to map
- * @fieinfo: The mapping information
- * @start: The initial block to map
- * @len: The length of the extect to attempt to map
- * @get_block: The block mapping function for the fs
- *
- * Calls __generic_block_fiemap to map the inode, after taking
- * the inode's mutex lock.
- */
-
-int generic_block_fiemap(struct inode *inode,
-                        struct fiemap_extent_info *fieinfo, u64 start,
-                        u64 len, get_block_t *get_block)
-{
-       int ret;
-       inode_lock(inode);
-       ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
-       inode_unlock(inode);
-       return ret;
-}
-EXPORT_SYMBOL(generic_block_fiemap);
-
-#endif  /*  CONFIG_BLOCK  */
-
 /*
  * This provides compatibility with legacy XFS pre-allocation ioctls
  * which predate the fallocate syscall.
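
With generic_block_fiemap() removed, remaining block filesystems are expected
to implement ->fiemap on top of iomap_fiemap(), as the hpfs conversion above
does. The userspace-visible FS_IOC_FIEMAP interface is unchanged; a minimal
sketch of querying a file's extents (fixed 32-record buffer, most error
handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
        struct fiemap *fm;
        unsigned int i;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;

        /* header plus room for up to 32 extent records */
        fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
        fm->fm_length = FIEMAP_MAX_OFFSET;      /* map the whole file */
        fm->fm_flags = FIEMAP_FLAG_SYNC;
        fm->fm_extent_count = 32;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
                return 1;

        for (i = 0; i < fm->fm_mapped_extents; i++)
                printf("logical %llu physical %llu length %llu flags %#x\n",
                       (unsigned long long)fm->fm_extents[i].fe_logical,
                       (unsigned long long)fm->fm_extents[i].fe_physical,
                       (unsigned long long)fm->fm_extents[i].fe_length,
                       fm->fm_extents[i].fe_flags);
        return 0;
}
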
index 21edc423b79fafa97bf868da7107bfa9df37995b..678e2c51b855c05718bba33f1f223c9c2ad931cb 100644 (file)
@@ -155,7 +155,6 @@ struct iso9660_options{
        unsigned int overriderockperm:1;
        unsigned int uid_set:1;
        unsigned int gid_set:1;
-       unsigned int utf8:1;
        unsigned char map;
        unsigned char check;
        unsigned int blocksize;
@@ -356,7 +355,6 @@ static int parse_options(char *options, struct iso9660_options *popt)
        popt->gid = GLOBAL_ROOT_GID;
        popt->uid = GLOBAL_ROOT_UID;
        popt->iocharset = NULL;
-       popt->utf8 = 0;
        popt->overriderockperm = 0;
        popt->session=-1;
        popt->sbsector=-1;
@@ -389,10 +387,13 @@ static int parse_options(char *options, struct iso9660_options *popt)
                case Opt_cruft:
                        popt->cruft = 1;
                        break;
+#ifdef CONFIG_JOLIET
                case Opt_utf8:
-                       popt->utf8 = 1;
+                       kfree(popt->iocharset);
+                       popt->iocharset = kstrdup("utf8", GFP_KERNEL);
+                       if (!popt->iocharset)
+                               return 0;
                        break;
-#ifdef CONFIG_JOLIET
                case Opt_iocharset:
                        kfree(popt->iocharset);
                        popt->iocharset = match_strdup(&args[0]);
@@ -495,7 +496,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
        if (sbi->s_nocompress)          seq_puts(m, ",nocompress");
        if (sbi->s_overriderockperm)    seq_puts(m, ",overriderockperm");
        if (sbi->s_showassoc)           seq_puts(m, ",showassoc");
-       if (sbi->s_utf8)                seq_puts(m, ",utf8");
 
        if (sbi->s_check)               seq_printf(m, ",check=%c", sbi->s_check);
        if (sbi->s_mapping)             seq_printf(m, ",map=%c", sbi->s_mapping);
@@ -518,9 +518,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
                seq_printf(m, ",fmode=%o", sbi->s_fmode);
 
 #ifdef CONFIG_JOLIET
-       if (sbi->s_nls_iocharset &&
-           strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
+       if (sbi->s_nls_iocharset)
                seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
+       else
+               seq_puts(m, ",iocharset=utf8");
 #endif
        return 0;
 }
@@ -863,14 +864,13 @@ root_found:
        sbi->s_nls_iocharset = NULL;
 
 #ifdef CONFIG_JOLIET
-       if (joliet_level && opt.utf8 == 0) {
+       if (joliet_level) {
                char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
-               sbi->s_nls_iocharset = load_nls(p);
-               if (! sbi->s_nls_iocharset) {
-                       /* Fail only if explicit charset specified */
-                       if (opt.iocharset)
+               if (strcmp(p, "utf8") != 0) {
+                       sbi->s_nls_iocharset = opt.iocharset ?
+                               load_nls(opt.iocharset) : load_nls_default();
+                       if (!sbi->s_nls_iocharset)
                                goto out_freesbi;
-                       sbi->s_nls_iocharset = load_nls_default();
                }
        }
 #endif
@@ -886,7 +886,6 @@ root_found:
        sbi->s_gid = opt.gid;
        sbi->s_uid_set = opt.uid_set;
        sbi->s_gid_set = opt.gid_set;
-       sbi->s_utf8 = opt.utf8;
        sbi->s_nocompress = opt.nocompress;
        sbi->s_overriderockperm = opt.overriderockperm;
        /*
index 055ec6c586f7fd5199f24ed3f703bd7a99a83228..dcdc191ed1834876beda8b1f1eb6e27217fbf753 100644 (file)
@@ -44,7 +44,6 @@ struct isofs_sb_info {
        unsigned char s_session;
        unsigned int  s_high_sierra:1;
        unsigned int  s_rock:2;
-       unsigned int  s_utf8:1;
        unsigned int  s_cruft:1; /* Broken disks with high byte of length
                                  * containing junk */
        unsigned int  s_nocompress:1;
index be8b6a9d0b926cb5b7b5a7f95708d90f8d27785d..c0f04a1e7f695f0efe8954fe2dff58d041a9abc9 100644 (file)
@@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
 int
 get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
 {
-       unsigned char utf8;
        struct nls_table *nls;
        unsigned char len = 0;
 
-       utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
        nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
 
-       if (utf8) {
+       if (!nls) {
                len = utf16s_to_utf8s((const wchar_t *) de->name,
                                de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
                                outname, PAGE_SIZE);
index 74b2a1dfe8d87e4b12b014a51cb45bd6284f75a2..3d6fb4ae847b4c6084c4f9745f2effce71ea6d78 100644 (file)
@@ -1397,103 +1397,6 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
        return error;
 }
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-/**
- * locks_mandatory_locked - Check for an active lock
- * @file: the file to check
- *
- * Searches the inode's list of locks to find any POSIX locks which conflict.
- * This function is called from locks_verify_locked() only.
- */
-int locks_mandatory_locked(struct file *file)
-{
-       int ret;
-       struct inode *inode = locks_inode(file);
-       struct file_lock_context *ctx;
-       struct file_lock *fl;
-
-       ctx = smp_load_acquire(&inode->i_flctx);
-       if (!ctx || list_empty_careful(&ctx->flc_posix))
-               return 0;
-
-       /*
-        * Search the lock list for this inode for any POSIX locks.
-        */
-       spin_lock(&ctx->flc_lock);
-       ret = 0;
-       list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
-               if (fl->fl_owner != current->files &&
-                   fl->fl_owner != file) {
-                       ret = -EAGAIN;
-                       break;
-               }
-       }
-       spin_unlock(&ctx->flc_lock);
-       return ret;
-}
-
-/**
- * locks_mandatory_area - Check for a conflicting lock
- * @inode:     the file to check
- * @filp:       how the file was opened (if it was)
- * @start:     first byte in the file to check
- * @end:       lastbyte in the file to check
- * @type:      %F_WRLCK for a write lock, else %F_RDLCK
- *
- * Searches the inode's list of locks to find any POSIX locks which conflict.
- */
-int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
-                        loff_t end, unsigned char type)
-{
-       struct file_lock fl;
-       int error;
-       bool sleep = false;
-
-       locks_init_lock(&fl);
-       fl.fl_pid = current->tgid;
-       fl.fl_file = filp;
-       fl.fl_flags = FL_POSIX | FL_ACCESS;
-       if (filp && !(filp->f_flags & O_NONBLOCK))
-               sleep = true;
-       fl.fl_type = type;
-       fl.fl_start = start;
-       fl.fl_end = end;
-
-       for (;;) {
-               if (filp) {
-                       fl.fl_owner = filp;
-                       fl.fl_flags &= ~FL_SLEEP;
-                       error = posix_lock_inode(inode, &fl, NULL);
-                       if (!error)
-                               break;
-               }
-
-               if (sleep)
-                       fl.fl_flags |= FL_SLEEP;
-               fl.fl_owner = current->files;
-               error = posix_lock_inode(inode, &fl, NULL);
-               if (error != FILE_LOCK_DEFERRED)
-                       break;
-               error = wait_event_interruptible(fl.fl_wait,
-                                       list_empty(&fl.fl_blocked_member));
-               if (!error) {
-                       /*
-                        * If we've been sleeping someone might have
-                        * changed the permissions behind our back.
-                        */
-                       if (__mandatory_lock(inode))
-                               continue;
-               }
-
-               break;
-       }
-       locks_delete_block(&fl);
-
-       return error;
-}
-EXPORT_SYMBOL(locks_mandatory_area);
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
 static void lease_clear_pending(struct file_lock *fl, int arg)
 {
        switch (arg) {
@@ -2486,14 +2389,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
        if (file_lock == NULL)
                return -ENOLCK;
 
-       /* Don't allow mandatory locks on files that may be memory mapped
-        * and shared.
-        */
-       if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
-               error = -EAGAIN;
-               goto out;
-       }
-
        error = flock_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;
@@ -2611,21 +2506,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
                struct flock64 *flock)
 {
        struct file_lock *file_lock = locks_alloc_lock();
-       struct inode *inode = locks_inode(filp);
        struct file *f;
        int error;
 
        if (file_lock == NULL)
                return -ENOLCK;
 
-       /* Don't allow mandatory locks on files that may be memory mapped
-        * and shared.
-        */
-       if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
-               error = -EAGAIN;
-               goto out;
-       }
-
        error = flock64_to_posix_lock(filp, file_lock, flock);
        if (error)
                goto out;
@@ -2857,8 +2743,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
                        seq_puts(f, "POSIX ");
 
                seq_printf(f, " %s ",
-                            (inode == NULL) ? "*NOINODE*" :
-                            mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
+                            (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
        } else if (IS_FLOCK(fl)) {
                if (fl->fl_type & LOCK_MAND) {
                        seq_puts(f, "FLOCK  MSNFS     ");
index bf6d8a738c599e62bf6de18b59c27cf2bd19725f..471eb9fead6e005ba2433d6001cf8cf5e778f2c2 100644 (file)
@@ -3023,9 +3023,7 @@ static int handle_truncate(struct user_namespace *mnt_userns, struct file *filp)
        /*
         * Refuse to truncate files with mandatory locks held on them.
         */
-       error = locks_verify_locked(filp);
-       if (!error)
-               error = security_path_truncate(path);
+       error = security_path_truncate(path);
        if (!error) {
                error = do_truncate(mnt_userns, path->dentry, 0,
                                    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
index ab4174a3c802b5c5c61787f50e1765b30cb3bc6a..20caa4b4c53991c76e33d4af5930558a4caac619 100644 (file)
@@ -1715,18 +1715,14 @@ static inline bool may_mount(void)
        return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
 }
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-static inline bool may_mandlock(void)
+static void warn_mandlock(void)
 {
-       return capable(CAP_SYS_ADMIN);
+       pr_warn_once("=======================================================\n"
+                    "WARNING: The mand mount option has been deprecated and\n"
+                    "         and is ignored by this kernel. Remove the mand\n"
+                    "         option from the mount to silence this warning.\n"
+                    "=======================================================\n");
 }
-#else
-static inline bool may_mandlock(void)
-{
-       pr_warn("VFS: \"mand\" mount option not supported");
-       return false;
-}
-#endif
 
 static int can_umount(const struct path *path, int flags)
 {
@@ -1938,6 +1934,20 @@ void drop_collected_mounts(struct vfsmount *mnt)
        namespace_unlock();
 }
 
+static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+       struct mount *child;
+
+       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+               if (!is_subdir(child->mnt_mountpoint, dentry))
+                       continue;
+
+               if (child->mnt.mnt_flags & MNT_LOCKED)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * clone_private_mount - create a private clone of a path
  * @path: path to clone
@@ -1953,10 +1963,19 @@ struct vfsmount *clone_private_mount(const struct path *path)
        struct mount *old_mnt = real_mount(path->mnt);
        struct mount *new_mnt;
 
+       down_read(&namespace_sem);
        if (IS_MNT_UNBINDABLE(old_mnt))
-               return ERR_PTR(-EINVAL);
+               goto invalid;
+
+       if (!check_mnt(old_mnt))
+               goto invalid;
+
+       if (has_locked_children(old_mnt, path->dentry))
+               goto invalid;
 
        new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
+       up_read(&namespace_sem);
+
        if (IS_ERR(new_mnt))
                return ERR_CAST(new_mnt);
 
@@ -1964,6 +1983,10 @@ struct vfsmount *clone_private_mount(const struct path *path)
        new_mnt->mnt_ns = MNT_NS_INTERNAL;
 
        return &new_mnt->mnt;
+
+invalid:
+       up_read(&namespace_sem);
+       return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(clone_private_mount);
 
@@ -2315,19 +2338,6 @@ static int do_change_type(struct path *path, int ms_flags)
        return err;
 }
 
-static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
-{
-       struct mount *child;
-       list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
-               if (!is_subdir(child->mnt_mountpoint, dentry))
-                       continue;
-
-               if (child->mnt.mnt_flags & MNT_LOCKED)
-                       return true;
-       }
-       return false;
-}
-
 static struct mount *__do_loopback(struct path *old_path, int recurse)
 {
        struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
@@ -3179,8 +3189,8 @@ int path_mount(const char *dev_name, struct path *path,
                return ret;
        if (!may_mount())
                return -EPERM;
-       if ((flags & SB_MANDLOCK) && !may_mandlock())
-               return -EPERM;
+       if (flags & SB_MANDLOCK)
+               warn_mandlock();
 
        /* Default to relatime unless overridden */
        if (!(flags & MS_NOATIME))
@@ -3563,9 +3573,8 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
        if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
                goto err_unlock;
 
-       ret = -EPERM;
-       if ((fc->sb_flags & SB_MANDLOCK) && !may_mandlock())
-               goto err_unlock;
+       if (fc->sb_flags & SB_MANDLOCK)
+               warn_mandlock();
 
        newmount.mnt = vfs_create_mount(fc);
        if (IS_ERR(newmount.mnt)) {
index 1fef107961bc4a4ca624796de35d33c400179a2f..514be5d28d70596ba45b27ad904734af023cd072 100644 (file)
@@ -806,10 +806,6 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        nfs_inc_stats(inode, NFSIOS_VFSLOCK);
 
-       /* No mandatory locks over NFS */
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               goto out_err;
-
        if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
                is_local = 1;
 
index fa67ecd5fe63f7d1502e2c135afe63914054bd03..8313e1dbb5dc498d02eb73674d741342e7f8519a 100644 (file)
@@ -5735,16 +5735,6 @@ check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid,
                                NFS4_SHARE_DENY_READ);
 }
 
-/*
- * Allow READ/WRITE during grace period on recovered state only for files
- * that are not able to provide mandatory locking.
- */
-static inline int
-grace_disallows_io(struct net *net, struct inode *inode)
-{
-       return opens_in_grace(net) && mandatory_lock(inode);
-}
-
 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
        /*
@@ -6026,7 +6016,6 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                stateid_t *stateid, int flags, struct nfsd_file **nfp,
                struct nfs4_stid **cstid)
 {
-       struct inode *ino = d_inode(fhp->fh_dentry);
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct nfs4_stid *s = NULL;
@@ -6035,9 +6024,6 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
        if (nfp)
                *nfp = NULL;
 
-       if (grace_disallows_io(net, ino))
-               return nfserr_grace;
-
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
                status = check_special_stateids(net, fhp, stateid, flags);
                goto done;
index a224a5e23cc11036d69fe0dd1402b18b371893dd..92e77f92268affcd3606995c7048586f10fa13e9 100644 (file)
@@ -333,7 +333,6 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
                struct iattr *iap)
 {
        struct inode *inode = d_inode(fhp->fh_dentry);
-       int host_err;
 
        if (iap->ia_size < inode->i_size) {
                __be32 err;
@@ -343,20 +342,7 @@ nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
                if (err)
                        return err;
        }
-
-       host_err = get_write_access(inode);
-       if (host_err)
-               goto out_nfserrno;
-
-       host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-       if (host_err)
-               goto out_put_write_access;
-       return 0;
-
-out_put_write_access:
-       put_write_access(inode);
-out_nfserrno:
-       return nfserrno(host_err);
+       return nfserrno(get_write_access(inode));
 }
 
 /*
@@ -750,13 +736,6 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
        err = nfserr_perm;
        if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
                goto out;
-       /*
-        * We must ignore files (but only files) which might have mandatory
-        * locks on them because there is no way to know if the accesser has
-        * the lock.
-        */
-       if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
-               goto out;
 
        if (!inode->i_fop)
                goto out;
index 64864fb40b401184550ec3114cf4e34b7b839dd6..6facdf476255d13c7ff7dc9aa36def05fcfdf896 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/fanotify.h>
 #include <linux/fcntl.h>
+#include <linux/fdtable.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/anon_inodes.h>
@@ -54,22 +55,27 @@ static int fanotify_max_queued_events __read_mostly;
 
 #include <linux/sysctl.h>
 
+static long ft_zero = 0;
+static long ft_int_max = INT_MAX;
+
 struct ctl_table fanotify_table[] = {
        {
                .procname       = "max_user_groups",
                .data   = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &ft_zero,
+               .extra2         = &ft_int_max,
        },
        {
                .procname       = "max_user_marks",
                .data   = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &ft_zero,
+               .extra2         = &ft_int_max,
        },
        {
                .procname       = "max_queued_events",
@@ -104,8 +110,10 @@ struct kmem_cache *fanotify_path_event_cachep __read_mostly;
 struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
 
 #define FANOTIFY_EVENT_ALIGN 4
-#define FANOTIFY_INFO_HDR_LEN \
+#define FANOTIFY_FID_INFO_HDR_LEN \
        (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle))
+#define FANOTIFY_PIDFD_INFO_HDR_LEN \
+       sizeof(struct fanotify_event_info_pidfd)
 
 static int fanotify_fid_info_len(int fh_len, int name_len)
 {
@@ -114,10 +122,11 @@ static int fanotify_fid_info_len(int fh_len, int name_len)
        if (name_len)
                info_len += name_len + 1;
 
-       return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN);
+       return roundup(FANOTIFY_FID_INFO_HDR_LEN + info_len,
+                      FANOTIFY_EVENT_ALIGN);
 }
 
-static int fanotify_event_info_len(unsigned int fid_mode,
+static int fanotify_event_info_len(unsigned int info_mode,
                                   struct fanotify_event *event)
 {
        struct fanotify_info *info = fanotify_event_info(event);
@@ -128,7 +137,8 @@ static int fanotify_event_info_len(unsigned int fid_mode,
 
        if (dir_fh_len) {
                info_len += fanotify_fid_info_len(dir_fh_len, info->name_len);
-       } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) {
+       } else if ((info_mode & FAN_REPORT_NAME) &&
+                  (event->mask & FAN_ONDIR)) {
                /*
                 * With group flag FAN_REPORT_NAME, if name was not recorded in
                 * event on a directory, we will report the name ".".
@@ -136,6 +146,9 @@ static int fanotify_event_info_len(unsigned int fid_mode,
                dot_len = 1;
        }
 
+       if (info_mode & FAN_REPORT_PIDFD)
+               info_len += FANOTIFY_PIDFD_INFO_HDR_LEN;
+
        if (fh_len)
                info_len += fanotify_fid_info_len(fh_len, dot_len);
 
@@ -171,7 +184,7 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group,
        size_t event_size = FAN_EVENT_METADATA_LEN;
        struct fanotify_event *event = NULL;
        struct fsnotify_event *fsn_event;
-       unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+       unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES);
 
        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
 
@@ -181,8 +194,8 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group,
                goto out;
 
        event = FANOTIFY_E(fsn_event);
-       if (fid_mode)
-               event_size += fanotify_event_info_len(fid_mode, event);
+       if (info_mode)
+               event_size += fanotify_event_info_len(info_mode, event);
 
        if (event_size > count) {
                event = ERR_PTR(-EINVAL);
@@ -303,9 +316,10 @@ static int process_access_response(struct fsnotify_group *group,
        return -ENOENT;
 }
 
-static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
-                            int info_type, const char *name, size_t name_len,
-                            char __user *buf, size_t count)
+static int copy_fid_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
+                                int info_type, const char *name,
+                                size_t name_len,
+                                char __user *buf, size_t count)
 {
        struct fanotify_event_info_fid info = { };
        struct file_handle handle = { };
@@ -398,6 +412,117 @@ static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
        return info_len;
 }
 
+static int copy_pidfd_info_to_user(int pidfd,
+                                  char __user *buf,
+                                  size_t count)
+{
+       struct fanotify_event_info_pidfd info = { };
+       size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN;
+
+       if (WARN_ON_ONCE(info_len > count))
+               return -EFAULT;
+
+       info.hdr.info_type = FAN_EVENT_INFO_TYPE_PIDFD;
+       info.hdr.len = info_len;
+       info.pidfd = pidfd;
+
+       if (copy_to_user(buf, &info, info_len))
+               return -EFAULT;
+
+       return info_len;
+}
+
+static int copy_info_records_to_user(struct fanotify_event *event,
+                                    struct fanotify_info *info,
+                                    unsigned int info_mode, int pidfd,
+                                    char __user *buf, size_t count)
+{
+       int ret, total_bytes = 0, info_type = 0;
+       unsigned int fid_mode = info_mode & FANOTIFY_FID_BITS;
+       unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
+
+       /*
+        * Event info records order is as follows: dir fid + name, child fid.
+        */
+       if (fanotify_event_dir_fh_len(event)) {
+               info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
+                                            FAN_EVENT_INFO_TYPE_DFID;
+               ret = copy_fid_info_to_user(fanotify_event_fsid(event),
+                                           fanotify_info_dir_fh(info),
+                                           info_type,
+                                           fanotify_info_name(info),
+                                           info->name_len, buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       if (fanotify_event_object_fh_len(event)) {
+               const char *dot = NULL;
+               int dot_len = 0;
+
+               if (fid_mode == FAN_REPORT_FID || info_type) {
+                       /*
+                        * With only group flag FAN_REPORT_FID only type FID is
+                        * reported. Second info record type is always FID.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_FID;
+               } else if ((fid_mode & FAN_REPORT_NAME) &&
+                          (event->mask & FAN_ONDIR)) {
+                       /*
+                        * With group flag FAN_REPORT_NAME, if name was not
+                        * recorded in an event on a directory, report the name
+                        * "." with info type DFID_NAME.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_DFID_NAME;
+                       dot = ".";
+                       dot_len = 1;
+               } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
+                          (event->mask & FAN_ONDIR)) {
+                       /*
+                        * With group flag FAN_REPORT_DIR_FID, a single info
+                        * record has type DFID for directory entry modification
+                        * event and for event on a directory.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_DFID;
+               } else {
+                       /*
+                        * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID,
+                        * a single info record has type FID for event on a
+                        * non-directory, when there is no directory to report.
+                        * For example, on FAN_DELETE_SELF event.
+                        */
+                       info_type = FAN_EVENT_INFO_TYPE_FID;
+               }
+
+               ret = copy_fid_info_to_user(fanotify_event_fsid(event),
+                                           fanotify_event_object_fh(event),
+                                           info_type, dot, dot_len,
+                                           buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       if (pidfd_mode) {
+               ret = copy_pidfd_info_to_user(pidfd, buf, count);
+               if (ret < 0)
+                       return ret;
+
+               buf += ret;
+               count -= ret;
+               total_bytes += ret;
+       }
+
+       return total_bytes;
+}
+
 static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fanotify_event *event,
                                  char __user *buf, size_t count)
@@ -405,15 +530,15 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        struct fanotify_event_metadata metadata;
        struct path *path = fanotify_event_path(event);
        struct fanotify_info *info = fanotify_event_info(event);
-       unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+       unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES);
+       unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
        struct file *f = NULL;
-       int ret, fd = FAN_NOFD;
-       int info_type = 0;
+       int ret, pidfd = FAN_NOPIDFD, fd = FAN_NOFD;
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
        metadata.event_len = FAN_EVENT_METADATA_LEN +
-                               fanotify_event_info_len(fid_mode, event);
+                               fanotify_event_info_len(info_mode, event);
        metadata.metadata_len = FAN_EVENT_METADATA_LEN;
        metadata.vers = FANOTIFY_METADATA_VERSION;
        metadata.reserved = 0;
@@ -442,6 +567,33 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        }
        metadata.fd = fd;
 
+       if (pidfd_mode) {
+               /*
+                * Complain if the FAN_REPORT_PIDFD and FAN_REPORT_TID mutual
+                * exclusion is ever lifted. At the time of incorporating pidfd
+                * support within fanotify, the pidfd API only supported the
+                * creation of pidfds for thread-group leaders.
+                */
+               WARN_ON_ONCE(FAN_GROUP_FLAG(group, FAN_REPORT_TID));
+
+               /*
+                * The PIDTYPE_TGID check for an event->pid is performed
+                * preemptively in an attempt to catch cases where the event
+                * listener reads events after the event-generating process has
+                * already terminated. Report FAN_NOPIDFD to the event listener
+                * in those cases, with all other pidfd creation errors being
+                * reported as FAN_EPIDFD.
+                */
+               if (metadata.pid == 0 ||
+                   !pid_has_task(event->pid, PIDTYPE_TGID)) {
+                       pidfd = FAN_NOPIDFD;
+               } else {
+                       pidfd = pidfd_create(event->pid, 0);
+                       if (pidfd < 0)
+                               pidfd = FAN_EPIDFD;
+               }
+       }
+
        ret = -EFAULT;
        /*
         * Sanity check copy size in case get_one_event() and
@@ -462,67 +614,11 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        if (f)
                fd_install(fd, f);
 
-       /* Event info records order is: dir fid + name, child fid */
-       if (fanotify_event_dir_fh_len(event)) {
-               info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
-                                            FAN_EVENT_INFO_TYPE_DFID;
-               ret = copy_info_to_user(fanotify_event_fsid(event),
-                                       fanotify_info_dir_fh(info),
-                                       info_type, fanotify_info_name(info),
-                                       info->name_len, buf, count);
+       if (info_mode) {
+               ret = copy_info_records_to_user(event, info, info_mode, pidfd,
+                                               buf, count);
                if (ret < 0)
                        goto out_close_fd;
-
-               buf += ret;
-               count -= ret;
-       }
-
-       if (fanotify_event_object_fh_len(event)) {
-               const char *dot = NULL;
-               int dot_len = 0;
-
-               if (fid_mode == FAN_REPORT_FID || info_type) {
-                       /*
-                        * With only group flag FAN_REPORT_FID only type FID is
-                        * reported. Second info record type is always FID.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_FID;
-               } else if ((fid_mode & FAN_REPORT_NAME) &&
-                          (event->mask & FAN_ONDIR)) {
-                       /*
-                        * With group flag FAN_REPORT_NAME, if name was not
-                        * recorded in an event on a directory, report the
-                        * name "." with info type DFID_NAME.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_DFID_NAME;
-                       dot = ".";
-                       dot_len = 1;
-               } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
-                          (event->mask & FAN_ONDIR)) {
-                       /*
-                        * With group flag FAN_REPORT_DIR_FID, a single info
-                        * record has type DFID for directory entry modification
-                        * event and for event on a directory.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_DFID;
-               } else {
-                       /*
-                        * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID,
-                        * a single info record has type FID for event on a
-                        * non-directory, when there is no directory to report.
-                        * For example, on FAN_DELETE_SELF event.
-                        */
-                       info_type = FAN_EVENT_INFO_TYPE_FID;
-               }
-
-               ret = copy_info_to_user(fanotify_event_fsid(event),
-                                       fanotify_event_object_fh(event),
-                                       info_type, dot, dot_len, buf, count);
-               if (ret < 0)
-                       goto out_close_fd;
-
-               buf += ret;
-               count -= ret;
        }
 
        return metadata.event_len;
@@ -532,6 +628,10 @@ out_close_fd:
                put_unused_fd(fd);
                fput(f);
        }
+
+       if (pidfd >= 0)
+               close_fd(pidfd);
+
        return ret;
 }
 
@@ -1077,6 +1177,14 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 #endif
                return -EINVAL;
 
+       /*
+        * A pidfd can only be returned for a thread-group leader; thus
+        * FAN_REPORT_PIDFD and FAN_REPORT_TID need to remain mutually
+        * exclusive.
+        */
+       if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID))
+               return -EINVAL;
+
        if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
                return -EINVAL;
 
@@ -1478,7 +1586,7 @@ static int __init fanotify_user_setup(void)
                                     FANOTIFY_DEFAULT_MAX_USER_MARKS);
 
        BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
-       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
+       BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 11);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
 
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
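
A minimal userspace sketch, assuming a kernel and UAPI headers carrying this
series, of how a listener consumes the new FAN_EVENT_INFO_TYPE_PIDFD records;
the watched path and event mask below are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	struct fanotify_event_metadata *m;
	char buf[4096];
	ssize_t len;
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_PIDFD, O_RDONLY);

	if (fd < 0 || fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN | FAN_CLOSE,
				    AT_FDCWD, "/tmp") < 0)
		exit(EXIT_FAILURE);

	len = read(fd, buf, sizeof(buf));
	for (m = (void *)buf; FAN_EVENT_OK(m, len);
	     m = FAN_EVENT_NEXT(m, len)) {
		if (m->event_len > FAN_EVENT_METADATA_LEN) {
			struct fanotify_event_info_pidfd *p = (void *)(m + 1);

			/* pidfd < 0 means FAN_NOPIDFD or FAN_EPIDFD */
			if (p->hdr.info_type == FAN_EVENT_INFO_TYPE_PIDFD &&
			    p->pidfd >= 0) {
				printf("pid %d -> pidfd %d\n", m->pid, p->pidfd);
				close(p->pidfd);
			}
		}
		if (m->fd >= 0)
			close(m->fd);
	}
	return 0;
}
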
index 30d422b8c0fc7e13bdee316a2d058eab9d9efc73..963e6ce75b96113c5739a68698082b2aea7f3195 100644 (file)
@@ -87,15 +87,15 @@ static void fsnotify_unmount_inodes(struct super_block *sb)
 
        if (iput_inode)
                iput(iput_inode);
-       /* Wait for outstanding inode references from connectors */
-       wait_var_event(&sb->s_fsnotify_inode_refs,
-                      !atomic_long_read(&sb->s_fsnotify_inode_refs));
 }
 
 void fsnotify_sb_delete(struct super_block *sb)
 {
        fsnotify_unmount_inodes(sb);
        fsnotify_clear_marks_by_sb(sb);
+       /* Wait for outstanding object references from connectors */
+       wait_var_event(&sb->s_fsnotify_connectors,
+                      !atomic_long_read(&sb->s_fsnotify_connectors));
 }
 
 /*
index ff2063ec6b0f3c72f7d1c1a89cc44a46a301f98a..87d8a50ee80387fbf8a23d83923cfb6d4583a960 100644 (file)
@@ -27,6 +27,21 @@ static inline struct super_block *fsnotify_conn_sb(
        return container_of(conn->obj, struct super_block, s_fsnotify_marks);
 }
 
+static inline struct super_block *fsnotify_connector_sb(
+                               struct fsnotify_mark_connector *conn)
+{
+       switch (conn->type) {
+       case FSNOTIFY_OBJ_TYPE_INODE:
+               return fsnotify_conn_inode(conn)->i_sb;
+       case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
+               return fsnotify_conn_mount(conn)->mnt.mnt_sb;
+       case FSNOTIFY_OBJ_TYPE_SB:
+               return fsnotify_conn_sb(conn);
+       default:
+               return NULL;
+       }
+}
+
 /* destroy all events sitting in this groups notification queue */
 extern void fsnotify_flush_notify(struct fsnotify_group *group);
 
index 98f61b31745ab96e544d6a0fd5bdd1ba37692010..62051247f6d21d4b0fed55a914ccf37ae7e530c2 100644 (file)
@@ -55,22 +55,27 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 
 #include <linux/sysctl.h>
 
+static long it_zero = 0;
+static long it_int_max = INT_MAX;
+
 struct ctl_table inotify_table[] = {
        {
                .procname       = "max_user_instances",
                .data           = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &it_zero,
+               .extra2         = &it_int_max,
        },
        {
                .procname       = "max_user_watches",
                .data           = &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = SYSCTL_ZERO,
+               .proc_handler   = proc_doulongvec_minmax,
+               .extra1         = &it_zero,
+               .extra2         = &it_int_max,
        },
        {
                .procname       = "max_queued_events",
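
With proc_doulongvec_minmax() the limits are parsed as longs but still
bounded to [0, INT_MAX]; a small illustrative helper (hypothetical, not from
the patch) writing the standard procfs knob:

#include <stdio.h>

/* Returns 0 on success; the kernel enforces the [0, INT_MAX] bounds. */
static int set_max_user_watches(long n)
{
	FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", n);
	return fclose(f);
}
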
index d32ab349db74daf9101854d8ef5e4e6cb0e8026d..95006d1d29abe195d4dd188d8e34b7c698e138bc 100644 (file)
@@ -169,6 +169,37 @@ static void fsnotify_connector_destroy_workfn(struct work_struct *work)
        }
 }
 
+static void fsnotify_get_inode_ref(struct inode *inode)
+{
+       ihold(inode);
+       atomic_long_inc(&inode->i_sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_put_inode_ref(struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+
+       iput(inode);
+       if (atomic_long_dec_and_test(&sb->s_fsnotify_connectors))
+               wake_up_var(&sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_get_sb_connectors(struct fsnotify_mark_connector *conn)
+{
+       struct super_block *sb = fsnotify_connector_sb(conn);
+
+       if (sb)
+               atomic_long_inc(&sb->s_fsnotify_connectors);
+}
+
+static void fsnotify_put_sb_connectors(struct fsnotify_mark_connector *conn)
+{
+       struct super_block *sb = fsnotify_connector_sb(conn);
+
+       if (sb && atomic_long_dec_and_test(&sb->s_fsnotify_connectors))
+               wake_up_var(&sb->s_fsnotify_connectors);
+}
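
The helpers above follow the kernel's wait_var_event()/wake_up_var() pattern;
a condensed standalone sketch of that pairing, with a bare counter standing
in for sb->s_fsnotify_connectors:

#include <linux/atomic.h>
#include <linux/wait_bit.h>	/* wait_var_event(), wake_up_var() */

static atomic_long_t nr_connectors;

static void get_connector_ref(void)
{
	atomic_long_inc(&nr_connectors);
}

static void put_connector_ref(void)
{
	/* The last put wakes every waiter sleeping on this address */
	if (atomic_long_dec_and_test(&nr_connectors))
		wake_up_var(&nr_connectors);
}

static void wait_for_connectors(void)
{
	/* The condition is re-evaluated after each wake_up_var() */
	wait_var_event(&nr_connectors, !atomic_long_read(&nr_connectors));
}
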
+
 static void *fsnotify_detach_connector_from_object(
                                        struct fsnotify_mark_connector *conn,
                                        unsigned int *type)
@@ -182,13 +213,13 @@ static void *fsnotify_detach_connector_from_object(
        if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
                inode = fsnotify_conn_inode(conn);
                inode->i_fsnotify_mask = 0;
-               atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
        } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
                fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
        } else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
                fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
        }
 
+       fsnotify_put_sb_connectors(conn);
        rcu_assign_pointer(*(conn->obj), NULL);
        conn->obj = NULL;
        conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
@@ -209,19 +240,12 @@ static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
 /* Drop object reference originally held by a connector */
 static void fsnotify_drop_object(unsigned int type, void *objp)
 {
-       struct inode *inode;
-       struct super_block *sb;
-
        if (!objp)
                return;
        /* Currently only inode references are passed to be dropped */
        if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
                return;
-       inode = objp;
-       sb = inode->i_sb;
-       iput(inode);
-       if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
-               wake_up_var(&sb->s_fsnotify_inode_refs);
+       fsnotify_put_inode_ref(objp);
 }
 
 void fsnotify_put_mark(struct fsnotify_mark *mark)
@@ -493,8 +517,12 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
                conn->fsid.val[0] = conn->fsid.val[1] = 0;
                conn->flags = 0;
        }
-       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
-               inode = igrab(fsnotify_conn_inode(conn));
+       if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
+               inode = fsnotify_conn_inode(conn);
+               fsnotify_get_inode_ref(inode);
+       }
+       fsnotify_get_sb_connectors(conn);
+
        /*
         * cmpxchg() provides the barrier so that readers of *connp can see
         * only initialized structure
@@ -502,7 +530,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
        if (cmpxchg(connp, NULL, conn)) {
                /* Someone else created list structure for us */
                if (inode)
-                       iput(inode);
+                       fsnotify_put_inode_ref(inode);
                kmem_cache_free(fsnotify_mark_connector_cachep, conn);
        }
 
index fab7c6a4a7d07e6e68fdd6a74b8c3f0976136639..73a3854b2afb941a3d26648b95775f5534d07aba 100644 (file)
@@ -101,8 +101,6 @@ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
-       if (__mandatory_lock(inode))
-               return -ENOLCK;
 
        if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
            ocfs2_mount_local(osb))
@@ -121,8 +119,6 @@ int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
-               return -ENOLCK;
 
        return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
 }
index 94bef26ff1b616a23544da9e0d9232ef97aca034..daa324606a41f8141567cf4395d26458db76a924 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -105,9 +105,7 @@ long vfs_truncate(const struct path *path, loff_t length)
        if (error)
                goto put_write_and_out;
 
-       error = locks_verify_truncate(inode, NULL, length);
-       if (!error)
-               error = security_path_truncate(path);
+       error = security_path_truncate(path);
        if (!error)
                error = do_truncate(mnt_userns, path->dentry, length, 0, NULL);
 
@@ -189,9 +187,7 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
        if (IS_APPEND(file_inode(f.file)))
                goto out_putf;
        sb_start_write(inode->i_sb);
-       error = locks_verify_truncate(inode, f.file, length);
-       if (!error)
-               error = security_path_truncate(&f.file->f_path);
+       error = security_path_truncate(&f.file->f_path);
        if (!error)
                error = do_truncate(file_mnt_user_ns(f.file), dentry, length,
                                    ATTR_MTIME | ATTR_CTIME, f.file);
index 41ebf52f1bbce5a237378ffbfecd4b885a48b210..ebde05c9cf62e8e5a69fc87fb62c6431c434b8dc 100644 (file)
@@ -392,6 +392,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
         */
        take_dentry_name_snapshot(&name, real);
        this = lookup_one_len(name.name.name, connected, name.name.len);
+       release_dentry_name_snapshot(&name);
        err = PTR_ERR(this);
        if (IS_ERR(this)) {
                goto fail;
@@ -406,7 +407,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
        }
 
 out:
-       release_dentry_name_snapshot(&name);
        dput(parent);
        inode_unlock(dir);
        return this;
index 4d53d3b7e5fe17f95e3662c2cd2c34033ada58bc..d081faa55e830e3e66469475e5e236d107c8b681 100644 (file)
@@ -392,6 +392,51 @@ out_unlock:
        return ret;
 }
 
+/*
+ * Calling iter_file_splice_write() directly from overlay's f_op may deadlock
+ * due to lock order inversion between pipe->mutex in iter_file_splice_write()
+ * and file_start_write(real.file) in ovl_write_iter().
+ *
+ * So do everything ovl_write_iter() does and call iter_file_splice_write() on
+ * the real file.
+ */
+static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+                               loff_t *ppos, size_t len, unsigned int flags)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       struct inode *inode = file_inode(out);
+       struct inode *realinode = ovl_inode_real(inode);
+       ssize_t ret;
+
+       inode_lock(inode);
+       /* Update mode */
+       ovl_copyattr(realinode, inode);
+       ret = file_remove_privs(out);
+       if (ret)
+               goto out_unlock;
+
+       ret = ovl_real_fdget(out, &real);
+       if (ret)
+               goto out_unlock;
+
+       old_cred = ovl_override_creds(inode->i_sb);
+       file_start_write(real.file);
+
+       ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+
+       file_end_write(real.file);
+       /* Update size */
+       ovl_copyattr(realinode, inode);
+       revert_creds(old_cred);
+       fdput(real);
+
+out_unlock:
+       inode_unlock(inode);
+
+       return ret;
+}
+
 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct fd real;
@@ -603,7 +648,7 @@ const struct file_operations ovl_file_operations = {
        .fadvise        = ovl_fadvise,
        .flush          = ovl_flush,
        .splice_read    = generic_file_splice_read,
-       .splice_write   = iter_file_splice_write,
+       .splice_write   = ovl_splice_write,
 
        .copy_file_range        = ovl_copy_file_range,
        .remap_file_range       = ovl_remap_file_range,
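
ovl_splice_write() is reached through plain splice(2) on an overlayfs file; a
trivial userspace trigger (paths and flags illustrative, error handling
abbreviated):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Splice pipe contents into a file, exercising the f_op above when the
 * target lives on an overlay mount. */
static ssize_t pipe_to_file(int pipe_rfd, const char *path, size_t len)
{
	int fd = open(path, O_WRONLY | O_APPEND);
	ssize_t ret = splice(pipe_rfd, NULL, fd, NULL, len, 0);

	close(fd);
	return ret;
}
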
index e8ad2c2c77dd72ef16c237e4901cd1498ca02060..150fdf3bc68d4c812fe45a886c724e58c56f84aa 100644 (file)
@@ -481,6 +481,8 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
        }
        this = lookup_one_len(p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
+               /* Mark a stale entry */
+               p->is_whiteout = true;
                if (IS_ERR(this)) {
                        err = PTR_ERR(this);
                        this = NULL;
@@ -776,6 +778,9 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                                if (err)
                                        goto out;
                        }
+               }
+               /* ovl_cache_update_ino() sets is_whiteout on stale entry */
+               if (!p->is_whiteout) {
                        if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                break;
                }
index 8e6ef62aeb1c63cab8b45d11e9d73e6dbfdee93c..6d4342bad9f15b258134fec2374955588f901de4 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -363,10 +363,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                 * _very_ unlikely case that the pipe was full, but we got
                 * no data.
                 */
-               if (unlikely(was_full)) {
+               if (unlikely(was_full))
                        wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
-                       kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
-               }
+               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 
                /*
                 * But because we didn't read anything, at this point we can
@@ -385,12 +384,11 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                wake_next_reader = false;
        __pipe_unlock(pipe);
 
-       if (was_full) {
+       if (was_full)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
-               kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
-       }
        if (wake_next_reader)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
+       kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        if (ret > 0)
                file_accessed(filp);
        return ret;
@@ -444,9 +442,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 #endif
 
        /*
-        * Epoll nonsensically wants a wakeup whether the pipe
-        * was already empty or not.
-        *
         * If it wasn't empty we try to merge new data into
         * the last buffer.
         *
@@ -455,9 +450,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
         * spanning multiple pages.
         */
        head = pipe->head;
-       was_empty = true;
+       was_empty = pipe_empty(head, pipe->tail);
        chars = total_len & (PAGE_SIZE-1);
-       if (chars && !pipe_empty(head, pipe->tail)) {
+       if (chars && !was_empty) {
                unsigned int mask = pipe->ring_size - 1;
                struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
                int offset = buf->offset + buf->len;
@@ -568,10 +563,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
                 * become empty while we dropped the lock.
                 */
                __pipe_unlock(pipe);
-               if (was_empty) {
+               if (was_empty)
                        wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
-                       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
-               }
+               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
                __pipe_lock(pipe);
                was_empty = pipe_empty(pipe->head, pipe->tail);
@@ -590,11 +584,13 @@ out:
         * This is particularly important for small writes, because of
         * how (for example) the GNU make jobserver uses small writes to
         * wake up pending jobs
+        *
+        * Epoll nonsensically wants a wakeup whether the pipe
+        * was already empty or not.
         */
-       if (was_empty) {
+       if (was_empty || pipe->poll_usage)
                wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
-               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
-       }
+       kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        if (wake_next_writer)
                wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
@@ -654,6 +650,9 @@ pipe_poll(struct file *filp, poll_table *wait)
        struct pipe_inode_info *pipe = filp->private_data;
        unsigned int head, tail;
 
+       /* Epoll has some historical nasty semantics; this enables them */
+       pipe->poll_usage = 1;
+
        /*
         * Reading pipe state only -- no need for acquiring the semaphore.
         *
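
The wakeup change enabled by this flag matters most to edge-triggered
waiters; a userspace sketch of the semantics being preserved (epfd setup and
error handling abbreviated):

#include <errno.h>
#include <sys/epoll.h>

/* With EPOLLET, every write into the pipe must yield a fresh reader wakeup
 * even while older data is still queued; pipe->poll_usage makes pipe_write()
 * keep issuing those wakeups once the pipe has been polled. */
static int wait_for_pipe_data(int epfd, int pipe_rfd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLET,
		.data.fd = pipe_rfd,
	};

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipe_rfd, &ev) && errno != EEXIST)
		return -1;
	return epoll_wait(epfd, &ev, 1, -1);	/* 1 == readable */
}
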
index 9db7adf160d206d58cefe9c308513f476bc5d6e4..af057c57bdc6447177bfc03e276ba2cd63bd9401 100644 (file)
@@ -365,12 +365,8 @@ out_putf:
 
 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
 {
-       struct inode *inode;
-       int retval = -EINVAL;
-
-       inode = file_inode(file);
        if (unlikely((ssize_t) count < 0))
-               return retval;
+               return -EINVAL;
 
        /*
         * ranged mandatory locking does not apply to streams - it makes sense
@@ -381,19 +377,12 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
 
                if (unlikely(pos < 0)) {
                        if (!unsigned_offsets(file))
-                               return retval;
+                               return -EINVAL;
                        if (count >= -pos) /* both values are in 0..LLONG_MAX */
                                return -EOVERFLOW;
                } else if (unlikely((loff_t) (pos + count) < 0)) {
                        if (!unsigned_offsets(file))
-                               return retval;
-               }
-
-               if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
-                       retval = locks_mandatory_area(inode, file, pos, pos + count - 1,
-                                       read_write == READ ? F_RDLCK : F_WRLCK);
-                       if (retval < 0)
-                               return retval;
+                               return -EINVAL;
                }
        }
 
index e4a5fdd7ad7b095dd089477f361c7c8d7f0fc653..6d4a9beaa0974315c9bb3696f19318df2b59a8ae 100644 (file)
@@ -99,24 +99,12 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in,
 static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
                             bool write)
 {
-       struct inode *inode = file_inode(file);
-
        if (unlikely(pos < 0 || len < 0))
                return -EINVAL;
 
        if (unlikely((loff_t) (pos + len) < 0))
                return -EINVAL;
 
-       if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
-               loff_t end = len ? pos + len - 1 : OFFSET_MAX;
-               int retval;
-
-               retval = locks_mandatory_area(inode, file, pos, end,
-                               write ? F_WRLCK : F_RDLCK);
-               if (retval < 0)
-                       return retval;
-       }
-
        return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
 }
 
index c19dba45aa2096d010e536b2dab31ba62eb29824..70abdfad2df171a72fa63cc70ede2f8dcbd51e3c 100644 (file)
@@ -35,7 +35,6 @@
 #include "udf_i.h"
 #include "udf_sb.h"
 
-
 static int udf_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *dir = file_inode(file);
@@ -135,7 +134,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                lfi = cfi.lengthFileIdent;
 
                if (fibh.sbh == fibh.ebh) {
-                       nameptr = fi->fileIdent + liu;
+                       nameptr = udf_get_fi_ident(fi);
                } else {
                        int poffset;    /* Unpadded ending offset */
 
@@ -153,7 +152,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                                        }
                                }
                                nameptr = copy_name;
-                               memcpy(nameptr, fi->fileIdent + liu,
+                               memcpy(nameptr, udf_get_fi_ident(fi),
                                       lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
                                       fibh.ebh->b_data, poffset);
index 185c3e24764875e36957d5d9776798936334e892..de17a97e8667425ae77d4452dc84afafa8c1377d 100644 (file)
@@ -307,14 +307,14 @@ struct logicalVolDesc {
        struct regid            impIdent;
        uint8_t                 impUse[128];
        struct extent_ad        integritySeqExt;
-       uint8_t                 partitionMaps[0];
+       uint8_t                 partitionMaps[];
 } __packed;
 
 /* Generic Partition Map (ECMA 167r3 3/10.7.1) */
 struct genericPartitionMap {
        uint8_t         partitionMapType;
        uint8_t         partitionMapLength;
-       uint8_t         partitionMapping[0];
+       uint8_t         partitionMapping[];
 } __packed;
 
 /* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
@@ -342,7 +342,7 @@ struct unallocSpaceDesc {
        struct tag              descTag;
        __le32                  volDescSeqNum;
        __le32                  numAllocDescs;
-       struct extent_ad        allocDescs[0];
+       struct extent_ad        allocDescs[];
 } __packed;
 
 /* Terminating Descriptor (ECMA 167r3 3/10.9) */
@@ -360,9 +360,9 @@ struct logicalVolIntegrityDesc {
        uint8_t                 logicalVolContentsUse[32];
        __le32                  numOfPartitions;
        __le32                  lengthOfImpUse;
-       __le32                  freeSpaceTable[0];
-       __le32                  sizeTable[0];
-       uint8_t                 impUse[0];
+       __le32                  freeSpaceTable[];
+       /* __le32               sizeTable[]; */
+       /* uint8_t              impUse[]; */
 } __packed;
 
 /* Integrity Type (ECMA 167r3 3/10.10.3) */
@@ -471,9 +471,9 @@ struct fileIdentDesc {
        uint8_t         lengthFileIdent;
        struct long_ad  icb;
        __le16          lengthOfImpUse;
-       uint8_t         impUse[0];
-       uint8_t         fileIdent[0];
-       uint8_t         padding[0];
+       uint8_t         impUse[];
+       /* uint8_t      fileIdent[]; */
+       /* uint8_t      padding[]; */
 } __packed;
 
 /* File Characteristics (ECMA 167r3 4/14.4.3) */
@@ -578,8 +578,8 @@ struct fileEntry {
        __le64                  uniqueID;
        __le32                  lengthExtendedAttr;
        __le32                  lengthAllocDescs;
-       uint8_t                 extendedAttr[0];
-       uint8_t                 allocDescs[0];
+       uint8_t                 extendedAttr[];
+       /* uint8_t              allocDescs[]; */
 } __packed;
 
 /* Permissions (ECMA 167r3 4/14.9.5) */
@@ -632,7 +632,7 @@ struct genericFormat {
        uint8_t         attrSubtype;
        uint8_t         reserved[3];
        __le32          attrLength;
-       uint8_t         attrData[0];
+       uint8_t         attrData[];
 } __packed;
 
 /* Character Set Information (ECMA 167r3 4/14.10.3) */
@@ -643,7 +643,7 @@ struct charSetInfo {
        __le32          attrLength;
        __le32          escapeSeqLength;
        uint8_t         charSetType;
-       uint8_t         escapeSeq[0];
+       uint8_t         escapeSeq[];
 } __packed;
 
 /* Alternate Permissions (ECMA 167r3 4/14.10.4) */
@@ -682,7 +682,7 @@ struct infoTimesExtAttr {
        __le32          attrLength;
        __le32          dataLength;
        __le32          infoTimeExistence;
-       uint8_t         infoTimes[0];
+       uint8_t         infoTimes[];
 } __packed;
 
 /* Device Specification (ECMA 167r3 4/14.10.7) */
@@ -694,7 +694,7 @@ struct deviceSpec {
        __le32          impUseLength;
        __le32          majorDeviceIdent;
        __le32          minorDeviceIdent;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */
@@ -705,7 +705,7 @@ struct impUseExtAttr {
        __le32          attrLength;
        __le32          impUseLength;
        struct regid    impIdent;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */
@@ -716,7 +716,7 @@ struct appUseExtAttr {
        __le32          attrLength;
        __le32          appUseLength;
        struct regid    appIdent;
-       uint8_t         appUse[0];
+       uint8_t         appUse[];
 } __packed;
 
 #define EXTATTR_CHAR_SET               1
@@ -733,7 +733,7 @@ struct unallocSpaceEntry {
        struct tag      descTag;
        struct icbtag   icbTag;
        __le32          lengthAllocDescs;
-       uint8_t         allocDescs[0];
+       uint8_t         allocDescs[];
 } __packed;
 
 /* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
@@ -741,7 +741,7 @@ struct spaceBitmapDesc {
        struct tag      descTag;
        __le32          numOfBits;
        __le32          numOfBytes;
-       uint8_t         bitmap[0];
+       uint8_t         bitmap[];
 } __packed;
 
 /* Partition Integrity Entry (ECMA 167r3 4/14.13) */
@@ -780,7 +780,7 @@ struct pathComponent {
        uint8_t         componentType;
        uint8_t         lengthComponentIdent;
        __le16          componentFileVersionNum;
-       dchars          componentIdent[0];
+       dchars          componentIdent[];
 } __packed;
 
 /* File Entry (ECMA 167r3 4/14.17) */
@@ -809,8 +809,8 @@ struct extendedFileEntry {
        __le64                  uniqueID;
        __le32                  lengthExtendedAttr;
        __le32                  lengthAllocDescs;
-       uint8_t                 extendedAttr[0];
-       uint8_t                 allocDescs[0];
+       uint8_t                 extendedAttr[];
+       /* uint8_t              allocDescs[]; */
 } __packed;
 
 #endif /* _ECMA_167_H */
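
An aside on the [0] -> [] conversions above: with a genuine C99 flexible
array member, compiler and FORTIFY bounds checking can treat the trailing
data as unsized, and allocations are sized explicitly. A hypothetical helper
(not part of the patch) using struct_size():

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

static struct genericFormat *udf_alloc_ea(uint32_t data_len)
{
	struct genericFormat *gaf;

	/* sizeof(*gaf) plus data_len bytes for attrData[], with the
	 * size arithmetic checked for overflow */
	gaf = kzalloc(struct_size(gaf, attrData, data_len), GFP_KERNEL);
	if (gaf)
		gaf->attrLength = cpu_to_le32(struct_size(gaf, attrData,
							  data_len));
	return gaf;
}
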
index 4917670860a0103193915868dee9a353946e631c..1d6b7a50736bac681f03dc8af2ca7f8bb9f26681 100644 (file)
@@ -390,8 +390,7 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
                dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
                dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
                if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
-                                sfi->fileIdent +
-                                       le16_to_cpu(sfi->lengthOfImpUse))) {
+                                udf_get_fi_ident(sfi))) {
                        iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
                        brelse(dbh);
                        return NULL;
index eab94527340dc3174407e6d666aaa9e91d9fa288..1614d308d0f06039ee0a4b27e0e9e9f93c44e8da 100644 (file)
@@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
                else
                        offset = le32_to_cpu(eahd->appAttrLocation);
 
-               while (offset < iinfo->i_lenEAttr) {
+               while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
+                       uint32_t attrLength;
+
                        gaf = (struct genericFormat *)&ea[offset];
+                       attrLength = le32_to_cpu(gaf->attrLength);
+
+                       /* Detect undersized elements and buffer overflows */
+                       if ((attrLength < sizeof(*gaf)) ||
+                           (attrLength > (iinfo->i_lenEAttr - offset)))
+                               break;
+
                        if (le32_to_cpu(gaf->attrType) == type &&
                                        gaf->attrSubtype == subtype)
                                return gaf;
                        else
-                               offset += le32_to_cpu(gaf->attrLength);
+                               offset += attrLength;
                }
        }
 
index 7c7c9bbbfa5719743a6caefedf6bbcb208ec7a50..caeef08efed23c91ffe30a618fa45b58e65b0ad6 100644 (file)
@@ -74,12 +74,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
 
        if (fileident) {
                if (adinicb || (offset + lfi < 0)) {
-                       memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi);
+                       memcpy(udf_get_fi_ident(sfi), fileident, lfi);
                } else if (offset >= 0) {
                        memcpy(fibh->ebh->b_data + offset, fileident, lfi);
                } else {
-                       memcpy((uint8_t *)sfi->fileIdent + liu, fileident,
-                               -offset);
+                       memcpy(udf_get_fi_ident(sfi), fileident, -offset);
                        memcpy(fibh->ebh->b_data, fileident - offset,
                                lfi + offset);
                }
@@ -88,11 +87,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
        offset += lfi;
 
        if (adinicb || (offset + padlen < 0)) {
-               memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen);
+               memset(udf_get_fi_ident(sfi) + lfi, 0x00, padlen);
        } else if (offset >= 0) {
                memset(fibh->ebh->b_data + offset, 0x00, padlen);
        } else {
-               memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset);
+               memset(udf_get_fi_ident(sfi) + lfi, 0x00, -offset);
                memset(fibh->ebh->b_data, 0x00, padlen + offset);
        }
 
@@ -226,7 +225,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                lfi = cfi->lengthFileIdent;
 
                if (fibh->sbh == fibh->ebh) {
-                       nameptr = fi->fileIdent + liu;
+                       nameptr = udf_get_fi_ident(fi);
                } else {
                        int poffset;    /* Unpadded ending offset */
 
@@ -246,7 +245,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                                        }
                                }
                                nameptr = copy_name;
-                               memcpy(nameptr, fi->fileIdent + liu,
+                               memcpy(nameptr, udf_get_fi_ident(fi),
                                        lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
                                        fibh->ebh->b_data, poffset);
index 22bc4fb2feb90173cc8bc5c7f17873a89fff1150..157de0ec0cd530823ce6d403cc5e41102ab7319a 100644 (file)
@@ -111,7 +111,7 @@ struct logicalVolIntegrityDescImpUse {
        __le16          minUDFReadRev;
        __le16          minUDFWriteRev;
        __le16          maxUDFWriteRev;
-       uint8_t         impUse[0];
+       uint8_t         impUse[];
 } __packed;
 
 /* Implementation Use Volume Descriptor (UDF 2.60 2.2.7) */
@@ -178,15 +178,6 @@ struct metadataPartitionMap {
        uint8_t         reserved2[5];
 } __packed;
 
-/* Virtual Allocation Table (UDF 1.5 2.2.10) */
-struct virtualAllocationTable15 {
-       __le32          vatEntry[0];
-       struct regid    vatIdent;
-       __le32          previousVATICBLoc;
-} __packed;
-
-#define ICBTAG_FILE_TYPE_VAT15         0x00U
-
 /* Virtual Allocation Table (UDF 2.60 2.2.11) */
 struct virtualAllocationTable20 {
        __le16          lengthHeader;
@@ -199,8 +190,8 @@ struct virtualAllocationTable20 {
        __le16          minUDFWriteRev;
        __le16          maxUDFWriteRev;
        __le16          reserved;
-       uint8_t         impUse[0];
-       __le32          vatEntry[0];
+       uint8_t         impUse[];
+       /* __le32       vatEntry[]; */
 } __packed;
 
 #define ICBTAG_FILE_TYPE_VAT20         0xF8U
@@ -217,8 +208,7 @@ struct sparingTable {
        __le16          reallocationTableLen;
        __le16          reserved;
        __le32          sequenceNum;
-       struct sparingEntry
-                       mapEntry[0];
+       struct sparingEntry mapEntry[];
 } __packed;
 
 /* Metadata File (and Metadata Mirror File) (UDF 2.60 2.2.13.1) */
@@ -241,7 +231,7 @@ struct allocDescImpUse {
 /* FreeEASpace (UDF 2.60 3.3.4.5.1.1) */
 struct freeEaSpace {
        __le16          headerChecksum;
-       uint8_t         freeEASpace[0];
+       uint8_t         freeEASpace[];
 } __packed;
 
 /* DVD Copyright Management Information (UDF 2.60 3.3.4.5.1.2) */
@@ -265,7 +255,7 @@ struct LVExtensionEA {
 /* FreeAppEASpace (UDF 2.60 3.3.4.6.1) */
 struct freeAppEASpace {
        __le16          headerChecksum;
-       uint8_t         freeEASpace[0];
+       uint8_t         freeEASpace[];
 } __packed;
 
 /* UDF Defined System Stream (UDF 2.60 3.3.7) */
index 2f83c1204e20c4d1f9451fdac39a4731b8d03ed5..b2d7c57d06881e7e91368c8cbfd86fabac330e60 100644 (file)
@@ -108,16 +108,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
                return NULL;
        lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
        partnum = le32_to_cpu(lvid->numOfPartitions);
-       if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
-            offsetof(struct logicalVolIntegrityDesc, impUse)) /
-            (2 * sizeof(uint32_t)) < partnum) {
-               udf_err(sb, "Logical volume integrity descriptor corrupted "
-                       "(numOfPartitions = %u)!\n", partnum);
-               return NULL;
-       }
        /* The offset is to skip freeSpaceTable and sizeTable arrays */
        offset = partnum * 2 * sizeof(uint32_t);
-       return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
+       return (struct logicalVolIntegrityDescImpUse *)
+                                       (((uint8_t *)(lvid + 1)) + offset);
 }
 
 /* UDF filesystem type */
@@ -349,10 +343,10 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
        if (sbi->s_anchor != 0)
                seq_printf(seq, ",anchor=%u", sbi->s_anchor);
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
-               seq_puts(seq, ",utf8");
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
+       if (sbi->s_nls_map)
                seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
+       else
+               seq_puts(seq, ",iocharset=utf8");
 
        return 0;
 }
@@ -558,19 +552,24 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
                        /* Ignored (never implemented properly) */
                        break;
                case Opt_utf8:
-                       uopt->flags |= (1 << UDF_FLAG_UTF8);
+                       if (!remount) {
+                               unload_nls(uopt->nls_map);
+                               uopt->nls_map = NULL;
+                       }
                        break;
                case Opt_iocharset:
                        if (!remount) {
-                               if (uopt->nls_map)
-                                       unload_nls(uopt->nls_map);
-                               /*
-                                * load_nls() failure is handled later in
-                                * udf_fill_super() after all options are
-                                * parsed.
-                                */
+                               unload_nls(uopt->nls_map);
+                               uopt->nls_map = NULL;
+                       }
+                       /* When no nls_map is loaded, UTF-8 is used */
+                       if (!remount && strcmp(args[0].from, "utf8") != 0) {
                                uopt->nls_map = load_nls(args[0].from);
-                               uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+                               if (!uopt->nls_map) {
+                                       pr_err("iocharset %s not found\n",
+                                               args[0].from);
+                                       return 0;
+                               }
                        }
                        break;
                case Opt_uforget:
@@ -1542,6 +1541,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;
        int indirections = 0;
+       u32 parts, impuselen;
 
        while (++indirections <= UDF_MAX_LVID_NESTING) {
                final_bh = NULL;
@@ -1568,15 +1568,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
 
                lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
                if (lvid->nextIntegrityExt.extLength == 0)
-                       return;
+                       goto check;
 
                loc = leea_to_cpu(lvid->nextIntegrityExt);
        }
 
        udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
                UDF_MAX_LVID_NESTING);
+out_err:
        brelse(sbi->s_lvid_bh);
        sbi->s_lvid_bh = NULL;
+       return;
+check:
+       parts = le32_to_cpu(lvid->numOfPartitions);
+       impuselen = le32_to_cpu(lvid->lengthOfImpUse);
+       if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
+           sizeof(struct logicalVolIntegrityDesc) + impuselen +
+           2 * parts * sizeof(u32) > sb->s_blocksize) {
+               udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
+                        "ignoring.\n", parts, impuselen);
+               goto out_err;
+       }
 }
 
 /*
@@ -2139,21 +2151,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        if (!udf_parse_options((char *)options, &uopt, false))
                goto parse_options_failure;
 
-       if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
-           uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
-               udf_err(sb, "utf8 cannot be combined with iocharset\n");
-               goto parse_options_failure;
-       }
-       if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
-               uopt.nls_map = load_nls_default();
-               if (!uopt.nls_map)
-                       uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
-               else
-                       udf_debug("Using default NLS map\n");
-       }
-       if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
-               uopt.flags |= (1 << UDF_FLAG_UTF8);
-
        fileset.logicalBlockNum = 0xFFFFFFFF;
        fileset.partitionReferenceNum = 0xFFFF;
 
@@ -2308,8 +2305,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 error_out:
        iput(sbi->s_vat_inode);
 parse_options_failure:
-       if (uopt.nls_map)
-               unload_nls(uopt.nls_map);
+       unload_nls(uopt.nls_map);
        if (lvid_open)
                udf_close_lvid(sb);
        brelse(sbi->s_lvid_bh);
@@ -2359,8 +2355,7 @@ static void udf_put_super(struct super_block *sb)
        sbi = UDF_SB(sb);
 
        iput(sbi->s_vat_inode);
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
-               unload_nls(sbi->s_nls_map);
+       unload_nls(sbi->s_nls_map);
        if (!sb_rdonly(sb))
                udf_close_lvid(sb);
        brelse(sbi->s_lvid_bh);
index 758efe557a199b61bc366a32545e9fcb627c49fe..4fa620543d302db892b59a399b92619632e5aa56 100644 (file)
@@ -20,8 +20,6 @@
 #define UDF_FLAG_UNDELETE              6
 #define UDF_FLAG_UNHIDE                        7
 #define UDF_FLAG_VARCONV               8
-#define UDF_FLAG_NLS_MAP               9
-#define UDF_FLAG_UTF8                  10
 #define UDF_FLAG_UID_FORGET     11    /* save -1 for uid to disk */
 #define UDF_FLAG_GID_FORGET     12
 #define UDF_FLAG_UID_SET       13
index 9dd0814f10771c27cd33872660fcf2e62bee7e30..7e258f15b8ef52772fe4ec362a47c3d64bf5473d 100644 (file)
@@ -130,6 +130,10 @@ static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
                le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
                UDF_NAME_PAD);
 }
+static inline uint8_t *udf_get_fi_ident(struct fileIdentDesc *fi)
+{
+       return ((uint8_t *)(fi + 1)) + le16_to_cpu(fi->lengthOfImpUse);
+}
 
 /* file.c */
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
index 5fcfa96463ebb820cef33b83460fc8d1a9bea8b2..622569007b530bf848b98ddb7f58f45a7c442edb 100644 (file)
@@ -177,7 +177,7 @@ static int udf_name_from_CS0(struct super_block *sb,
                return 0;
        }
 
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+       if (UDF_SB(sb)->s_nls_map)
                conv_f = UDF_SB(sb)->s_nls_map->uni2char;
        else
                conv_f = NULL;
@@ -285,7 +285,7 @@ static int udf_name_to_CS0(struct super_block *sb,
        if (ocu_max_len <= 0)
                return 0;
 
-       if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
+       if (UDF_SB(sb)->s_nls_map)
                conv_f = UDF_SB(sb)->s_nls_map->char2uni;
        else
                conv_f = NULL;
index 213a97a921bb9fa2b6f2e0cda0f526110ce1892f..1cd3f940fa6aedce0f2912b42fb7b5cc5914b082 100644 (file)
@@ -1626,7 +1626,6 @@ xfs_swap_extents(
        struct xfs_bstat        *sbp = &sxp->sx_stat;
        int                     src_log_flags, target_log_flags;
        int                     error = 0;
-       int                     lock_flags;
        uint64_t                f;
        int                     resblks = 0;
        unsigned int            flags = 0;
@@ -1638,8 +1637,8 @@ xfs_swap_extents(
         * do the rest of the checks.
         */
        lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
-       lock_flags = XFS_MMAPLOCK_EXCL;
-       xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
+                                   VFS_I(tip)->i_mapping);
 
        /* Verify that both files have the same format */
        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
@@ -1711,7 +1710,6 @@ xfs_swap_extents(
         * or cancel will unlock the inodes from this point onwards.
         */
        xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
-       lock_flags |= XFS_ILOCK_EXCL;
        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_ijoin(tp, tip, 0);
 
@@ -1830,13 +1828,16 @@ xfs_swap_extents(
        trace_xfs_swap_extent_after(ip, 0);
        trace_xfs_swap_extent_after(tip, 1);
 
+out_unlock_ilock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       xfs_iunlock(tip, XFS_ILOCK_EXCL);
 out_unlock:
-       xfs_iunlock(ip, lock_flags);
-       xfs_iunlock(tip, lock_flags);
+       filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
+                                     VFS_I(tip)->i_mapping);
        unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
        return error;
 
 out_trans_cancel:
        xfs_trans_cancel(tp);
-       goto out_unlock;
+       goto out_unlock_ilock;
 }
index cc3cfb12df53446cd9e09839fe8d46fe33f4f1bc..3dfbdcdb0d1ce5c3ae036cbb8b041f24588bbecf 100644 (file)
@@ -1302,7 +1302,7 @@ xfs_file_llseek(
  *
  * mmap_lock (MM)
  *   sb_start_pagefault(vfs, freeze)
- *     i_mmaplock (XFS - truncate serialisation)
+ *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
  *       page_lock (MM)
  *         i_lock (XFS - extent map serialisation)
  */
@@ -1323,24 +1323,27 @@ __xfs_filemap_fault(
                file_update_time(vmf->vma->vm_file);
        }
 
-       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
        if (IS_DAX(inode)) {
                pfn_t pfn;
 
+               xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
                ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
                                (write_fault && !vmf->cow_page) ?
                                 &xfs_direct_write_iomap_ops :
                                 &xfs_read_iomap_ops);
                if (ret & VM_FAULT_NEEDDSYNC)
                        ret = dax_finish_sync_fault(vmf, pe_size, pfn);
+               xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
        } else {
-               if (write_fault)
+               if (write_fault) {
+                       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
                        ret = iomap_page_mkwrite(vmf,
                                        &xfs_buffered_write_iomap_ops);
-               else
+                       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+               } else {
                        ret = filemap_fault(vmf);
+               }
        }
-       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        if (write_fault)
                sb_end_pagefault(inode->i_sb);
index 990b72ae36350a3ccfd41b3074a2538eafa7c531..f00145e1a976ef60ccdbd6249351d8f65ff6c908 100644 (file)
@@ -132,7 +132,7 @@ xfs_ilock_attr_map_shared(
 
 /*
  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
- * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
+ * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
  * various combinations of the locks to be obtained.
  *
  * The 3 locks should always be ordered so that the IO lock is obtained first,
@@ -140,23 +140,23 @@ xfs_ilock_attr_map_shared(
  *
  * Basic locking order:
  *
- * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
+ * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
  *
  * mmap_lock locking order:
  *
  * i_rwsem -> page lock -> mmap_lock
- * mmap_lock -> i_mmap_lock -> page_lock
+ * mmap_lock -> invalidate_lock -> page_lock
  *
  * The difference in mmap_lock locking order mean that we cannot hold the
- * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
- * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
- * in get_user_pages() to map the user pages into the kernel address space for
- * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
- * page faults already hold the mmap_lock.
+ * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
+ * can fault in pages during copy in/out (for buffered IO) or require the
+ * mmap_lock in get_user_pages() to map the user pages into the kernel address
+ * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
+ * fault because page faults already hold the mmap_lock.
  *
  * Hence to serialise fully against both syscall and mmap based IO, we need to
- * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
- * taken in places where we need to invalidate the page cache in a race
+ * take both the i_rwsem and the invalidate_lock. These locks should *only* be
+ * both taken in places where we need to invalidate the page cache in a race
  * free manner (e.g. truncate, hole punch and other extent manipulation
  * functions).
  */
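
A condensed sketch of the VFS helpers that XFS_MMAPLOCK now maps onto
(filemap_invalidate_lock() and friends come from earlier patches in this
series; the hole-punch body is purely illustrative):

#include <linux/fs.h>
#include <linux/mm.h>

/* Exclusive invalidate_lock blocks page faults while the page cache is
 * torn down -- the serialisation XFS_MMAPLOCK_EXCL used to provide. */
static void invalidate_range_sketch(struct inode *inode, loff_t start,
				    loff_t end)
{
	filemap_invalidate_lock(inode->i_mapping);
	truncate_pagecache_range(inode, start, end);
	filemap_invalidate_unlock(inode->i_mapping);
}
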
@@ -188,10 +188,13 @@ xfs_ilock(
                                 XFS_IOLOCK_DEP(lock_flags));
        }
 
-       if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
-       else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
+       if (lock_flags & XFS_MMAPLOCK_EXCL) {
+               down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+                                 XFS_MMAPLOCK_DEP(lock_flags));
+       } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
+               down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+                                XFS_MMAPLOCK_DEP(lock_flags));
+       }
 
        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
@@ -240,10 +243,10 @@ xfs_ilock_nowait(
        }
 
        if (lock_flags & XFS_MMAPLOCK_EXCL) {
-               if (!mrtryupdate(&ip->i_mmaplock))
+               if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
-               if (!mrtryaccess(&ip->i_mmaplock))
+               if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
                        goto out_undo_iolock;
        }
 
@@ -258,9 +261,9 @@ xfs_ilock_nowait(
 
 out_undo_mmaplock:
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrunlock_excl(&ip->i_mmaplock);
+               up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mrunlock_shared(&ip->i_mmaplock);
+               up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
@@ -307,9 +310,9 @@ xfs_iunlock(
                up_read(&VFS_I(ip)->i_rwsem);
 
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrunlock_excl(&ip->i_mmaplock);
+               up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
-               mrunlock_shared(&ip->i_mmaplock);
+               up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 
        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
@@ -335,7 +338,7 @@ xfs_ilock_demote(
        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_MMAPLOCK_EXCL)
-               mrdemote(&ip->i_mmaplock);
+               downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                downgrade_write(&VFS_I(ip)->i_rwsem);
 
@@ -343,9 +346,29 @@ xfs_ilock_demote(
 }
 
 #if defined(DEBUG) || defined(XFS_WARN)
-int
+static inline bool
+__xfs_rwsem_islocked(
+       struct rw_semaphore     *rwsem,
+       bool                    shared)
+{
+       if (!debug_locks)
+               return rwsem_is_locked(rwsem);
+
+       if (!shared)
+               return lockdep_is_held_type(rwsem, 0);
+
+       /*
+        * We are checking that the lock is held at least in shared
+        * mode but don't care that it might be held exclusively
+        * (i.e. shared | excl). Hence we check if the lock is held
+        * in any mode rather than an explicit shared mode.
+        */
+       return lockdep_is_held_type(rwsem, -1);
+}
+
+bool
 xfs_isilocked(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        uint                    lock_flags)
 {
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
@@ -355,20 +378,17 @@ xfs_isilocked(
        }
 
        if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
-               if (!(lock_flags & XFS_MMAPLOCK_SHARED))
-                       return !!ip->i_mmaplock.mr_writer;
-               return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
+               return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
+                               (lock_flags & XFS_IOLOCK_SHARED));
        }
 
-       if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
-               if (!(lock_flags & XFS_IOLOCK_SHARED))
-                       return !debug_locks ||
-                               lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
-               return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
+       if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
+               return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
+                               (lock_flags & XFS_IOLOCK_SHARED));
        }
 
        ASSERT(0);
-       return 0;
+       return false;
 }
 #endif
 
@@ -532,12 +552,10 @@ again:
 }
 
 /*
- * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
- * the mmaplock or the ilock, but not more than one type at a time. If we lock
- * more than one at a time, lockdep will report false positives saying we have
- * violated locking orders.  The iolock must be double-locked separately since
- * we use i_rwsem for that.  We now support taking one lock EXCL and the other
- * SHARED.
+ * xfs_lock_two_inodes() can only be used to lock the ilock. The iolock and
+ * mmaplock must be double-locked separately since we use i_rwsem and
+ * invalidate_lock for that. We now support taking one lock EXCL and the
+ * other SHARED.
  */
 void
 xfs_lock_two_inodes(
@@ -555,15 +573,8 @@ xfs_lock_two_inodes(
        ASSERT(hweight32(ip1_mode) == 1);
        ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
        ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
-       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
-              !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-
+       ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
+       ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
        ASSERT(ip0->i_ino != ip1->i_ino);
 
        if (ip0->i_ino > ip1->i_ino) {
@@ -3741,11 +3752,8 @@ xfs_ilock2_io_mmap(
        ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
        if (ret)
                return ret;
-       if (ip1 == ip2)
-               xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
-       else
-               xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
-                                   ip2, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
+                                   VFS_I(ip2)->i_mapping);
        return 0;
 }
 
@@ -3755,12 +3763,9 @@ xfs_iunlock2_io_mmap(
        struct xfs_inode        *ip1,
        struct xfs_inode        *ip2)
 {
-       bool                    same_inode = (ip1 == ip2);
-
-       xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
-       if (!same_inode)
-               xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
+       filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
+                                     VFS_I(ip2)->i_mapping);
        inode_unlock(VFS_I(ip2));
-       if (!same_inode)
+       if (ip1 != ip2)
                inode_unlock(VFS_I(ip1));
 }
index 4b6703dbffb8fc9d8abac72e62c5d07e5c2a02e1..e0ae905554e25519dad1be21556ef6537d469c13 100644 (file)
@@ -40,7 +40,6 @@ typedef struct xfs_inode {
        /* Transaction and locking information. */
        struct xfs_inode_log_item *i_itemp;     /* logging information */
        mrlock_t                i_lock;         /* inode lock */
-       mrlock_t                i_mmaplock;     /* inode mmap IO lock */
        atomic_t                i_pincount;     /* inode pin count */
 
        /*
@@ -410,7 +409,7 @@ void                xfs_ilock(xfs_inode_t *, uint);
 int            xfs_ilock_nowait(xfs_inode_t *, uint);
 void           xfs_iunlock(xfs_inode_t *, uint);
 void           xfs_ilock_demote(xfs_inode_t *, uint);
-int            xfs_isilocked(xfs_inode_t *, uint);
+bool           xfs_isilocked(struct xfs_inode *, uint);
 uint           xfs_ilock_data_map_shared(struct xfs_inode *);
 uint           xfs_ilock_attr_map_shared(struct xfs_inode *);
 
index 2c9e26a44546b8d6085ac92a1ae2c9fa43c58f47..102cbd6066331e73eccf90636919cc8a141dfcd3 100644 (file)
@@ -709,8 +709,6 @@ xfs_fs_inode_init_once(
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);
 
-       mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
-                    "xfsino", ip->i_ino);
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", ip->i_ino);
 }
index 70055d486bf70be1f7d1cb5dcb64d3d03e5da957..ddc346a9df9ba53a40531b2465beb28c2eb99c94 100644 (file)
@@ -462,7 +462,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize)
        inode_dio_wait(inode);
 
        /* Serialize against page faults */
-       down_write(&zi->i_mmap_sem);
+       filemap_invalidate_lock(inode->i_mapping);
 
        /* Serialize against zonefs_iomap_begin() */
        mutex_lock(&zi->i_truncate_mutex);
@@ -500,7 +500,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize)
 
 unlock:
        mutex_unlock(&zi->i_truncate_mutex);
-       up_write(&zi->i_mmap_sem);
+       filemap_invalidate_unlock(inode->i_mapping);
 
        return ret;
 }
@@ -575,18 +575,6 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
        return ret;
 }
 
-static vm_fault_t zonefs_filemap_fault(struct vm_fault *vmf)
-{
-       struct zonefs_inode_info *zi = ZONEFS_I(file_inode(vmf->vma->vm_file));
-       vm_fault_t ret;
-
-       down_read(&zi->i_mmap_sem);
-       ret = filemap_fault(vmf);
-       up_read(&zi->i_mmap_sem);
-
-       return ret;
-}
-
 static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
 {
        struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -607,16 +595,16 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
        file_update_time(vmf->vma->vm_file);
 
        /* Serialize against truncates */
-       down_read(&zi->i_mmap_sem);
+       filemap_invalidate_lock_shared(inode->i_mapping);
        ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
-       up_read(&zi->i_mmap_sem);
+       filemap_invalidate_unlock_shared(inode->i_mapping);
 
        sb_end_pagefault(inode->i_sb);
        return ret;
 }
 
 static const struct vm_operations_struct zonefs_file_vm_ops = {
-       .fault          = zonefs_filemap_fault,
+       .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = zonefs_filemap_page_mkwrite,
 };
@@ -1155,7 +1143,6 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
 
        inode_init_once(&zi->i_vnode);
        mutex_init(&zi->i_truncate_mutex);
-       init_rwsem(&zi->i_mmap_sem);
        zi->i_wr_refcnt = 0;
 
        return &zi->i_vnode;
index 51141907097c4b8a5f1dee4c5f04e4d3bc1af800..7b147907c328ed2615b718c4cb17e2e18f148dad 100644 (file)
@@ -70,12 +70,11 @@ struct zonefs_inode_info {
         * and changes to the inode private data, and in particular changes to
         * a sequential file size on completion of direct IO writes.
         * Serialization of mmap read IOs with truncate and syscall IO
-        * operations is done with i_mmap_sem in addition to i_truncate_mutex.
-        * Only zonefs_seq_file_truncate() takes both lock (i_mmap_sem first,
-        * i_truncate_mutex second).
+        * operations is done with invalidate_lock in addition to
+        * i_truncate_mutex.  Only zonefs_seq_file_truncate() takes both lock
+        * (invalidate_lock first, i_truncate_mutex second).
         */
        struct mutex            i_truncate_mutex;
-       struct rw_semaphore     i_mmap_sem;
 
        /* guarded by i_truncate_mutex */
        unsigned int            i_wr_refcnt;
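
Per the comment above, the only place both locks nest is the truncate path; the ordering there now looks like this (sketch, condensed from the zonefs_file_truncate() hunk earlier in this file):

	filemap_invalidate_lock(inode->i_mapping);	/* invalidate_lock first */
	mutex_lock(&zi->i_truncate_mutex);		/* i_truncate_mutex second */
	/* ... shrink or grow the zone file ... */
	mutex_unlock(&zi->i_truncate_mutex);
	filemap_invalidate_unlock(inode->i_mapping);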
index 17325416e2dee2e400dfb531e9bc5ffc64e13306..62669b36a772e7a77a0e5ea723fa3d38681830d3 100644 (file)
                NOINSTR_TEXT                                            \
                *(.text..refcount)                                      \
                *(.ref.text)                                            \
+               *(.text.asan.* .text.tsan.*)                            \
                TEXT_CFI_JT                                             \
        MEM_KEEP(init.text*)                                            \
        MEM_KEEP(exit.text*)                                            \
index 47accec68cb0fb89498d9c7174b371fa6f0f5e7e..f603325c0c30d56355991f36d5fbbfcf4de9066c 100644 (file)
@@ -38,9 +38,9 @@ extern void public_key_free(struct public_key *key);
 struct public_key_signature {
        struct asymmetric_key_id *auth_ids[2];
        u8 *s;                  /* Signature */
-       u32 s_size;             /* Number of bytes in signature */
        u8 *digest;
-       u8 digest_size;         /* Number of bytes in digest */
+       u32 s_size;             /* Number of bytes in signature */
+       u32 digest_size;        /* Number of bytes in digest */
        const char *pkey_algo;
        const char *hash_algo;
        const char *encoding;
index 7afd730d16ffbbb8935b4136cb150cdd8d68eda4..709f286e7b25355e60e1aa007ad84ee5f67ac9b5 100644 (file)
@@ -3,6 +3,7 @@
 /*
  * Common values for the SM4 algorithm
  * Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
  */
 
 #ifndef _CRYPTO_SM4_H
 #define SM4_BLOCK_SIZE 16
 #define SM4_RKEY_WORDS 32
 
-struct crypto_sm4_ctx {
+struct sm4_ctx {
        u32 rkey_enc[SM4_RKEY_WORDS];
        u32 rkey_dec[SM4_RKEY_WORDS];
 };
 
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-                      unsigned int key_len);
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx:       The location where the computed key will be stored.
+ * @in_key:    The supplied key.
+ * @key_len:   The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
                          unsigned int key_len);
 
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk:                The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out:       Buffer to store output data
+ * @in:        Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
 
 #endif
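
A minimal sketch of the renamed library interface (hypothetical caller; SM4_KEY_SIZE is assumed to be the 16-byte key length from this header):

	static int example_sm4_encrypt(const u8 *key, const u8 *in, u8 *out)
	{
		struct sm4_ctx ctx;

		if (sm4_expandkey(&ctx, key, SM4_KEY_SIZE))
			return -EINVAL;
		sm4_crypt_block(ctx.rkey_enc, out, in);	/* rkey_dec decrypts */
		return 0;
	}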
index d918bf321a71cb4a9ed6e634dd907dbebd027175..3205699b5e41b18f8e9d678b1c0dba205c7503c2 100644 (file)
@@ -16,4 +16,8 @@
 #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW       1
 #define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH      2
 
+/* Polarity of INOK signal */
+#define SMB3XX_SYSOK_INOK_ACTIVE_LOW           0
+#define SMB3XX_SYSOK_INOK_ACTIVE_HIGH          1
+
 #endif
index 8b77d08d4b47f333d8d9be9c14706dffd0267fca..6c9b10d82c809a392256b6c748b47a99d35b9ec1 100644 (file)
@@ -201,8 +201,8 @@ static inline void bpf_cgroup_storage_unset(void)
 {
        int i;
 
-       for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-               if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+       for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+               if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;
 
                this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
index 59940f1744c1979add88a25a684ec11ffeafe015..65d84b67b024a899c99ce7f579115c2f958305cc 100644 (file)
@@ -407,6 +407,7 @@ struct dev_links_info {
  * @em_pd:     device's energy model performance domain
  * @pins:      For device pin management.
  *             See Documentation/driver-api/pin-control.rst for details.
+ * @msi_lock:  Lock to protect MSI mask cache and mask register
  * @msi_list:  Hosts MSI descriptors
  * @msi_domain: The generic MSI domain this device is using.
  * @numa_node: NUMA node this device is close to.
@@ -506,6 +507,7 @@ struct device {
        struct dev_pin_info     *pins;
 #endif
 #ifdef CONFIG_GENERIC_MSI_IRQ
+       raw_spinlock_t          msi_lock;
        struct list_head        msi_list;
 #endif
 #ifdef CONFIG_DMA_OPS
index 76d3562d30061912070f9103941317e65b5b3c32..4207d06996a4fb30a53b94d881b2a0e969e97f20 100644 (file)
@@ -184,6 +184,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
  * @MEM_DDR5:          Unbuffered DDR5 RAM
  * @MEM_NVDIMM:                Non-volatile RAM
  * @MEM_WIO2:          Wide I/O 2.
+ * @MEM_HBM2:          High bandwidth Memory Gen 2.
  */
 enum mem_type {
        MEM_EMPTY = 0,
@@ -212,6 +213,7 @@ enum mem_type {
        MEM_DDR5,
        MEM_NVDIMM,
        MEM_WIO2,
+       MEM_HBM2,
 };
 
 #define MEM_FLAG_EMPTY         BIT(MEM_EMPTY)
@@ -239,6 +241,7 @@ enum mem_type {
 #define MEM_FLAG_DDR5           BIT(MEM_DDR5)
 #define MEM_FLAG_NVDIMM         BIT(MEM_NVDIMM)
 #define MEM_FLAG_WIO2          BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2          BIT(MEM_HBM2)
 
 /**
  * enum edac_type - Error Detection and Correction capabilities and mode
index a16dbeced15289d1c9f06eaf55a25865e532dc8f..eec3b7c40811528dfab87563cacffea4396a6021 100644 (file)
@@ -27,6 +27,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
 
 #define FANOTIFY_FID_BITS      (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)
 
+#define FANOTIFY_INFO_MODES    (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+
 /*
  * fanotify_init() flags that require CAP_SYS_ADMIN.
  * We do not allow unprivileged groups to request permission events.
@@ -35,6 +37,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
  */
 #define FANOTIFY_ADMIN_INIT_FLAGS      (FANOTIFY_PERM_CLASSES | \
                                         FAN_REPORT_TID | \
+                                        FAN_REPORT_PIDFD | \
                                         FAN_UNLIMITED_QUEUE | \
                                         FAN_UNLIMITED_MARKS)
 
index 4e624c4665837337336ee60e285652a87988cb24..c50882f19235a2d5116220653623b4301f9dbf98 100644 (file)
@@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
                            u64 phys, u64 len, u32 flags);
 
-int generic_block_fiemap(struct inode *inode,
-               struct fiemap_extent_info *fieinfo, u64 start, u64 len,
-               get_block_t *get_block);
-
 #endif /* _LINUX_FIEMAP_H 1 */
index 640574294216c03fb68876e0f3aa8cd7ee98e13a..7193457239118d2864b2d00826a4c30e9d7331e8 100644 (file)
@@ -436,6 +436,10 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
  * struct address_space - Contents of a cacheable, mappable object.
  * @host: Owner, either the inode or the block_device.
  * @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ *   file offset->disk block mappings in the filesystem during invalidates.
+ *   It is also used to block modification of page cache contents through
+ *   memory mappings.
  * @gfp_mask: Memory allocation flags to use for allocating pages.
  * @i_mmap_writable: Number of VM_SHARED mappings.
  * @nr_thps: Number of THPs in the pagecache (non-shmem only).
@@ -453,6 +457,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
 struct address_space {
        struct inode            *host;
        struct xarray           i_pages;
+       struct rw_semaphore     invalidate_lock;
        gfp_t                   gfp_mask;
        atomic_t                i_mmap_writable;
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -814,9 +819,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla
        down_read_nested(&inode->i_rwsem, subclass);
 }
 
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+       down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+       up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+       down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+                                       struct address_space *mapping)
+{
+       return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+                                       struct address_space *mapping)
+{
+       up_read(&mapping->invalidate_lock);
+}
+
 void lock_two_nondirectories(struct inode *, struct inode*);
 void unlock_two_nondirectories(struct inode *, struct inode*);
 
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+                                struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+                                  struct address_space *mapping2);
+
 /*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
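
A rough sketch of the intended discipline for the new lock (hypothetical filesystem code): invalidating paths take it exclusively, fault paths take it shared:

	/* Hole punch / truncate side (writer). */
	filemap_invalidate_lock(inode->i_mapping);
	truncate_pagecache_range(inode, start, end);
	/* ... free the underlying blocks ... */
	filemap_invalidate_unlock(inode->i_mapping);

	/* Page fault side (reader), cf. the zonefs conversion above;
	 * example_iomap_ops is hypothetical. */
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
	filemap_invalidate_unlock_shared(inode->i_mapping);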
@@ -1507,8 +1545,11 @@ struct super_block {
        /* Number of inodes with nlink == 0 but still referenced */
        atomic_long_t s_remove_count;
 
-       /* Pending fsnotify inode refs */
-       atomic_long_t s_fsnotify_inode_refs;
+       /*
+        * Number of inode/mount/sb objects that are being watched, note that
+        * inode objects are currently double-accounted.
+        */
+       atomic_long_t s_fsnotify_connectors;
 
        /* Being remounted read-only */
        int s_readonly_remount;
@@ -2487,6 +2528,7 @@ struct file_system_type {
 
        struct lock_class_key i_lock_key;
        struct lock_class_key i_mutex_key;
+       struct lock_class_key invalidate_lock_key;
        struct lock_class_key i_mutex_dir_key;
 };
 
@@ -2570,90 +2612,6 @@ extern struct kobject *fs_kobj;
 
 #define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit -  an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
-{
-       return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
-}
-
-/*
- * ... and these candidates should be on SB_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
-{
-       return IS_MANDLOCK(ino) && __mandatory_lock(ino);
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
-       if (mandatory_lock(locks_inode(file)))
-               return locks_mandatory_locked(file);
-       return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode,
-                                   struct file *f,
-                                   loff_t size)
-{
-       if (!inode->i_flctx || !mandatory_lock(inode))
-               return 0;
-
-       if (size < inode->i_size) {
-               return locks_mandatory_area(inode, f, size, inode->i_size - 1,
-                               F_WRLCK);
-       } else {
-               return locks_mandatory_area(inode, f, inode->i_size, size - 1,
-                               F_WRLCK);
-       }
-}
-
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
-{
-       return 0;
-}
-
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
-                                       loff_t start, loff_t end, unsigned char type)
-{
-       return 0;
-}
-
-static inline int __mandatory_lock(struct inode *inode)
-{
-       return 0;
-}
-
-static inline int mandatory_lock(struct inode *inode)
-{
-       return 0;
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
-       return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
-                                       size_t size)
-{
-       return 0;
-}
-
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
-
 #ifdef CONFIG_FILE_LOCKING
 static inline int break_lease(struct inode *inode, unsigned int mode)
 {
index f8acddcf54fb4ce2e7b03dda41f52dd936b591e8..12d3a7d308ab9ae40afd886b80cabb0a319f3e6e 100644 (file)
@@ -30,6 +30,9 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask,
                                 struct inode *child,
                                 const struct qstr *name, u32 cookie)
 {
+       if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0)
+               return;
+
        fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
 }
 
@@ -41,6 +44,9 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
 
 static inline void fsnotify_inode(struct inode *inode, __u32 mask)
 {
+       if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+               return;
+
        if (S_ISDIR(inode->i_mode))
                mask |= FS_ISDIR;
 
@@ -53,6 +59,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
 {
        struct inode *inode = d_inode(dentry);
 
+       if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+               return 0;
+
        if (S_ISDIR(inode->i_mode)) {
                mask |= FS_ISDIR;
 
index a69f363b61bf736eaa30f90481ba42ed503d102e..832e65f06754265cfd8914cde56653223c8d1b8f 100644 (file)
@@ -643,6 +643,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);
 
+/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ *  Nonzero if ftrace_init_nop() should be called
+ *  0      if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
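
An architecture whose call sites always need the conversion could override the default, e.g. (hypothetical arch header):

	/* asm/ftrace.h (hypothetical): nops must always be initialized. */
	#define ftrace_need_init_nop()	(1)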
 
 /**
  * ftrace_init_nop - initialize a nop call site
index 53aa0343bf694cb817e00ff597b52ce046e29d2c..aaf4f1b4c277c4ad38e1f0742bb480d3bb003287 100644 (file)
@@ -41,7 +41,7 @@ struct in_device {
        unsigned long           mr_qri;         /* Query Response Interval */
        unsigned char           mr_qrv;         /* Query Robustness Variable */
        unsigned char           mr_gq_running;
-       unsigned char           mr_ifc_count;
+       u32                     mr_ifc_count;
        struct timer_list       mr_gq_timer;    /* general query timer */
        struct timer_list       mr_ifc_timer;   /* interface change timer */
 
index 8e9a9ae471a6e462f4919b813e8a389b8e45bc2e..c8293c817646c7452595b304a1572d857ef7a152 100644 (file)
@@ -569,6 +569,7 @@ struct irq_chip {
  * IRQCHIP_SUPPORTS_NMI:              Chip can deliver NMIs, only for root irqchips
  * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND:  Invokes __enable_irq()/__disable_irq() for wake irqs
  *                                    in the suspend path if they are in disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP:      Default affinity update before startup
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED                 = (1 <<  0),
@@ -581,6 +582,7 @@ enum {
        IRQCHIP_SUPPORTS_LEVEL_MSI              = (1 <<  7),
        IRQCHIP_SUPPORTS_NMI                    = (1 <<  8),
        IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND        = (1 <<  9),
+       IRQCHIP_AFFINITY_PRE_STARTUP            = (1 << 10),
 };
 
 #include <linux/irqdesc.h>
index a70d1ea0353252db33a75a800d6fd4c81e637602..3fe6dd8a18c19607ad1116f3977929d7c86d856e 100644 (file)
@@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate;
 static __always_inline bool is_kfence_address(const void *addr)
 {
        /*
-        * The non-NULL check is required in case the __kfence_pool pointer was
-        * never initialized; keep it in the slow-path after the range-check.
+        * The __kfence_pool != NULL check is required to deal with the case
+        * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+        * the slow-path after the range-check!
         */
-       return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
+       return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
 }
 
 /**
index 17b5943727d5aeaee341ce32ad71666535d14b24..fd3d0b358f2283b65b51811f422216981c67fb49 100644 (file)
@@ -41,6 +41,8 @@ int linear_range_get_selector_low(const struct linear_range *r,
 int linear_range_get_selector_high(const struct linear_range *r,
                                   unsigned int val, unsigned int *selector,
                                   bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+                                     unsigned int val, unsigned int *selector);
 int linear_range_get_selector_low_array(const struct linear_range *r,
                                        int ranges, unsigned int val,
                                        unsigned int *selector, bool *found);
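
A hedged sketch of the new helper (range values hypothetical): unlike the _low/_high variants it always yields a selector, clamping inputs to the range:

	static unsigned int example_pick_selector(void)
	{
		static const struct linear_range r = {
			.min = 1000000,		/* 1.0 V */
			.min_sel = 0,
			.max_sel = 31,
			.step = 100000,		/* 100 mV per step */
		};
		unsigned int sel;

		linear_range_get_selector_within(&r, 2350000, &sel);
		return sel;	/* selector closest to 2.35 V */
	}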
index bfe5c486f4add865bcef9ed13f087dab3eb0eba8..24797929d8a1f1483ee52d8eec5f237eb3591f06 100644 (file)
@@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void)
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
-                                                 struct mem_cgroup *memcg,
-                                                 bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+                                        struct mem_cgroup *memcg,
+                                        unsigned long *min,
+                                        unsigned long *low)
 {
+       *min = *low = 0;
+
        if (mem_cgroup_disabled())
-               return 0;
+               return;
 
        /*
         * There is no reclaim protection applied to a targeted reclaim.
@@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
         *
         */
        if (root == memcg)
-               return 0;
-
-       if (in_low_reclaim)
-               return READ_ONCE(memcg->memory.emin);
+               return;
 
-       return max(READ_ONCE(memcg->memory.emin),
-                  READ_ONCE(memcg->memory.elow));
+       *min = READ_ONCE(memcg->memory.emin);
+       *low = READ_ONCE(memcg->memory.elow);
 }
 
 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 {
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
-                                                 struct mem_cgroup *memcg,
-                                                 bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+                                        struct mem_cgroup *memcg,
+                                        unsigned long *min,
+                                        unsigned long *low)
 {
-       return 0;
+       *min = *low = 0;
 }
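
Callers now fetch both thresholds in one call, roughly like this (sketch; function and variable names hypothetical):

	static bool example_fully_protected(struct mem_cgroup *root,
					    struct mem_cgroup *memcg,
					    unsigned long usage)
	{
		unsigned long min, low;

		mem_cgroup_protection(root, memcg, &min, &low);
		return low && usage <= low;	/* within the low boundary */
	}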
 
 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
index 40a0c2dfb80ff0119bc1cf0b154a11b06a03b600..2d1895c3efbf2e17388fac8d2d3453cf24bb248b 100644 (file)
@@ -200,13 +200,13 @@ enum rt5033_reg {
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN              1000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX              3000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP             100000U
-#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         21
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         32
 
 /* RT5033 regulator LDO output voltage uV */
 #define RT5033_REGULATOR_LDO_VOLTAGE_MIN               1200000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_MAX               3000000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_STEP              100000U
-#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          19
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          32
 
 /* RT5033 regulator SAFE LDO output voltage uV */
 #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE              4900000U
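
For reference: (3,000,000 uV - 1,000,000 uV) / 100,000 uV + 1 = 21 distinct buck voltages and (3,000,000 uV - 1,200,000 uV) / 100,000 uV + 1 = 19 LDO voltages, so the old counts covered only the linear span; the bump to 32 presumably reflects the full 5-bit selector field, with out-of-range codes clamping to the maximum voltage.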
index 5e08468854db4998089c0cc6522baecf02f84c64..944aa3aa30355fe33fba8133cc8e0a2ad1653c9b 100644 (file)
@@ -719,13 +719,8 @@ void mhi_device_put(struct mhi_device *mhi_dev);
  *                            host and device execution environments match and
  *                            channels are in a DISABLED state.
  * @mhi_dev: Device associated with the channels
- * @flags: MHI channel flags
  */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev,
-                            unsigned int flags);
-
-/* Automatically allocate and queue inbound buffers */
-#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
 
 /**
  * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
index 1efe3746696911817df8872063c5e1581c43dc9f..25a8be58d28951fb888b596456475ddd7b51a6b6 100644 (file)
@@ -1044,8 +1044,7 @@ void mlx5_unregister_debugfs(void);
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-                   unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 
index 98b56b75c625b272d985bd2df8eb72c718eec9bf..1a9c9d94cb59f3a0e6c3f62d00d7c95111cb7ffe 100644 (file)
@@ -11,13 +11,15 @@ enum {
 };
 
 enum {
-       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT   = 0x1, // do I check this caps?
-       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED  = 0x2,
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT   = 0,
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED  = 1,
 };
 
 enum {
-       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT   = 0,
-       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED  = 1,
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+               BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+               BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
 };
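
The reworked values are numerically identical to the old literals (BIT(0) == 0x1 for split, BIT(1) == 0x2 for packed), so a capability test is unchanged in effect (sketch; the caps variable is hypothetical):

	if (queue_type_caps & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
		; /* device supports packed virtqueues */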
 
 struct mlx5_ifc_virtio_q_bits {
index 6aff469e511d131c37697368410a2a260a1258ac..e8bdcb83172b01acbe5118e2f9a011d9e04e4da8 100644 (file)
@@ -233,7 +233,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 
 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
 void pci_msi_mask_irq(struct irq_data *data);
 void pci_msi_unmask_irq(struct irq_data *data);
 
index 10279c4830ac30d77f1f5539f6aa6e22ce5b562e..ada1296c87d504ae43288230308247831e2d7f0c 100644 (file)
@@ -196,6 +196,9 @@ struct ip_set_region {
        u32 elements;           /* Number of elements vs timeout */
 };
 
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE                (1<<20)
+
 /* The max revision number supported by any set type + 1 */
 #define IPSET_REVISION_MAX     9
 
index 9225ee6d96c75632f123b0b0b3fe4a2e6789397b..ae6f4eb41cbe7f61cb02f19620a2dbb2a1406089 100644 (file)
@@ -7,7 +7,7 @@
 
 bool __do_once_start(bool *done, unsigned long *flags);
 void __do_once_done(bool *done, struct static_key_true *once_key,
-                   unsigned long *flags);
+                   unsigned long *flags, struct module *mod);
 
 /* Call a function exactly once. The idea of DO_ONCE() is to perform
  * a function call such as initialization of random seeds, etc, only
@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
                        if (unlikely(___ret)) {                              \
                                func(__VA_ARGS__);                           \
                                __do_once_done(&___done, &___once_key,       \
-                                              &___flags);                   \
+                                              &___flags, THIS_MODULE);      \
                        }                                                    \
                }                                                            \
                ___ret;                                                      \
index a433f13fc4bf7b17f0bcb867e5503c0d64162b84..495b16b6b4d729360b2e85dac8e76a31ddce0dc6 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef PADATA_H
 #define PADATA_H
 
+#include <linux/refcount.h>
 #include <linux/compiler_types.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
@@ -96,7 +97,7 @@ struct parallel_data {
        struct padata_shell             *ps;
        struct padata_list              __percpu *reorder_list;
        struct padata_serial_queue      __percpu *squeue;
-       atomic_t                        refcnt;
+       refcount_t                      refcnt;
        unsigned int                    seq_nr;
        unsigned int                    processed;
        int                             cpu;
index fa10acb8d6a42bf3949c8831cf816d200663b968..af308e15f174c9866d8a594375cffcfa8d69a615 100644 (file)
@@ -78,6 +78,7 @@ struct file;
 
 extern struct pid *pidfd_pid(const struct file *file);
 struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+int pidfd_create(struct pid *pid, unsigned int flags);
 
 static inline struct pid *get_pid(struct pid *pid)
 {
index 5d2705f1d01c3d7c2574b0a705aecf0ba27ddedf..fc5642431b923cd8c1bf33d884ad348a43924377 100644 (file)
@@ -48,6 +48,7 @@ struct pipe_buffer {
  *     @files: number of struct file referring this pipe (protected by ->i_lock)
  *     @r_counter: reader counter
  *     @w_counter: writer counter
+ *     @poll_usage: is this pipe used for epoll, which has crazy wakeups?
  *     @fasync_readers: reader side fasync
  *     @fasync_writers: writer side fasync
  *     @bufs: the circular array of pipe buffers
@@ -70,6 +71,7 @@ struct pipe_inode_info {
        unsigned int files;
        unsigned int r_counter;
        unsigned int w_counter;
+       unsigned int poll_usage;
        struct page *tmp_page;
        struct fasync_struct *fasync_readers;
        struct fasync_struct *fasync_writers;
index 45f53afc46e22473bdebfae28e956402b5248b87..271bd87bff0a259ec94e87de0177eba5a07ae795 100644 (file)
@@ -4228,6 +4228,7 @@ enum ec_device_event {
        EC_DEVICE_EVENT_TRACKPAD,
        EC_DEVICE_EVENT_DSP,
        EC_DEVICE_EVENT_WIFI,
+       EC_DEVICE_EVENT_WLC,
 };
 
 enum ec_device_event_param {
@@ -5460,6 +5461,72 @@ struct ec_response_rollback_info {
 /* Issue AP reset */
 #define EC_CMD_AP_RESET 0x0125
 
+/**
+ * Get the number of peripheral charge ports
+ */
+#define EC_CMD_PCHG_COUNT 0x0134
+
+#define EC_PCHG_MAX_PORTS 8
+
+struct ec_response_pchg_count {
+       uint8_t port_count;
+} __ec_align1;
+
+/**
+ * Get the status of a peripheral charge port
+ */
+#define EC_CMD_PCHG 0x0135
+
+struct ec_params_pchg {
+       uint8_t port;
+} __ec_align1;
+
+struct ec_response_pchg {
+       uint32_t error;                 /* enum pchg_error */
+       uint8_t state;                  /* enum pchg_state state */
+       uint8_t battery_percentage;
+       uint8_t unused0;
+       uint8_t unused1;
+       /* Fields added in version 1 */
+       uint32_t fw_version;
+       uint32_t dropped_event_count;
+} __ec_align2;
+
+enum pchg_state {
+       /* Charger is reset and not initialized. */
+       PCHG_STATE_RESET = 0,
+       /* Charger is initialized or disabled. */
+       PCHG_STATE_INITIALIZED,
+       /* Charger is enabled and ready to detect a device. */
+       PCHG_STATE_ENABLED,
+       /* Device is in proximity. */
+       PCHG_STATE_DETECTED,
+       /* Device is being charged. */
+       PCHG_STATE_CHARGING,
+       /* Device is fully charged. It implies DETECTED (& not charging). */
+       PCHG_STATE_FULL,
+       /* In download (a.k.a. firmware update) mode */
+       PCHG_STATE_DOWNLOAD,
+       /* In download mode. Ready for receiving data. */
+       PCHG_STATE_DOWNLOADING,
+       /* Device is ready for data communication. */
+       PCHG_STATE_CONNECTED,
+       /* Put no more entry below */
+       PCHG_STATE_COUNT,
+};
+
+#define EC_PCHG_STATE_TEXT { \
+       [PCHG_STATE_RESET] = "RESET", \
+       [PCHG_STATE_INITIALIZED] = "INITIALIZED", \
+       [PCHG_STATE_ENABLED] = "ENABLED", \
+       [PCHG_STATE_DETECTED] = "DETECTED", \
+       [PCHG_STATE_CHARGING] = "CHARGING", \
+       [PCHG_STATE_FULL] = "FULL", \
+       [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \
+       [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \
+       [PCHG_STATE_CONNECTED] = "CONNECTED", \
+       }
+
 /*****************************************************************************/
 /* Voltage regulator controls */
 
index 65fd5ffd257c00f6ce6129fe7d34b39c4f2497d1..f0db674f07b8a724ca5fe6732c64007e48298571 100644 (file)
@@ -12,5 +12,6 @@
 /* Board specific platform_data */
 struct mtk_chip_config {
        u32 sample_sel;
+       u32 tick_delay;
 };
 #endif
index d55c746ac56e2632594be7dca4a32b512845b9e3..dd24756a8af737c25adb354c3ce08b9f1882ddf2 100644 (file)
@@ -69,7 +69,7 @@ enum max17042_register {
        MAX17042_RelaxCFG       = 0x2A,
        MAX17042_MiscCFG        = 0x2B,
        MAX17042_TGAIN          = 0x2C,
-       MAx17042_TOFF           = 0x2D,
+       MAX17042_TOFF           = 0x2D,
        MAX17042_CGAIN          = 0x2E,
        MAX17042_COFF           = 0x2F,
 
@@ -110,13 +110,14 @@ enum max17042_register {
        MAX17042_VFSOC          = 0xFF,
 };
 
+/* Registers specific to max17055 only */
 enum max17055_register {
        MAX17055_QRes           = 0x0C,
+       MAX17055_RCell          = 0x14,
        MAX17055_TTF            = 0x20,
-       MAX17055_V_empty        = 0x3A,
-       MAX17055_TIMER          = 0x3E,
+       MAX17055_DieTemp        = 0x34,
        MAX17055_USER_MEM       = 0x40,
-       MAX17055_RGAIN          = 0x42,
+       MAX17055_RGAIN          = 0x43,
 
        MAX17055_ConvgCfg       = 0x49,
        MAX17055_VFRemCap       = 0x4A,
@@ -155,13 +156,14 @@ enum max17055_register {
        MAX17055_AtAvCap        = 0xDF,
 };
 
-/* Registers specific to max17047/50 */
+/* Registers specific to max17047/50/55 */
 enum max17047_register {
        MAX17047_QRTbl00        = 0x12,
        MAX17047_FullSOCThr     = 0x13,
        MAX17047_QRTbl10        = 0x22,
        MAX17047_QRTbl20        = 0x32,
        MAX17047_V_empty        = 0x3A,
+       MAX17047_TIMER          = 0x3E,
        MAX17047_QRTbl30        = 0x42,
 };
 
index be203985ecdd51e2924af5bf38106797428eba51..9ca1f120a211760bd08995f09030f79f3dc2f702 100644 (file)
@@ -352,6 +352,7 @@ struct power_supply_resistance_temp_table {
  */
 
 struct power_supply_battery_info {
+       unsigned int technology;            /* from the enum above */
        int energy_full_design_uwh;         /* microWatt-hours */
        int charge_full_design_uah;         /* microAmp-hours */
        int voltage_min_design_uv;          /* microVolts */
index f8633d37e35815454eb66df55016e1f0ab2d260d..d29740be4833e22c604298046784443a87e73338 100644 (file)
 #include <linux/list.h>
 #include <linux/rcupdate.h>
 
-/*
- * Why is there no list_empty_rcu()?  Because list_empty() serves this
- * purpose.  The list_empty() function fetches the RCU-protected pointer
- * and compares it to the address of the list head, but neither dereferences
- * this pointer itself nor provides this pointer to the caller.  Therefore,
- * it is not necessary to use rcu_dereference(), so that list_empty() can
- * be used anywhere you would want to use a list_empty_rcu().
- */
-
 /*
  * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
  * @list: list to be initialized
@@ -318,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 /*
  * Where are list_empty_rcu() and list_first_entry_rcu()?
  *
- * Implementing those functions following their counterparts list_empty() and
- * list_first_entry() is not advisable because they lead to subtle race
- * conditions as the following snippet shows:
+ * They do not exist because they would lead to subtle race conditions:
  *
  * if (!list_empty_rcu(mylist)) {
  *     struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
  *     do_something(bar);
  * }
  *
- * The list may not be empty when list_empty_rcu checks it, but it may be when
- * list_first_entry_rcu rereads the ->next pointer.
- *
- * Rereading the ->next pointer is not a problem for list_empty() and
- * list_first_entry() because they would be protected by a lock that blocks
- * writers.
+ * The list might be non-empty when list_empty_rcu() checks it, but it
+ * might have become empty by the time that list_first_entry_rcu() rereads
+ * the ->next pointer, which would result in a SEGV.
+ *
+ * When not using RCU, it is OK for list_first_entry() to re-read that
+ * pointer because both functions should be protected by some lock that
+ * blocks writers.
+ *
+ * When using RCU, list_empty() uses READ_ONCE() to fetch the
+ * RCU-protected ->next pointer and then compares it to the address of the
+ * list head.  However, it neither dereferences this pointer nor provides
+ * this pointer to its caller.  Thus, READ_ONCE() suffices (that is,
+ * rcu_dereference() is not needed), which means that list_empty() can be
+ * used anywhere you would want to use list_empty_rcu().  Just don't
+ * expect anything useful to happen if you do a subsequent lockless
+ * call to list_first_entry_rcu()!!!
  *
  * See list_first_or_null_rcu for an alternative.
  */
index d9680b798b211381d95fc7d10cf41cde731969e9..434d12fe2d4f54e5780ae053ac98c7652b9df4b1 100644 (file)
@@ -53,7 +53,7 @@ void __rcu_read_unlock(void);
  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  */
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
@@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void);
 # define synchronize_rcu_tasks synchronize_rcu
 # endif
 
-# ifdef CONFIG_TASKS_RCU_TRACE
+# ifdef CONFIG_TASKS_TRACE_RCU
 # define rcu_tasks_trace_qs(t)                                         \
        do {                                                            \
                if (!likely(READ_ONCE((t)->trc_reader_checked)) &&      \
index 953e70fafe38386fc8d44c0dec2030afaf917c40..9be015305f9f9ca2da97601756bf43f33c4c33bd 100644 (file)
@@ -14,9 +14,6 @@
 
 #include <asm/param.h> /* for HZ */
 
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
-
 unsigned long get_state_synchronize_rcu(void);
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
index f5f08dd0a1163c1efb26260e38cbf256eb4dbe2f..e3c9a25a853a8cc7108c7cf435d5fa06affb46a3 100644 (file)
@@ -344,6 +344,7 @@ typedef void (*regmap_unlock)(void *);
  * @ranges: Array of configuration entries for virtual address ranges.
  * @num_ranges: Number of range configuration entries.
  * @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
  * @hwlock_id: Specify the hardware spinlock id.
  * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
  *              HWLOCK_IRQ or 0.
@@ -403,6 +404,7 @@ struct regmap_config {
        unsigned int num_ranges;
 
        bool use_hwlock;
+       bool use_raw_spinlock;
        unsigned int hwlock_id;
        unsigned int hwlock_mode;
 
@@ -1269,12 +1271,13 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
 
 int regmap_field_bulk_alloc(struct regmap *regmap,
                             struct regmap_field **rm_field,
-                            struct reg_field *reg_field,
+                            const struct reg_field *reg_field,
                             int num_fields);
 void regmap_field_bulk_free(struct regmap_field *field);
 int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
                                 struct regmap_field **field,
-                                struct reg_field *reg_field, int num_fields);
+                                const struct reg_field *reg_field,
+                                int num_fields);
 void devm_regmap_field_bulk_free(struct device *dev,
                                 struct regmap_field *field);
 
index f72ca73631bee4846c7756cd77a54d07f3ea941e..bbf6590a6dec25846d742600e9ef88e322f67e8e 100644 (file)
@@ -222,17 +222,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev,
 int devm_regulator_register_supply_alias(struct device *dev, const char *id,
                                         struct device *alias_dev,
                                         const char *alias_id);
-void devm_regulator_unregister_supply_alias(struct device *dev,
-                                           const char *id);
 
 int devm_regulator_bulk_register_supply_alias(struct device *dev,
                                              const char *const *id,
                                              struct device *alias_dev,
                                              const char *const *alias_id,
                                              int num_id);
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
-                                                const char *const *id,
-                                                int num_id);
 
 /* regulator output control and status */
 int __must_check regulator_enable(struct regulator *regulator);
@@ -408,11 +403,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev,
        return 0;
 }
 
-static inline void devm_regulator_unregister_supply_alias(struct device *dev,
-                                                         const char *id)
-{
-}
-
 static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
                                                const char *const *id,
                                                struct device *alias_dev,
@@ -422,11 +412,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
        return 0;
 }
 
-static inline void devm_regulator_bulk_unregister_supply_alias(
-       struct device *dev, const char *const *id, int num_id)
-{
-}
-
 static inline int regulator_enable(struct regulator *regulator)
 {
        return 0;
index 4aec20387857c62877081e4e6568cc1fabc91173..bd7a73db2e66cd6fcdc0fa1ced8bb949fb95ecfd 100644 (file)
@@ -337,6 +337,12 @@ enum regulator_type {
  * @pull_down_val_on: Enabling value for control when using regmap
  *                     set_pull_down
  *
+ * @ramp_reg:          Register for controlling the regulator ramp-rate.
+ * @ramp_mask:         Bitmask for the ramp-rate control register.
+ * @ramp_delay_table:  Table for mapping the regulator ramp-rate values. Values
+ *                     should be given in units of V/S (uV/uS). See
+ *                     regulator_set_ramp_delay_regmap().
+ *
  * @enable_time: Time taken for initial enable of regulator (in uS).
  * @off_on_delay: guard time (in uS), before re-enabling a regulator
  *
@@ -462,7 +468,7 @@ struct regulator_err_state {
 };
 
 /**
- * struct regulator_irq_data - regulator error/notification status date
+ * struct regulator_irq_data - regulator error/notification status data
  *
  * @states:    Status structs for each of the associated regulators.
  * @num_states:        Amount of associated regulators.
@@ -521,8 +527,8 @@ struct regulator_irq_data {
  *             active events as core does not clean the map data.
  *             REGULATOR_FAILED_RETRY can be returned to indicate that the
  *             status reading from IC failed. If this is repeated for
- *             fatal_cnt times the core will call die() callback or BUG()
- *             as a last resort to protect the HW.
+ *             fatal_cnt times the core will call die() callback or power-off
+ *             the system as a last resort to protect the HW.
  * @renable:   Optional callback to check status (if HW supports that) before
  *             re-enabling IRQ. If implemented this should clear the error
  *             flags so that errors fetched by regulator_get_error_flags()
@@ -531,7 +537,8 @@ struct regulator_irq_data {
  *             REGULATOR_FAILED_RETRY can be returned to
  *             indicate that the status reading from IC failed. If this is
  *             repeated for 'fatal_cnt' times the core will call die()
- *             callback or BUG() as a last resort to protect the HW.
+ *             callback or if die() is not populated then attempt to power-off
+ *             the system as a last resort to protect the HW.
  *             Returning zero indicates that the problem in HW has been solved
  *             and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON
  *             indicates the error condition is still active and keeps IRQ
@@ -645,7 +652,6 @@ devm_regulator_register(struct device *dev,
                        const struct regulator_desc *regulator_desc,
                        const struct regulator_config *config);
 void regulator_unregister(struct regulator_dev *rdev);
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
 
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
                                  unsigned long event, void *data);
index 68b4a514a410b77450c4d79632cbb06d251c3410..621b7f4a36395513180cb1c98df7ea77bbb8d3de 100644 (file)
@@ -112,7 +112,7 @@ struct notification_limit {
  * @over_voltage_limits:       Limits for acting on over voltage.
  * @under_voltage_limits:      Limits for acting on under voltage.
  * @temp_limits:               Limits for acting on over temperature.
-
+ *
  * @max_spread: Max possible spread between coupled regulators
  * @max_uV_step: Max possible step change in voltage
  * @valid_modes_mask: Mask of modes which may be configured by consumers.
index 9b05af9b3e28d50b1dbcf3753e0e433551df26f3..21deb5212bbdd82e58dac7b4764762c66fffba9b 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _RESCTRL_H
 #define _RESCTRL_H
 
+#include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/pid.h>
 
 #ifdef CONFIG_PROC_CPU_RESCTRL
@@ -13,4 +15,186 @@ int proc_resctrl_show(struct seq_file *m,
 
 #endif
 
+/**
+ * enum resctrl_conf_type - The type of configuration.
+ * @CDP_NONE:  No prioritisation, both code and data are controlled or monitored.
+ * @CDP_CODE:  Configuration applies to instruction fetches.
+ * @CDP_DATA:  Configuration applies to reads and writes.
+ */
+enum resctrl_conf_type {
+       CDP_NONE,
+       CDP_CODE,
+       CDP_DATA,
+};
+
+#define CDP_NUM_TYPES  (CDP_DATA + 1)
+
+/**
+ * struct resctrl_staged_config - parsed configuration to be applied
+ * @new_ctrl:          new ctrl value to be loaded
+ * @have_new_ctrl:     whether the user provided new_ctrl is valid
+ */
+struct resctrl_staged_config {
+       u32                     new_ctrl;
+       bool                    have_new_ctrl;
+};
+
+/**
+ * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * @list:              all instances of this resource
+ * @id:                        unique id for this instance
+ * @cpu_mask:          which CPUs share this resource
+ * @rmid_busy_llc:     bitmap of which limbo RMIDs are above threshold
+ * @mbm_total:         saved state for MBM total bandwidth
+ * @mbm_local:         saved state for MBM local bandwidth
+ * @mbm_over:          worker to periodically read MBM h/w counters
+ * @cqm_limbo:         worker to periodically read CQM h/w counters
+ * @mbm_work_cpu:      worker CPU for MBM h/w counters
+ * @cqm_work_cpu:      worker CPU for CQM h/w counters
+ * @plr:               pseudo-locked region (if any) associated with domain
+ * @staged_config:     parsed configuration to be applied
+ */
+struct rdt_domain {
+       struct list_head                list;
+       int                             id;
+       struct cpumask                  cpu_mask;
+       unsigned long                   *rmid_busy_llc;
+       struct mbm_state                *mbm_total;
+       struct mbm_state                *mbm_local;
+       struct delayed_work             mbm_over;
+       struct delayed_work             cqm_limbo;
+       int                             mbm_work_cpu;
+       int                             cqm_work_cpu;
+       struct pseudo_lock_region       *plr;
+       struct resctrl_staged_config    staged_config[CDP_NUM_TYPES];
+};
+
+/**
+ * struct resctrl_cache - Cache allocation related data
+ * @cbm_len:           Length of the cache bit mask
+ * @min_cbm_bits:      Minimum number of consecutive bits to be set
+ * @shareable_bits:    Bitmask of shareable resource with other
+ *                     executing entities
+ * @arch_has_sparse_bitmaps:   True if a bitmap like f00f is valid.
+ * @arch_has_empty_bitmaps:    True if the '0' bitmap is valid.
+ * @arch_has_per_cpu_cfg:      True if QOS_CFG register for this cache
+ *                             level has CPU scope.
+ */
+struct resctrl_cache {
+       unsigned int    cbm_len;
+       unsigned int    min_cbm_bits;
+       unsigned int    shareable_bits;
+       bool            arch_has_sparse_bitmaps;
+       bool            arch_has_empty_bitmaps;
+       bool            arch_has_per_cpu_cfg;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX:       Memory bandwidth is throttled at the core
+ *                             always using smallest bandwidth percentage
+ *                             assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD:        Memory bandwidth is throttled at the thread
+ */
+enum membw_throttle_mode {
+       THREAD_THROTTLE_UNDEFINED = 0,
+       THREAD_THROTTLE_MAX,
+       THREAD_THROTTLE_PER_THREAD,
+};
+
+/**
+ * struct resctrl_membw - Memory bandwidth allocation related data
+ * @min_bw:            Minimum memory bandwidth percentage user can request
+ * @bw_gran:           Granularity at which the memory bandwidth is allocated
+ * @delay_linear:      True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode:     Bandwidth throttling mode when threads request
+ *                     different memory bandwidths
+ * @mba_sc:            True if MBA software controller(mba_sc) is enabled
+ * @mb_map:            Mapping of memory B/W percentage to memory B/W delay
+ */
+struct resctrl_membw {
+       u32                             min_bw;
+       u32                             bw_gran;
+       u32                             delay_linear;
+       bool                            arch_needs_linear;
+       enum membw_throttle_mode        throttle_mode;
+       bool                            mba_sc;
+       u32                             *mb_map;
+};
+
+struct rdt_parse_data;
+struct resctrl_schema;
+
+/**
+ * struct rdt_resource - attributes of a resctrl resource
+ * @rid:               The index of the resource
+ * @alloc_enabled:     Is allocation enabled on this machine
+ * @mon_enabled:       Is monitoring enabled for this feature
+ * @alloc_capable:     Is allocation available on this machine
+ * @mon_capable:       Is monitor feature available on this machine
+ * @num_rmid:          Number of RMIDs available
+ * @cache_level:       Which cache level defines scope of this resource
+ * @cache:             Cache allocation related data
+ * @membw:             If the component has bandwidth controls, their properties.
+ * @domains:           All domains for this resource
+ * @name:              Name to use in "schemata" file.
+ * @data_width:                Character width of data when displaying
+ * @default_ctrl:      Specifies default cache cbm or memory B/W percent.
+ * @format_str:                Per resource format string to show domain value
+ * @parse_ctrlval:     Per resource function pointer to parse control values
+ * @evt_list:          List of monitoring events
+ * @fflags:            flags to choose base and info files
+ * @cdp_capable:       Is the CDP feature available on this resource
+ */
+struct rdt_resource {
+       int                     rid;
+       bool                    alloc_enabled;
+       bool                    mon_enabled;
+       bool                    alloc_capable;
+       bool                    mon_capable;
+       int                     num_rmid;
+       int                     cache_level;
+       struct resctrl_cache    cache;
+       struct resctrl_membw    membw;
+       struct list_head        domains;
+       char                    *name;
+       int                     data_width;
+       u32                     default_ctrl;
+       const char              *format_str;
+       int                     (*parse_ctrlval)(struct rdt_parse_data *data,
+                                                struct resctrl_schema *s,
+                                                struct rdt_domain *d);
+       struct list_head        evt_list;
+       unsigned long           fflags;
+       bool                    cdp_capable;
+};
+
+/**
+ * struct resctrl_schema - configuration abilities of a resource presented to
+ *                        user-space
+ * @list:      Member of resctrl_schema_all.
+ * @name:      The name to use in the "schemata" file.
+ * @conf_type: Whether this schema is specific to code/data.
+ * @res:       The resource structure exported by the architecture to describe
+ *             the hardware that is configured by this schema.
+ * @num_closid:        The number of closids that can be used with this schema. When
+ *             features like CDP are enabled, this will be lower than what the
+ *             hardware supports for the resource.
+ */
+struct resctrl_schema {
+       struct list_head                list;
+       char                            name[8];
+       enum resctrl_conf_type          conf_type;
+       struct rdt_resource             *res;
+       u32                             num_closid;
+};
+
+/* The number of closids supported by this resource regardless of CDP */
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+                           u32 closid, enum resctrl_conf_type type);
+
 #endif /* _RESCTRL_H */
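
To make the arch/fs split above concrete: a minimal sketch of the architecture side, assuming a hypothetical struct my_arch_resource wrapper and hw_num_closid field (neither appears in this patch). Per the comment above, the hook reports the raw hardware count and leaves any CDP halving to the filesystem code:

    #include <linux/resctrl.h>

    struct my_arch_resource {                   /* hypothetical arch wrapper */
            struct rdt_resource     resctrl;
            u32                     hw_num_closid;  /* e.g. read from CPUID */
    };

    u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
    {
            struct my_arch_resource *hw;

            hw = container_of(r, struct my_arch_resource, resctrl);
            return hw->hw_num_closid;           /* regardless of CDP */
    }
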
index 24eda04221e9934a482d42ffbc937a454a22fc06..5b7288521300bab1155781e9abe3bf97558a8c14 100644 (file)
@@ -120,10 +120,11 @@ enum lockdown_reason {
        LOCKDOWN_MMIOTRACE,
        LOCKDOWN_DEBUGFS,
        LOCKDOWN_XMON_WR,
+       LOCKDOWN_BPF_WRITE_USER,
        LOCKDOWN_INTEGRITY_MAX,
        LOCKDOWN_KCORE,
        LOCKDOWN_KPROBES,
-       LOCKDOWN_BPF_READ,
+       LOCKDOWN_BPF_READ_KERNEL,
        LOCKDOWN_PERF,
        LOCKDOWN_TRACEFS,
        LOCKDOWN_XMON_RW,
index 97b8d12b5f2bb7564146fe60b88db48820279dfe..8371bca13729b9f9541730fdcf52b1c664ffd619 100644 (file)
@@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
  *     not using a GPIO line)
  * @word_delay: delay to be inserted between consecutive
  *     words of a transfer
- *
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ *     deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ *     two delays will be added up.
  * @statistics: statistics for the spi_device
  *
  * A @spi_device is used to interchange data between an SPI slave
@@ -188,6 +192,10 @@ struct spi_device {
        int                     cs_gpio;        /* LEGACY: chip select gpio */
        struct gpio_desc        *cs_gpiod;      /* chip select gpio desc */
        struct spi_delay        word_delay; /* inter-word delay */
+       /* CS delays */
+       struct spi_delay        cs_setup;
+       struct spi_delay        cs_hold;
+       struct spi_delay        cs_inactive;
 
        /* the statistics */
        struct spi_statistics   statistics;
@@ -339,6 +347,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @max_speed_hz: Highest supported transfer speed
  * @flags: other constraints relevant to this driver
  * @slave: indicates that this is an SPI slave controller
+ * @devm_allocated: whether the allocation of this struct is devres-managed
  * @max_transfer_size: function that returns the max transfer size for
  *     a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
  * @max_message_size: function that returns the max message size for
@@ -412,11 +421,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  *          controller has native support for memory like operations.
  * @unprepare_message: undo any work done by prepare_message().
  * @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_setup: delay to be introduced by the controller after CS is asserted
- * @cs_hold: delay to be introduced by the controller before CS is deasserted
- * @cs_inactive: delay to be introduced by the controller after CS is
- *     deasserted. If @cs_change_delay is used from @spi_transfer, then the
- *     two delays will be added up.
  * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
  *     CS number. Any individual value may be -ENOENT for CS lines that
  *     are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
@@ -511,7 +515,7 @@ struct spi_controller {
 
 #define SPI_MASTER_GPIO_SS             BIT(5)  /* GPIO CS must select slave */
 
-       /* flag indicating this is a non-devres managed controller */
+       /* flag indicating if the allocation of this struct is devres-managed */
        bool                    devm_allocated;
 
        /* flag indicating this is an SPI slave controller */
@@ -550,8 +554,7 @@ struct spi_controller {
         * to configure specific CS timing through spi_set_cs_timing() after
         * spi_setup().
         */
-       int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
-                            struct spi_delay *hold, struct spi_delay *inactive);
+       int (*set_cs_timing)(struct spi_device *spi);
 
        /* bidirectional bulk transfers
         *
@@ -638,11 +641,6 @@ struct spi_controller {
        /* Optimized handlers for SPI memory-like operations. */
        const struct spi_controller_mem_ops *mem_ops;
 
-       /* CS delays */
-       struct spi_delay        cs_setup;
-       struct spi_delay        cs_hold;
-       struct spi_delay        cs_inactive;
-
        /* gpio chip select */
        int                     *cs_gpios;
        struct gpio_desc        **cs_gpiods;
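
With the CS delays now carried by the spi_device itself, a controller driver's callback shrinks to reading them back out of @spi. A sketch under the new prototype, assuming a hypothetical foo_priv driver structure; spi_delay_to_ns() is the existing helper for converting a struct spi_delay:

    #include <linux/spi/spi.h>

    struct foo_priv {                           /* hypothetical driver state */
            int setup_ns, hold_ns, inactive_ns;
    };

    static int foo_set_cs_timing(struct spi_device *spi)
    {
            struct foo_priv *priv = spi_controller_get_devdata(spi->controller);

            /* The client wrote these into @spi before the core called us. */
            priv->setup_ns = spi_delay_to_ns(&spi->cs_setup, NULL);
            priv->hold_ns = spi_delay_to_ns(&spi->cs_hold, NULL);
            priv->inactive_ns = spi_delay_to_ns(&spi->cs_inactive, NULL);
            return 0;
    }
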
index 0e0cf4d6a72a0ea6bfa22d86b2fdf91d7d314c6e..6cfaa0a9a9b961f467524de59edbf8aedb73f50d 100644 (file)
@@ -61,7 +61,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
        int idx;
 
        idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
-       WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
+       WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
        return idx;
 }
 
@@ -81,11 +81,11 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
 {
        int idx;
 
-       idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
+       idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
        pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
                 tt, tf, idx,
-                READ_ONCE(ssp->srcu_lock_nesting[!idx]),
-                READ_ONCE(ssp->srcu_lock_nesting[idx]));
+                data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
+                data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])));
 }
 
 #endif
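
The double annotation in the statistics path is deliberate: READ_ONCE() keeps the compiler from tearing or refetching the load, while data_race() tells KCSAN the remaining race is intentional because the value is diagnostic only. The same idiom in isolation, assuming a hypothetical counter updated concurrently by other CPUs:

    #include <linux/compiler.h>
    #include <linux/printk.h>

    static int hot_counter;                     /* hypothetical, written elsewhere */

    static void print_snapshot(void)
    {
            /* Diagnostic-only read: race tolerated, load still done once. */
            int snap = data_race(READ_ONCE(hot_counter));

            pr_info("counter snapshot: %d\n", snap);
    }
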
index 3357ac98878d4f0eabf72e25c2e2467391c95036..8cfe49d201dd3561ff94dd24ca6a0c0b49f522eb 100644 (file)
@@ -277,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
                                        size_t size, const char *name);
 
+/**
+ * vdpa_alloc_device - allocate and initialize a vDPA device
+ *
+ * @dev_struct: the type of the parent structure
+ * @member: the name of struct vdpa_device within the @dev_struct
+ * @parent: the parent device
+ * @config: the bus operations that are supported by this device
+ * @name: name of the vdpa device
+ *
+ * Return: the allocated data structure or ERR_PTR upon error
+ */
 #define vdpa_alloc_device(dev_struct, member, parent, config, name)   \
                          container_of(__vdpa_alloc_device( \
                                       parent, config, \
index b1894e0323fae45163eb2b10091d96fbc9f78401..41edbc01ffa4039524d5519caaa2f18e5884ca65 100644 (file)
@@ -110,6 +110,7 @@ struct virtio_device {
        bool config_enabled;
        bool config_change_pending;
        spinlock_t config_lock;
+       spinlock_t vqs_list_lock; /* Protects VQs list access */
        struct device dev;
        struct virtio_device_id id;
        const struct virtio_config_ops *config;
index 84db7b8f912f474310aa59b8970ce7a349def7c8..212892cf9822cd1641c0338d710d87952a14e293 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/virtio_byteorder.h>
 #include <linux/uio.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
 #include <linux/dma-direction.h>
 #include <linux/vhost_iotlb.h>
index f3c2841566a01560db1f88d800b1034a371ed991..1b9d75aedb225de26f18cd20e154fa8c712fd2c9 100644 (file)
@@ -319,14 +319,12 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
        if (flow_offload_has_one_action(action))
                return true;
 
-       if (action) {
-               flow_action_for_each(i, action_entry, action) {
-                       if (i && action_entry->hw_stats != last_hw_stats) {
-                               NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
-                               return false;
-                       }
-                       last_hw_stats = action_entry->hw_stats;
+       flow_action_for_each(i, action_entry, action) {
+               if (i && action_entry->hw_stats != last_hw_stats) {
+                       NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+                       return false;
                }
+               last_hw_stats = action_entry->hw_stats;
        }
        return true;
 }
index 15b7fbe6b15c28ef30ee7b578d3bc6d2aa368028..c412dde4d67dca88fa236522dadfbbc0eb8a97f4 100644 (file)
@@ -267,7 +267,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
        return false;
 }
 
-/* Function to safely get fn->sernum for passed in rt
+/* Function to safely get fn->fn_sernum for passed in rt
  * and store result in passed in cookie.
  * Return true if we can get cookie safely
  * Return false if not
@@ -282,7 +282,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
 
        if (fn) {
                *cookie = fn->fn_sernum;
-               /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
+               /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
                smp_rmb();
                status = true;
        }
index 37e5300c7e5a1405a7f9c14491e36dc04219feb1..fefd38db95b3e6828c4248f4285e109fcc9ab497 100644 (file)
@@ -30,7 +30,6 @@ struct nf_tcp_net {
        u8 tcp_ignore_invalid_rst;
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        unsigned int offload_timeout;
-       unsigned int offload_pickup;
 #endif
 };
 
@@ -44,7 +43,6 @@ struct nf_udp_net {
        unsigned int timeouts[UDP_CT_MAX];
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        unsigned int offload_timeout;
-       unsigned int offload_pickup;
 #endif
 };
 
index e328c5127757178eff08e1872dd1c9b73ab1a7eb..0509d2d6be676638e35cae3c6b4325524d693c25 100644 (file)
@@ -31,6 +31,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num);
 void psample_group_take(struct psample_group *group);
 void psample_group_put(struct psample_group *group);
 
+struct sk_buff;
+
 #if IS_ENABLED(CONFIG_PSAMPLE)
 
 void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
index 390270e00a1d14e0083f06e04cf61a3fa49968dc..f160484afc5ce624e424f33f4b35d04a78858d47 100644 (file)
@@ -48,7 +48,9 @@
        {(unsigned long)__GFP_WRITE,            "__GFP_WRITE"},         \
        {(unsigned long)__GFP_RECLAIM,          "__GFP_RECLAIM"},       \
        {(unsigned long)__GFP_DIRECT_RECLAIM,   "__GFP_DIRECT_RECLAIM"},\
-       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"}\
+       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"},\
+       {(unsigned long)__GFP_ZEROTAGS,         "__GFP_ZEROTAGS"},      \
+       {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\
 
 #define show_gfp_flags(flags)                                          \
        (flags) ? __print_flags(flags, "|",                             \
index fbf9c5c7dd59ab6eb765c35e0ef9040d56e07c02..64553df9d7350d03b50e62f6419a78df1bbe64e3 100644 (file)
@@ -51,6 +51,7 @@
 #define FAN_ENABLE_AUDIT       0x00000040
 
 /* Flags to determine fanotify event format */
+#define FAN_REPORT_PIDFD       0x00000080      /* Report pidfd for event->pid */
 #define FAN_REPORT_TID         0x00000100      /* event->pid is thread id */
 #define FAN_REPORT_FID         0x00000200      /* Report unique file id */
 #define FAN_REPORT_DIR_FID     0x00000400      /* Report unique directory id */
@@ -123,6 +124,7 @@ struct fanotify_event_metadata {
 #define FAN_EVENT_INFO_TYPE_FID                1
 #define FAN_EVENT_INFO_TYPE_DFID_NAME  2
 #define FAN_EVENT_INFO_TYPE_DFID       3
+#define FAN_EVENT_INFO_TYPE_PIDFD      4
 
 /* Variable length info record following event metadata */
 struct fanotify_event_info_header {
@@ -148,6 +150,15 @@ struct fanotify_event_info_fid {
        unsigned char handle[0];
 };
 
+/*
+ * This structure is used for info records of type FAN_EVENT_INFO_TYPE_PIDFD.
+ * It holds a pidfd for the pid that was responsible for generating an event.
+ */
+struct fanotify_event_info_pidfd {
+       struct fanotify_event_info_header hdr;
+       __s32 pidfd;
+};
+
 struct fanotify_response {
        __s32 fd;
        __u32 response;
@@ -160,6 +171,8 @@ struct fanotify_response {
 
 /* No fd set in event */
 #define FAN_NOFD       -1
+#define FAN_NOPIDFD    FAN_NOFD
+#define FAN_EPIDFD     -2
 
 /* Helper functions to deal with fanotify_event_metadata buffers */
 #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
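
Putting the new flag together from userspace: a listener passes FAN_REPORT_PIDFD to fanotify_init() and then walks the info records trailing each event, closing the received pidfd when done. A condensed sketch, assuming installed headers new enough to carry these definitions, with error handling trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/fanotify.h>

    int main(void)
    {
            char buf[4096];
            int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_PIDFD, O_RDONLY);

            fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN, AT_FDCWD, "/tmp");
            ssize_t len = read(fd, buf, sizeof(buf));

            for (struct fanotify_event_metadata *md = (void *)buf;
                 FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
                    struct fanotify_event_info_pidfd *info = (void *)(md + 1);

                    if (md->event_len > FAN_EVENT_METADATA_LEN &&
                        info->hdr.info_type == FAN_EVENT_INFO_TYPE_PIDFD &&
                        info->pidfd >= 0) {   /* FAN_NOPIDFD/FAN_EPIDFD are negative */
                            printf("event raised via pidfd %d\n", info->pidfd);
                            close(info->pidfd);
                    }
                    if (md->fd >= 0)
                            close(md->fd);
            }
            return 0;
    }
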
index dc8b72201f6c5b19a96214f3029be292a3019665..00a60695fa5388f2150c111d23698cb7a0580d39 100644 (file)
@@ -66,8 +66,11 @@ enum {
 #define NUD_NONE       0x00
 
 /* NUD_NOARP & NUD_PERMANENT are pseudostates, they never change
-   and make no address resolution or NUD.
-   NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * and make no address resolution or NUD.
+ * NUD_PERMANENT also cannot be deleted by garbage collectors.
+ * When NTF_EXT_LEARNED is set for a bridge fdb entry the different cache entry
+ * states don't make sense and thus are ignored. Such entries don't age and
+ * can roam.
  */
 
 struct nda_cacheinfo {
index 912ec60b26b09d7c4dca8e8384795235e130954a..bbcd285b22e1093d2db91f72c1fd280b4f9a49a2 100644 (file)
@@ -43,6 +43,15 @@ enum nfnl_hook_chain_info_attributes {
 };
 #define NFNLA_HOOK_INFO_MAX (__NFNLA_HOOK_INFO_MAX - 1)
 
+enum nfnl_hook_chain_desc_attributes {
+       NFNLA_CHAIN_UNSPEC,
+       NFNLA_CHAIN_TABLE,
+       NFNLA_CHAIN_FAMILY,
+       NFNLA_CHAIN_NAME,
+       __NFNLA_CHAIN_MAX,
+};
+#define NFNLA_CHAIN_MAX (__NFNLA_CHAIN_MAX - 1)
+
 /**
  * enum nfnl_hook_chaintype - chain type
  *
index f5b8246e8aa1c9af8e51d8f2da825262ea65fd49..8d97aba78c3ad03d9b261ffd0b1089e58fbf598a 100644 (file)
@@ -397,6 +397,12 @@ static int __init bootconfig_params(char *param, char *val,
        return 0;
 }
 
+static int __init warn_bootconfig(char *str)
+{
+       /* The 'bootconfig' has been handled by bootconfig_params(). */
+       return 0;
+}
+
 static void __init setup_boot_config(void)
 {
        static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
@@ -475,9 +481,8 @@ static int __init warn_bootconfig(char *str)
        pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
        return 0;
 }
-early_param("bootconfig", warn_bootconfig);
-
 #endif
+early_param("bootconfig", warn_bootconfig);
 
 /* Change NUL term back to "=", to make "param" the whole string. */
 static void __init repair_env_string(char *param, char *val)
index b1a5fc04492bd0febe0b57016a69d8875941bddc..0a28a8095d3e91b1392197c5ab6d6c03701a4f40 100644 (file)
@@ -1362,11 +1362,13 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
 }
 
 /**
- *     __bpf_prog_run - run eBPF program on a given context
+ *     ___bpf_prog_run - run eBPF program on a given context
  *     @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
  *     @insn: is the array of eBPF instructions
  *
  * Decode and execute eBPF instructions.
+ *
+ * Return: whatever value is in %BPF_R0 at program exit
  */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 {
@@ -1878,6 +1880,9 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
+ *
+ * Return: the &fp argument along with &err set to 0 for success or
+ * a negative errno code on failure
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
index 72c58cc516a397bb5e973a20f3dcd3a0066f6a44..9c011f3a26878488bf44324003a7f6c570e1432f 100644 (file)
@@ -1565,8 +1565,8 @@ alloc:
        /* We cannot do copy_from_user or copy_to_user inside
         * the rcu_read_lock. Allocate enough space here.
         */
-       keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
-       values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
+       keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
+       values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
        if (!keys || !values) {
                ret = -ENOMEM;
                goto after_loop;
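
The two factors multiplied here derive from userspace-controlled map attributes, so the open-coded key_size * bucket_size could wrap and yield an undersized buffer. kvmalloc_array() refuses overflowing products; roughly, it behaves like this overflow-checked sketch:

    #include <linux/mm.h>
    #include <linux/overflow.h>

    static void *alloc_checked(size_t n, size_t size, gfp_t flags)
    {
            size_t bytes;

            if (check_mul_overflow(n, size, &bytes))
                    return NULL;                /* product would wrap: refuse */
            return kvmalloc(bytes, flags);
    }
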
index 62cf0038391040e20520b5a9bb04f20ae50db5f1..55f83ea09dae7243d24f0a357d1495579bd5cd13 100644 (file)
@@ -353,9 +353,15 @@ const struct bpf_func_proto bpf_jiffies64_proto = {
 #ifdef CONFIG_CGROUPS
 BPF_CALL_0(bpf_get_current_cgroup_id)
 {
-       struct cgroup *cgrp = task_dfl_cgroup(current);
+       struct cgroup *cgrp;
+       u64 cgrp_id;
 
-       return cgroup_id(cgrp);
+       rcu_read_lock();
+       cgrp = task_dfl_cgroup(current);
+       cgrp_id = cgroup_id(cgrp);
+       rcu_read_unlock();
+
+       return cgrp_id;
 }
 
 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
@@ -366,13 +372,17 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
 
 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
 {
-       struct cgroup *cgrp = task_dfl_cgroup(current);
+       struct cgroup *cgrp;
        struct cgroup *ancestor;
+       u64 cgrp_id;
 
+       rcu_read_lock();
+       cgrp = task_dfl_cgroup(current);
        ancestor = cgroup_ancestor(cgrp, ancestor_level);
-       if (!ancestor)
-               return 0;
-       return cgroup_id(ancestor);
+       cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
+       rcu_read_unlock();
+
+       return cgrp_id;
 }
 
 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
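
The bug pattern fixed in both helpers is the same: task_dfl_cgroup() yields an RCU-protected pointer, so the lookup and every dereference through it (cgroup_id() included) must share one read-side critical section, and only a plain value may escape it. Reduced to its essentials, with a hypothetical structure:

    #include <linux/rcupdate.h>

    struct foo { u64 id; };                     /* hypothetical */
    static struct foo __rcu *foo_ptr;

    static u64 read_id_safely(void)
    {
            struct foo *p;
            u64 id;

            rcu_read_lock();
            p = rcu_dereference(foo_ptr);       /* valid only inside the section */
            id = p ? p->id : 0;
            rcu_read_unlock();                  /* a value escapes, not the pointer */

            return id;
    }
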
@@ -397,8 +407,8 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
        void *ptr;
        int i;
 
-       for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-               if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+       for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+               if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
                        continue;
 
                storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
@@ -1070,12 +1080,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
index f9bda5476ea55c5823c97c83488c42325d65388b..49f07e2bf23b9be28c9ead67bbed185b18785632 100644 (file)
@@ -5150,8 +5150,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
        case BPF_MAP_TYPE_RINGBUF:
                if (func_id != BPF_FUNC_ringbuf_output &&
                    func_id != BPF_FUNC_ringbuf_reserve &&
-                   func_id != BPF_FUNC_ringbuf_submit &&
-                   func_id != BPF_FUNC_ringbuf_discard &&
                    func_id != BPF_FUNC_ringbuf_query)
                        goto error;
                break;
@@ -5260,6 +5258,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
                if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
                        goto error;
                break;
+       case BPF_FUNC_ringbuf_output:
+       case BPF_FUNC_ringbuf_reserve:
+       case BPF_FUNC_ringbuf_query:
+               if (map->map_type != BPF_MAP_TYPE_RINGBUF)
+                       goto error;
+               break;
        case BPF_FUNC_get_stackid:
                if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
                        goto error;
@@ -11663,6 +11667,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
                if (aux_data[i].seen)
                        continue;
                memcpy(insn + i, &trap, sizeof(trap));
+               aux_data[i].zext_dst = false;
        }
 }
 
index e17a56639766be1b558768c1b410ef9373a0b160..9594cfd1cf2cf7ea7be1a44ad048d3b25b624acc 100644 (file)
@@ -248,9 +248,9 @@ static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
 {
        cfi_check_fn fn;
 
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 
        return fn;
 }
@@ -269,11 +269,11 @@ static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
        cfi_check_fn fn = NULL;
        struct module *mod;
 
-       rcu_read_lock_sched();
+       rcu_read_lock_sched_notrace();
        mod = __module_address(ptr);
        if (mod)
                fn = mod->cfi_check;
-       rcu_read_unlock_sched();
+       rcu_read_unlock_sched_notrace();
 
        return fn;
 }
index 7f0e58917432965397dd521c2f7c83e2d4d27617..b264ab5652ba9f5e40f3df6a95f43b2dab8d04d4 100644 (file)
@@ -347,19 +347,20 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 }
 
 static struct cgroup_rstat_cpu *
-cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
 {
        struct cgroup_rstat_cpu *rstatc;
 
        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
-       u64_stats_update_begin(&rstatc->bsync);
+       *flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
        return rstatc;
 }
 
 static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
-                                                struct cgroup_rstat_cpu *rstatc)
+                                                struct cgroup_rstat_cpu *rstatc,
+                                                unsigned long flags)
 {
-       u64_stats_update_end(&rstatc->bsync);
+       u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
        cgroup_rstat_updated(cgrp, smp_processor_id());
        put_cpu_ptr(rstatc);
 }
@@ -367,18 +368,20 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
 void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
        rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
 
 void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec)
 {
        struct cgroup_rstat_cpu *rstatc;
+       unsigned long flags;
 
-       rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+       rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
 
        switch (index) {
        case CPUTIME_USER:
@@ -394,7 +397,7 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
                break;
        }
 
-       cgroup_base_stat_cputime_account_end(cgrp, rstatc);
+       cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
 }
 
 /*
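
On 32-bit kernels u64_stats_sync is a real seqcount, so an interrupt that lands mid-update and re-enters the accounting path on the same CPU can deadlock on it; the _irqsave variants close that window. The same shape applies to any u64_stats-protected counter touched from both task and irq context; a minimal sketch with a hypothetical stats structure:

    #include <linux/u64_stats_sync.h>

    struct my_stats {                           /* hypothetical */
            u64 bytes;
            struct u64_stats_sync sync;
    };

    static void my_stats_add(struct my_stats *st, u64 delta)
    {
            unsigned long flags;

            /* IRQs stay off for the whole seqcount write section. */
            flags = u64_stats_update_begin_irqsave(&st->sync);
            st->bytes += delta;
            u64_stats_update_end_irqrestore(&st->sync, flags);
    }
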
index e6fd2b3fc31f093cfd08cae402d1730cf56ae180..f784e08c2fbd6161e50c66a49f5ab4dfedef155b 100644 (file)
@@ -286,13 +286,13 @@ struct cred *prepare_creds(void)
        new->security = NULL;
 #endif
 
-       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
-               goto error;
-
        new->ucounts = get_ucounts(new->ucounts);
        if (!new->ucounts)
                goto error;
 
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+               goto error;
+
        validate_creds(new);
        return new;
 
@@ -753,13 +753,13 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 #ifdef CONFIG_SECURITY
        new->security = NULL;
 #endif
-       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
-               goto error;
-
        new->ucounts = get_ucounts(new->ucounts);
        if (!new->ucounts)
                goto error;
 
+       if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+               goto error;
+
        put_cred(old);
        validate_creds(new);
        return new;
index bc94b2cc59956e923cb7bbe11d32a919ef7a053c..44f4c2d83763fd36da894f7aac8f7d2166a625ab 100644 (file)
@@ -828,10 +828,10 @@ void __init fork_init(void)
        for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
 
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING));
-       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK));
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
+       set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);
 
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
index 7f04c7d8296e209423a14f03aa2267b46b12c22a..a98bcfc4be7bc1d09cc1c8350b42f3e9dd0194d6 100644 (file)
@@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
        } else {
                switch (__irq_startup_managed(desc, aff, force)) {
                case IRQ_STARTUP_NORMAL:
+                       if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
+                               irq_setup_affinity(desc);
                        ret = __irq_startup(desc);
-                       irq_setup_affinity(desc);
+                       if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
+                               irq_setup_affinity(desc);
                        break;
                case IRQ_STARTUP_MANAGED:
                        irq_do_set_affinity(d, aff, false);
index c41965e348b5bbfd737a3de0eb04d462e285cf45..85df3ca03efe8adb3b927fa1982f73644d2cc6fc 100644 (file)
@@ -476,11 +476,6 @@ skip_activate:
        return 0;
 
 cleanup:
-       for_each_msi_vector(desc, i, dev) {
-               irq_data = irq_domain_get_irq_data(domain, i);
-               if (irqd_is_activated(irq_data))
-                       irq_domain_deactivate_irq(irq_data);
-       }
        msi_domain_free_irqs(domain, dev);
        return ret;
 }
@@ -505,7 +500,15 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 
 void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
 {
+       struct irq_data *irq_data;
        struct msi_desc *desc;
+       int i;
+
+       for_each_msi_vector(desc, i, dev) {
+               irq_data = irq_domain_get_irq_data(domain, i);
+               if (irqd_is_activated(irq_data))
+                       irq_domain_deactivate_irq(irq_data);
+       }
 
        for_each_msi_entry(desc, dev) {
                /*
index d309d6fbf5bddc18293b12dcae2d1757b8609119..4d2a702d7aa95654acf024e0e67e60c70ec144f1 100644 (file)
@@ -453,6 +453,11 @@ static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs,
         */
        index = irq_timings_interval_index(interval);
 
+       if (index > PREDICTION_BUFFER_SIZE - 1) {
+               irqs->count = 0;
+               return;
+       }
+
        /*
         * Store the index as an element of the pattern in another
         * circular array.
index e65de172ccf7cf81734db28d193e0ab4154bfb58..1d1d1b0e424897d1c9ba750d046f7e535b3b724d 100644 (file)
@@ -64,7 +64,7 @@ static noinline void microbenchmark(unsigned long iters)
 {
        const struct kcsan_ctx ctx_save = current->kcsan_ctx;
        const bool was_enabled = READ_ONCE(kcsan_enabled);
-       cycles_t cycles;
+       u64 cycles;
 
        /* We may have been called from an atomic region; reset context. */
        memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
index b3adb40549bf37118b688a3689cf8cd731ba6227..7c5a4a087cc73a74c682d48abac7aa45ca371e5d 100644 (file)
@@ -59,7 +59,7 @@ static struct task_struct **writer_tasks;
 static struct task_struct **reader_tasks;
 
 static bool lock_is_write_held;
-static bool lock_is_read_held;
+static atomic_t lock_is_read_held;
 static unsigned long last_lock_release;
 
 struct lock_stress_stats {
@@ -682,7 +682,7 @@ static int lock_torture_writer(void *arg)
                if (WARN_ON_ONCE(lock_is_write_held))
                        lwsp->n_lock_fail++;
                lock_is_write_held = true;
-               if (WARN_ON_ONCE(lock_is_read_held))
+               if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
                        lwsp->n_lock_fail++; /* rare, but... */
 
                lwsp->n_lock_acquired++;
@@ -717,13 +717,13 @@ static int lock_torture_reader(void *arg)
                        schedule_timeout_uninterruptible(1);
 
                cxt.cur_ops->readlock(tid);
-               lock_is_read_held = true;
+               atomic_inc(&lock_is_read_held);
                if (WARN_ON_ONCE(lock_is_write_held))
                        lrsp->n_lock_fail++; /* rare, but... */
 
                lrsp->n_lock_acquired++;
                cxt.cur_ops->read_delay(&rand);
-               lock_is_read_held = false;
+               atomic_dec(&lock_is_read_held);
                cxt.cur_ops->readunlock(tid);
 
                stutter_wait("lock_torture_reader");
@@ -738,20 +738,22 @@ static int lock_torture_reader(void *arg)
 static void __torture_print_stats(char *page,
                                  struct lock_stress_stats *statp, bool write)
 {
+       long cur;
        bool fail = false;
        int i, n_stress;
-       long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
+       long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
        long long sum = 0;
 
        n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
        for (i = 0; i < n_stress; i++) {
-               if (statp[i].n_lock_fail)
+               if (data_race(statp[i].n_lock_fail))
                        fail = true;
-               sum += statp[i].n_lock_acquired;
-               if (max < statp[i].n_lock_acquired)
-                       max = statp[i].n_lock_acquired;
-               if (min > statp[i].n_lock_acquired)
-                       min = statp[i].n_lock_acquired;
+               cur = data_race(statp[i].n_lock_acquired);
+               sum += cur;
+               if (max < cur)
+                       max = cur;
+               if (min > cur)
+                       min = cur;
        }
        page += sprintf(page,
                        "%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
@@ -996,7 +998,6 @@ static int __init lock_torture_init(void)
                }
 
                if (nreaders_stress) {
-                       lock_is_read_held = false;
                        cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
                                                 sizeof(*cxt.lrsa),
                                                 GFP_KERNEL);
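
A single bool cannot represent several readers at once: the first reader to exit stores false and hides the rest, and the unmarked concurrent stores are data races besides. Counting readers with an atomic_t fixes both, as this reduction shows (hypothetical helper names):

    #include <linux/atomic.h>

    static atomic_t readers_held = ATOMIC_INIT(0);

    static void reader_lock(void)   { atomic_inc(&readers_held); }
    static void reader_unlock(void) { atomic_dec(&readers_held); }

    /* A writer now reliably detects *any* concurrent reader: */
    static bool writer_sees_reader(void)
    {
            return atomic_read(&readers_held) > 0;
    }
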
index b5d9bb5202c6b447c4ffeeffdac2ae7cff0a02e1..ad0db322ed3b45ed5bf3b7caa94fc10160d98f4c 100644 (file)
@@ -343,7 +343,7 @@ static __always_inline bool
 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                              enum rtmutex_chainwalk chwalk)
 {
-       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX))
+       if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                return waiter != NULL;
        return chwalk == RT_MUTEX_FULL_CHAINWALK;
 }
index d4d3ba6e1728a2a00b5f30d53fbbfe0a8d07829e..18d3a5c699d8407a1dce94a68f9556305831edcd 100644 (file)
@@ -9,19 +9,6 @@
  *
  * Copyright (c) 2020 Oracle and/or its affiliates.
  * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/completion.h>
@@ -211,7 +198,7 @@ int padata_do_parallel(struct padata_shell *ps,
        if ((pinst->flags & PADATA_RESET))
                goto out;
 
-       atomic_inc(&pd->refcnt);
+       refcount_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = *cb_cpu;
 
@@ -383,7 +370,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
        }
        local_bh_enable();
 
-       if (atomic_sub_and_test(cnt, &pd->refcnt))
+       if (refcount_sub_and_test(cnt, &pd->refcnt))
                padata_free_pd(pd);
 }
 
@@ -593,7 +580,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
        padata_init_reorder_list(pd);
        padata_init_squeues(pd);
        pd->seq_nr = -1;
-       atomic_set(&pd->refcnt, 1);
+       refcount_set(&pd->refcnt, 1);
        spin_lock_init(&pd->lock);
        pd->cpu = cpumask_first(pd->cpumask.pcpu);
        INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
@@ -667,7 +654,7 @@ static int padata_replace(struct padata_instance *pinst)
        synchronize_rcu();
 
        list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
-               if (atomic_dec_and_test(&ps->opd->refcnt))
+               if (refcount_dec_and_test(&ps->opd->refcnt))
                        padata_free_pd(ps->opd);
 
        pinst->flags &= ~PADATA_RESET;
@@ -733,7 +720,7 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;
 
-       get_online_cpus();
+       cpus_read_lock();
        mutex_lock(&pinst->lock);
 
        switch (cpumask_type) {
@@ -753,7 +740,7 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 
 out:
        mutex_unlock(&pinst->lock);
-       put_online_cpus();
+       cpus_read_unlock();
 
        return err;
 }
@@ -992,7 +979,7 @@ struct padata_instance *padata_alloc(const char *name)
        if (!pinst->parallel_wq)
                goto err_free_inst;
 
-       get_online_cpus();
+       cpus_read_lock();
 
        pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
                                           WQ_CPU_INTENSIVE, 1, name);
@@ -1026,7 +1013,7 @@ struct padata_instance *padata_alloc(const char *name)
                                                    &pinst->cpu_dead_node);
 #endif
 
-       put_online_cpus();
+       cpus_read_unlock();
 
        return pinst;
 
@@ -1036,7 +1023,7 @@ err_free_masks:
 err_free_serial_wq:
        destroy_workqueue(pinst->serial_wq);
 err_put_cpus:
-       put_online_cpus();
+       cpus_read_unlock();
        destroy_workqueue(pinst->parallel_wq);
 err_free_inst:
        kfree(pinst);
@@ -1074,9 +1061,9 @@ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
 
        ps->pinst = pinst;
 
-       get_online_cpus();
+       cpus_read_lock();
        pd = padata_alloc_pd(ps);
-       put_online_cpus();
+       cpus_read_unlock();
 
        if (!pd)
                goto out_free_ps;
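
Relative to atomic_t, refcount_t saturates instead of wrapping and warns on increment-from-zero and on underflow, which is exactly what an object lifetime count wants. The pd lifecycle above, reduced to a hypothetical object:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct my_obj {                             /* hypothetical */
            refcount_t refcnt;
    };

    static struct my_obj *my_obj_new(void)
    {
            struct my_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

            if (o)
                    refcount_set(&o->refcnt, 1);        /* cf. padata_alloc_pd() */
            return o;
    }

    static void my_obj_put(struct my_obj *o)
    {
            if (refcount_dec_and_test(&o->refcnt))      /* cf. padata_replace() */
                    kfree(o);
    }
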
index ebdf9c60cd0b586b7e6d9f06028bf31bfd9571ab..efe87db4468364f8d2650d0b517aa2b567048f2c 100644 (file)
@@ -550,13 +550,21 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
  * Note, that this function can only be called after the fd table has
  * been unshared to avoid leaking the pidfd to the new process.
  *
+ * This symbol should not be explicitly exported to loadable modules.
+ *
  * Return: On success, a cloexec pidfd is returned.
  *         On error, a negative errno number will be returned.
  */
-static int pidfd_create(struct pid *pid, unsigned int flags)
+int pidfd_create(struct pid *pid, unsigned int flags)
 {
        int fd;
 
+       if (!pid || !pid_has_task(pid, PIDTYPE_TGID))
+               return -EINVAL;
+
+       if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
+               return -EINVAL;
+
        fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
                              flags | O_RDWR | O_CLOEXEC);
        if (fd < 0)
@@ -596,10 +604,7 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
        if (!p)
                return -ESRCH;
 
-       if (pid_has_task(p, PIDTYPE_TGID))
-               fd = pidfd_create(p, flags);
-       else
-               fd = -EINVAL;
+       fd = pidfd_create(p, flags);
 
        put_pid(p);
        return fd;
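
With the validation folded into pidfd_create(), pidfd_open() behaves exactly as before from userspace. For reference, a minimal caller, assuming libc headers that define SYS_pidfd_open:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            long fd = syscall(SYS_pidfd_open, (pid_t)getpid(), 0);

            if (fd < 0)                 /* EINVAL for non-leaders or bad flags */
                    perror("pidfd_open");
            else
                    printf("pidfd: %ld\n", fd);
            return 0;
    }
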
index dca51fe9c73f267cace93d22a800f07e11a0c0a7..2cc34a22a50600b8b94da9bda2832a8b6fbc68d0 100644 (file)
@@ -487,7 +487,7 @@ retry:
        if (gp_async) {
                cur_ops->gp_barrier();
        }
-       writer_n_durations[me] = i_max;
+       writer_n_durations[me] = i_max + 1;
        torture_kthread_stopping("rcu_scale_writer");
        return 0;
 }
@@ -561,7 +561,7 @@ rcu_scale_cleanup(void)
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
-                       for (j = 0; j <= writer_n_durations[i]; j++) {
+                       for (j = 0; j < writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                        scale_type, SCALE_FLAG,
index 40ef5417d95451bd4aab3f20c4c7f1de011c43b6..ab4215266ebee7484227094273ee170c11583a08 100644 (file)
@@ -2022,8 +2022,13 @@ static int rcu_torture_stall(void *args)
                          __func__, raw_smp_processor_id());
                while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
                                    stop_at))
-                       if (stall_cpu_block)
+                       if (stall_cpu_block) {
+#ifdef CONFIG_PREEMPTION
+                               preempt_schedule();
+#else
                                schedule_timeout_uninterruptible(HZ);
+#endif
+                       }
                if (stall_cpu_irqsoff)
                        local_irq_enable();
                else if (!stall_cpu_block)
index d998a76fb5422e569aa283f4eac96971f0ff087e..66dc14cf5687e679907311c9869ce396d22ad8b6 100644 (file)
@@ -467,6 +467,40 @@ static struct ref_scale_ops acqrel_ops = {
        .name           = "acqrel"
 };
 
+static volatile u64 stopopts;
+
+static void ref_clock_section(const int nloops)
+{
+       u64 x = 0;
+       int i;
+
+       preempt_disable();
+       for (i = nloops; i >= 0; i--)
+               x += ktime_get_real_fast_ns();
+       preempt_enable();
+       stopopts = x;
+}
+
+static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
+{
+       u64 x = 0;
+       int i;
+
+       preempt_disable();
+       for (i = nloops; i >= 0; i--) {
+               x += ktime_get_real_fast_ns();
+               un_delay(udl, ndl);
+       }
+       preempt_enable();
+       stopopts = x;
+}
+
+static struct ref_scale_ops clock_ops = {
+       .readsection    = ref_clock_section,
+       .delaysection   = ref_clock_delay_section,
+       .name           = "clock"
+};
+
 static void rcu_scale_one_reader(void)
 {
        if (readdelay <= 0)
@@ -759,7 +793,7 @@ ref_scale_init(void)
        int firsterr = 0;
        static struct ref_scale_ops *scale_ops[] = {
                &rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
-               &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
+               &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops,
        };
 
        if (!torture_init_begin(scale_type, verbose))
index 26344dc6483b0e0306a305f27f2f1a3bb2b35cd8..a0ba2ed49bc616205bb455accd9128c9ac9ff86c 100644 (file)
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
  */
 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
-       int newval = ssp->srcu_lock_nesting[idx] - 1;
+       int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
 
        WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
        if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
index 8536c55df51426b1b7bbaa06b612e6e74954b7c0..806160c44b1723fe46caaf10021e795a964a9ec8 100644 (file)
@@ -643,8 +643,8 @@ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
 //
 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
 // passing an empty function to schedule_on_each_cpu().  This approach
-// provides an asynchronous call_rcu_tasks_rude() API and batching
-// of concurrent calls to the synchronous synchronize_rcu_rude() API.
+// provides an asynchronous call_rcu_tasks_rude() API and batching of
+// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
 // and induces otherwise unnecessary context switches on all online CPUs,
 // whether idle or not.
@@ -785,7 +785,10 @@ EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
 //     set that task's .need_qs flag so that task's next outermost
 //     rcu_read_unlock_trace() will report the quiescent state (in which
 //     case the count of readers is incremented).  If both attempts fail,
-//     the task is added to a "holdout" list.
+//     the task is added to a "holdout" list.  Note that IPIs are used
+//     to invoke trc_read_check_handler() in the context of running tasks
+//     in order to avoid ordering overhead on common-case shared-variable
+//     accesses.
 // rcu_tasks_trace_postscan():
 //     Initialize state and attempt to identify an immediate quiescent
 //     state as above (but only for idle tasks), unblock CPU-hotplug
@@ -847,7 +850,7 @@ static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
 /* If we are the last reader, wake up the grace-period kthread. */
 void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
 {
-       int nq = t->trc_reader_special.b.need_qs;
+       int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
 
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
            t->trc_reader_special.b.need_mb)
@@ -894,7 +897,7 @@ static void trc_read_check_handler(void *t_in)
 
        // If the task is not in a read-side critical section, and
        // if this is the last reader, awaken the grace-period kthread.
-       if (likely(!t->trc_reader_nesting)) {
+       if (likely(!READ_ONCE(t->trc_reader_nesting))) {
                if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
                        wake_up(&trc_wait);
                // Mark as checked after decrement to avoid false
@@ -903,7 +906,7 @@ static void trc_read_check_handler(void *t_in)
                goto reset_ipi;
        }
        // If we are racing with an rcu_read_unlock_trace(), try again later.
-       if (unlikely(t->trc_reader_nesting < 0)) {
+       if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
                if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
                        wake_up(&trc_wait);
                goto reset_ipi;
@@ -913,14 +916,14 @@ static void trc_read_check_handler(void *t_in)
        // Get here if the task is in a read-side critical section.  Set
        // its state so that it will awaken the grace-period kthread upon
        // exit from that critical section.
-       WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 
 reset_ipi:
        // Allow future IPIs to be sent on CPU and for task.
        // Also order this IPI handler against any later manipulations of
        // the intended task.
-       smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
+       smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
        smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
 }
 
@@ -950,6 +953,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
                        n_heavy_reader_ofl_updates++;
                in_qs = true;
        } else {
+               // The task is not running, so C-language access is safe.
                in_qs = likely(!t->trc_reader_nesting);
        }
 
@@ -964,7 +968,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
        // state so that it will awaken the grace-period kthread upon exit
        // from that critical section.
        atomic_inc(&trc_n_readers_need_end); // One more to wait on.
-       WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
        return true;
 }
@@ -982,7 +986,7 @@ static void trc_wait_for_one_reader(struct task_struct *t,
        // The current task had better be in a quiescent state.
        if (t == current) {
                t->trc_reader_checked = true;
-               WARN_ON_ONCE(t->trc_reader_nesting);
+               WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
                return;
        }
 
@@ -994,6 +998,12 @@ static void trc_wait_for_one_reader(struct task_struct *t,
        }
        put_task_struct(t);
 
+       // If this task is not yet on the holdout list, then we are in
+       // an RCU read-side critical section.  Otherwise, the invocation of
+       // rcu_add_holdout() that added it to the list did the necessary
+       // get_task_struct().  Either way, the task cannot be freed out
+       // from under this code.
+
        // If currently running, send an IPI, either way, add to list.
        trc_add_holdout(t, bhp);
        if (task_curr(t) &&
@@ -1092,8 +1102,8 @@ static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
                 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
                 ".i"[is_idle_task(t)],
                 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
-                t->trc_reader_nesting,
-                " N"[!!t->trc_reader_special.b.need_qs],
+                READ_ONCE(t->trc_reader_nesting),
+                " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
                 cpu);
        sched_show_task(t);
 }
@@ -1187,7 +1197,7 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
 {
        WRITE_ONCE(t->trc_reader_checked, true);
-       WARN_ON_ONCE(t->trc_reader_nesting);
+       WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
        WRITE_ONCE(t->trc_reader_nesting, 0);
        if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
                rcu_read_unlock_trace_special(t, 0);
index 51f24ecd94b2688fa8c475d4a1797d0e1c24b211..bce848e50512ea08dbe9274c9c4c81843e94675a 100644 (file)
 
 /* Data structures. */
 
-/*
- * Steal a bit from the bottom of ->dynticks for idle entry/exit
- * control.  Initially this is for TLB flushing.
- */
-#define RCU_DYNTICK_CTRL_MASK 0x1
-#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .dynticks_nesting = 1,
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
-       .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
+       .dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_RCU_NOCB_CPU
        .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
 #endif
@@ -258,6 +251,15 @@ void rcu_softirq_qs(void)
        rcu_tasks_qs(current, false);
 }
 
+/*
+ * Increment the current CPU's rcu_data structure's ->dynticks field
+ * with ordering.  Return the new value.
+ */
+static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
+{
+       return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
+}
+
 /*
  * Record entry into an extended quiescent state.  This is only to be
  * called when not already in an extended quiescent state, that is,
@@ -266,7 +268,6 @@ void rcu_softirq_qs(void)
  */
 static noinstr void rcu_dynticks_eqs_enter(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
@@ -275,13 +276,9 @@ static noinstr void rcu_dynticks_eqs_enter(void)
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = rcu_dynticks_inc(1);
        // RCU is no longer watching.  Better be in extended quiescent state!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_CTR));
-       /* Better not have special action (TLB flush) pending! */
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_MASK));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
 }
 
 /*
@@ -291,7 +288,6 @@ static noinstr void rcu_dynticks_eqs_enter(void)
  */
 static noinstr void rcu_dynticks_eqs_exit(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
@@ -299,15 +295,10 @@ static noinstr void rcu_dynticks_eqs_exit(void)
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
-       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = rcu_dynticks_inc(1);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    !(seq & RCU_DYNTICK_CTRL_CTR));
-       if (seq & RCU_DYNTICK_CTRL_MASK) {
-               arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
-               smp_mb__after_atomic(); /* _exit after clearing mask. */
-       }
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
 }
 
 /*
@@ -324,9 +315,9 @@ static void rcu_dynticks_eqs_online(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
+       if (atomic_read(&rdp->dynticks) & 0x1)
                return;
-       atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       rcu_dynticks_inc(1);
 }
 
 /*
@@ -336,9 +327,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-
-       return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+       return !(atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
 }
 
 /*
@@ -347,9 +336,8 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static int rcu_dynticks_snap(struct rcu_data *rdp)
 {
-       int snap = atomic_add_return(0, &rdp->dynticks);
-
-       return snap & ~RCU_DYNTICK_CTRL_MASK;
+       smp_mb();  // Fundamental RCU ordering guarantee.
+       return atomic_read_acquire(&rdp->dynticks);
 }
 
 /*
@@ -358,7 +346,7 @@ static int rcu_dynticks_snap(struct rcu_data *rdp)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-       return !(snap & RCU_DYNTICK_CTRL_CTR);
+       return !(snap & 0x1);
 }
 
 /* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
@@ -389,8 +377,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        int snap;
 
        // If not quiescent, force back to earlier extended quiescent state.
-       snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
-                                              RCU_DYNTICK_CTRL_CTR);
+       snap = atomic_read(&rdp->dynticks) & ~0x1;
 
        smp_rmb(); // Order ->dynticks and *vp reads.
        if (READ_ONCE(*vp))
@@ -398,32 +385,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        smp_rmb(); // Order *vp read and ->dynticks re-read.
 
        // If still in the same extended quiescent state, we are good!
-       return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
-}
-
-/*
- * Set the special (bottom) bit of the specified CPU so that it
- * will take special action (such as flushing its TLB) on the
- * next exit from an extended quiescent state.  Returns true if
- * the bit was successfully set, or false if the CPU was not in
- * an extended quiescent state.
- */
-bool rcu_eqs_special_set(int cpu)
-{
-       int old;
-       int new;
-       int new_old;
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
-       new_old = atomic_read(&rdp->dynticks);
-       do {
-               old = new_old;
-               if (old & RCU_DYNTICK_CTRL_CTR)
-                       return false;
-               new = old | RCU_DYNTICK_CTRL_MASK;
-               new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
-       } while (new_old != old);
-       return true;
+       return snap == atomic_read(&rdp->dynticks);
 }
 
 /*
@@ -439,13 +401,12 @@ bool rcu_eqs_special_set(int cpu)
  */
 notrace void rcu_momentary_dyntick_idle(void)
 {
-       int special;
+       int seq;
 
        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                   &this_cpu_ptr(&rcu_data)->dynticks);
+       seq = rcu_dynticks_inc(2);
        /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+       WARN_ON_ONCE(!(seq & 0x1));
        rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
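
With the control bits gone, ->dynticks is a bare counter whose low bit encodes the state: odd means the CPU is watching, even means it is in an extended quiescent state. Hence the increments by one at the EQS boundaries and by two (parity-preserving) here. A reduced model of the encoding, not the kernel's actual code:

    #include <linux/atomic.h>

    static atomic_t dynticks = ATOMIC_INIT(1);  /* odd: CPU is watching */

    static bool in_eqs(void)       { return !(atomic_read(&dynticks) & 0x1); }
    static void enter_eqs(void)    { atomic_inc(&dynticks); }    /* odd -> even */
    static void exit_eqs(void)     { atomic_inc(&dynticks); }    /* even -> odd */
    static void momentary_qs(void) { atomic_add(2, &dynticks); } /* stays odd */
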
@@ -1325,7 +1286,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         */
        jtsq = READ_ONCE(jiffies_to_sched_qs);
        ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
-       rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
+       rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu);
        if (!READ_ONCE(*rnhqp) &&
            (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
             time_after(jiffies, rcu_state.jiffies_resched) ||
@@ -1772,7 +1733,7 @@ static void rcu_strict_gp_boundary(void *unused)
 /*
  * Initialize a new grace period.  Return false if no grace period required.
  */
-static bool rcu_gp_init(void)
+static noinline_for_stack bool rcu_gp_init(void)
 {
        unsigned long firstseq;
        unsigned long flags;
@@ -1966,7 +1927,7 @@ static void rcu_gp_fqs(bool first_time)
 /*
  * Loop doing repeated quiescent-state forcing until the grace period ends.
  */
-static void rcu_gp_fqs_loop(void)
+static noinline_for_stack void rcu_gp_fqs_loop(void)
 {
        bool first_gp_fqs;
        int gf = 0;
@@ -1993,8 +1954,8 @@ static void rcu_gp_fqs_loop(void)
                trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
                                       TPS("fqswait"));
                WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
-               ret = swait_event_idle_timeout_exclusive(
-                               rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
+               (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
+                                rcu_gp_fqs_check_wake(&gf), j);
                rcu_gp_torture_wait();
                WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
                /* Locking provides needed memory barriers. */
@@ -2471,9 +2432,6 @@ int rcutree_dead_cpu(unsigned int cpu)
        WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
        /* Adjust any no-longer-needed kthreads. */
        rcu_boost_kthread_setaffinity(rnp, -1);
-       /* Do any needed no-CB deferred wakeups from this CPU. */
-       do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
-
        // Stop-machine done, so allow nohz_full to disable tick.
        tick_dep_clear(TICK_DEP_BIT_RCU);
        return 0;
@@ -4050,7 +4008,7 @@ void rcu_barrier(void)
         */
        init_completion(&rcu_state.barrier_completion);
        atomic_set(&rcu_state.barrier_cpu_count, 2);
-       get_online_cpus();
+       cpus_read_lock();
 
        /*
         * Force each CPU with callbacks to register a new callback.
@@ -4081,7 +4039,7 @@ void rcu_barrier(void)
                                          rcu_state.barrier_sequence);
                }
        }
-       put_online_cpus();
+       cpus_read_unlock();
 
        /*
         * Now that we have an rcu_barrier_callback() callback on each
@@ -4784,4 +4742,5 @@ void __init rcu_init(void)
 
 #include "tree_stall.h"
 #include "tree_exp.h"
+#include "tree_nocb.h"
 #include "tree_plugin.h"
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
new file mode 100644 (file)
index 0000000..8fdf44f
--- /dev/null
+++ b/kernel/rcu/tree_nocb.h
@@ -0,0 +1,1496 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Internal non-public definitions that provide either classic
+ * or preemptible semantics.
+ *
+ * Copyright Red Hat, 2009
+ * Copyright IBM Corporation, 2009
+ * Copyright SUSE, 2021
+ *
+ * Author: Ingo Molnar <mingo@elte.hu>
+ *        Paul E. McKenney <paulmck@linux.ibm.com>
+ *        Frederic Weisbecker <frederic@kernel.org>
+ */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+       return lockdep_is_held(&rdp->nocb_lock);
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+       /* Race on early boot between thread creation and assignment */
+       if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
+               return true;
+
+       if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
+               if (in_task())
+                       return true;
+       return false;
+}
+
+/*
+ * Offload callback processing from the boot-time-specified set of CPUs
+ * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
+ * created that pull the callbacks from the corresponding CPU, wait for
+ * a grace period to elapse, and invoke the callbacks.  These kthreads
+ * are organized into GP kthreads, which manage incoming callbacks, wait for
+ * grace periods, and awaken CB kthreads, and the CB kthreads, which only
+ * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
+ * do a wake_up() on their GP kthread when they insert a callback into any
+ * empty list, unless the rcu_nocb_poll boot parameter has been specified,
+ * in which case each kthread actively polls its CPU.  (Which isn't so great
+ * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
+ *
+ * This is intended to be used in conjunction with Frederic Weisbecker's
+ * adaptive-idle work, which would seriously reduce OS jitter on CPUs
+ * running CPU-bound user-mode computations.
+ *
+ * Offloading of callbacks can also be used as an energy-efficiency
+ * measure because CPUs with no RCU callbacks queued are more aggressive
+ * about entering dyntick-idle mode.
+ */
+
+/*
+ * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
+ * If the list is invalid, a warning is emitted and all CPUs are offloaded.
+ */
+static int __init rcu_nocb_setup(char *str)
+{
+       alloc_bootmem_cpumask_var(&rcu_nocb_mask);
+       if (cpulist_parse(str, rcu_nocb_mask)) {
+               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
+               cpumask_setall(rcu_nocb_mask);
+       }
+       return 1;
+}
+__setup("rcu_nocbs=", rcu_nocb_setup);
+
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+       rcu_nocb_poll = true;
+       return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
+/*
+ * Don't bother bypassing ->cblist if the call_rcu() rate is low.
+ * After all, the main point of bypassing is to avoid lock contention
+ * on ->nocb_lock, which can only happen at high call_rcu() rates.
+ */
+static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
+module_param(nocb_nobypass_lim_per_jiffy, int, 0);
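
The default fixes the *rate* rather than the per-jiffy count: 16 * 1000 / HZ direct enqueues per jiffy works out to 16,000 per second at any HZ. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ */
	int hzs[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++) {
		int lim = 16 * 1000 / hzs[i];

		printf("HZ=%4d -> %3d per jiffy = %d/sec\n",
		       hzs[i], lim, lim * hzs[i]);
	}
	return 0;
}
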
+
+/*
+ * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
+ * lock isn't immediately available, increment ->nocb_lock_contended to
+ * flag the contention.
+ */
+static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
+       __acquires(&rdp->nocb_bypass_lock)
+{
+       lockdep_assert_irqs_disabled();
+       if (raw_spin_trylock(&rdp->nocb_bypass_lock))
+               return;
+       atomic_inc(&rdp->nocb_lock_contended);
+       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+       smp_mb__after_atomic(); /* atomic_inc() before lock. */
+       raw_spin_lock(&rdp->nocb_bypass_lock);
+       smp_mb__before_atomic(); /* atomic_dec() after lock. */
+       atomic_dec(&rdp->nocb_lock_contended);
+}
+
+/*
+ * Spinwait until the specified rcu_data structure's ->nocb_lock is
+ * not contended.  Please note that this is extremely special-purpose,
+ * relying on the fact that at most two kthreads and one CPU contend for
+ * this lock, and also that the two kthreads are guaranteed to have frequent
+ * grace-period-duration time intervals between successive acquisitions
+ * of the lock.  This allows us to use an extremely simple throttling
+ * mechanism, and further to apply it only to the CPU doing floods of
+ * call_rcu() invocations.  Don't try this at home!
+ */
+static void rcu_nocb_wait_contended(struct rcu_data *rdp)
+{
+       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
+       while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
+               cpu_relax();
+}
+
+/*
+ * Conditionally acquire the specified rcu_data structure's
+ * ->nocb_bypass_lock.
+ */
+static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       return raw_spin_trylock(&rdp->nocb_bypass_lock);
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_bypass_lock.
+ */
+static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
+       __releases(&rdp->nocb_bypass_lock)
+{
+       lockdep_assert_irqs_disabled();
+       raw_spin_unlock(&rdp->nocb_bypass_lock);
+}
+
+/*
+ * Acquire the specified rcu_data structure's ->nocb_lock, but only
+ * if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_lock(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       if (!rcu_rdp_is_offloaded(rdp))
+               return;
+       raw_spin_lock(&rdp->nocb_lock);
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_lock, but only
+ * if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_unlock(struct rcu_data *rdp)
+{
+       if (rcu_rdp_is_offloaded(rdp)) {
+               lockdep_assert_irqs_disabled();
+               raw_spin_unlock(&rdp->nocb_lock);
+       }
+}
+
+/*
+ * Release the specified rcu_data structure's ->nocb_lock and restore
+ * interrupts, but only if it corresponds to a no-CBs CPU.
+ */
+static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
+                                      unsigned long flags)
+{
+       if (rcu_rdp_is_offloaded(rdp)) {
+               lockdep_assert_irqs_disabled();
+               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+       } else {
+               local_irq_restore(flags);
+       }
+}
+
+/* Lockdep check that ->cblist may be safely accessed. */
+static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+       if (rcu_rdp_is_offloaded(rdp))
+               lockdep_assert_held(&rdp->nocb_lock);
+}
+
+/*
+ * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
+ * grace period.
+ */
+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
+{
+       swake_up_all(sq);
+}
+
+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
+{
+       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
+{
+       init_swait_queue_head(&rnp->nocb_gp_wq[0]);
+       init_swait_queue_head(&rnp->nocb_gp_wq[1]);
+}
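
rcu_nocb_gp_get() above selects between the two wait queues by grace-period sequence parity, so sleepers for consecutive grace periods never share a queue. A standalone sketch of the indexing, assuming the kernel's rcu_seq layout (RCU_SEQ_CTR_SHIFT == 2, with the low bits holding grace-period state):

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT 2	/* assumption: matches kernel/rcu/rcu.h */

static unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

int main(void)
{
	/* Successive grace periods alternate between nocb_gp_wq[0]
	 * and nocb_gp_wq[1]. */
	for (unsigned long ctr = 0; ctr < 4; ctr++) {
		unsigned long gp_seq = ctr << RCU_SEQ_CTR_SHIFT;

		printf("GP #%lu -> nocb_gp_wq[%lu]\n",
		       ctr, rcu_seq_ctr(gp_seq) & 0x1);
	}
	return 0;
}
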
+
+/* Is the specified CPU a no-CBs CPU? */
+bool rcu_is_nocb_cpu(int cpu)
+{
+       if (cpumask_available(rcu_nocb_mask))
+               return cpumask_test_cpu(cpu, rcu_nocb_mask);
+       return false;
+}
+
+static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
+                          struct rcu_data *rdp,
+                          bool force, unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
+{
+       bool needwake = false;
+
+       if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                   TPS("AlreadyAwake"));
+               return false;
+       }
+
+       if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+               del_timer(&rdp_gp->nocb_timer);
+       }
+
+       if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
+               WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
+               needwake = true;
+       }
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+       if (needwake) {
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
+               wake_up_process(rdp_gp->nocb_gp_kthread);
+       }
+
+       return needwake;
+}
+
+/*
+ * Kick the GP kthread for this NOCB group.
+ */
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return __wake_nocb_gp(rdp_gp, rdp, force, flags);
+}
+
+/*
+ * Arrange to wake the GP kthread for this NOCB group at some future
+ * time when it is safe to do so.
+ */
+static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
+                              const char *reason)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+
+       /*
+        * Bypass wakeup overrides previous deferments. In case
+        * of callback storm, no need to wake up too early.
+        */
+       if (waketype == RCU_NOCB_WAKE_BYPASS) {
+               mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
+               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       } else {
+               if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
+                       mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
+               if (rdp_gp->nocb_defer_wakeup < waketype)
+                       WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
+       }
+
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
+}
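
The escalation logic above is easier to see with the wake levels laid out. The enum names below appear in tree.h, but treat the summary itself as paraphrase rather than kernel documentation:

/* Deferred-wakeup levels, in increasing order of urgency:
 *
 *   RCU_NOCB_WAKE_NOT < RCU_NOCB_WAKE_BYPASS
 *                     < RCU_NOCB_WAKE < RCU_NOCB_WAKE_FORCE
 *
 * A non-bypass caller arms a 1-jiffy timer (unless one is already
 * pending at RCU_NOCB_WAKE or above) and only ever *raises* the
 * pending level.  A bypass caller instead (re)arms a 2-jiffy timer
 * and overwrites the level, deliberately waking later during a
 * callback storm, as the comment in the function explains. */
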
+
+/*
+ * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
+ * However, if there is a callback to be enqueued and if ->nocb_bypass
+ * proves to be initially empty, just return false because the no-CB GP
+ * kthread may need to be awakened in this case.
+ *
+ * Note that this function always returns true if rhp is NULL.
+ */
+static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                    unsigned long j)
+{
+       struct rcu_cblist rcl;
+
+       WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
+       rcu_lockdep_assert_cblist_protected(rdp);
+       lockdep_assert_held(&rdp->nocb_bypass_lock);
+       if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
+               raw_spin_unlock(&rdp->nocb_bypass_lock);
+               return false;
+       }
+       /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
+       if (rhp)
+               rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+       rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
+       rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
+       WRITE_ONCE(rdp->nocb_bypass_first, j);
+       rcu_nocb_bypass_unlock(rdp);
+       return true;
+}
+
+/*
+ * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
+ * However, if there is a callback to be enqueued and if ->nocb_bypass
+ * proves to be initially empty, just return false because the no-CB GP
+ * kthread may need to be awakened in this case.
+ *
+ * Note that this function always returns true if rhp is NULL.
+ */
+static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                 unsigned long j)
+{
+       if (!rcu_rdp_is_offloaded(rdp))
+               return true;
+       rcu_lockdep_assert_cblist_protected(rdp);
+       rcu_nocb_bypass_lock(rdp);
+       return rcu_nocb_do_flush_bypass(rdp, rhp, j);
+}
+
+/*
+ * If the ->nocb_bypass_lock is immediately available, flush the
+ * ->nocb_bypass queue into ->cblist.
+ */
+static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
+{
+       rcu_lockdep_assert_cblist_protected(rdp);
+       if (!rcu_rdp_is_offloaded(rdp) ||
+           !rcu_nocb_bypass_trylock(rdp))
+               return;
+       WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
+}
+
+/*
+ * See whether it is appropriate to use the ->nocb_bypass list in order
+ * to control contention on ->nocb_lock.  A limited number of direct
+ * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
+ * is non-empty, further callbacks must be placed into ->nocb_bypass,
+ * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
+ * back to direct use of ->cblist.  However, ->nocb_bypass should not be
+ * used if ->cblist is empty, because otherwise callbacks can be stranded
+ * on ->nocb_bypass because we cannot count on the current CPU ever again
+ * invoking call_rcu().  The general rule is that if ->nocb_bypass is
+ * non-empty, the corresponding no-CBs grace-period kthread must not be
+ * in an indefinite sleep state.
+ *
+ * Finally, it is not permitted to use the bypass during early boot,
+ * as doing so would confuse the auto-initialization code.  Besides
+ * which, there is no point in worrying about lock contention while
+ * there is only one CPU in operation.
+ */
+static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                               bool *was_alldone, unsigned long flags)
+{
+       unsigned long c;
+       unsigned long cur_gp_seq;
+       unsigned long j = jiffies;
+       long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+
+       lockdep_assert_irqs_disabled();
+
+       // Pure softirq/rcuc based processing: no bypassing, no
+       // locking.
+       if (!rcu_rdp_is_offloaded(rdp)) {
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false;
+       }
+
+       // In the process of (de-)offloading: no bypassing, but
+       // locking.
+       if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
+               rcu_nocb_lock(rdp);
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false; /* Not offloaded, no bypassing. */
+       }
+
+       // Don't use ->nocb_bypass during early boot.
+       if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
+               rcu_nocb_lock(rdp);
+               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               return false;
+       }
+
+       // If we have advanced to a new jiffy, reset counts to allow
+       // moving back from ->nocb_bypass to ->cblist.
+       if (j == rdp->nocb_nobypass_last) {
+               c = rdp->nocb_nobypass_count + 1;
+       } else {
+               WRITE_ONCE(rdp->nocb_nobypass_last, j);
+               c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
+               if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
+                                nocb_nobypass_lim_per_jiffy))
+                       c = 0;
+               else if (c > nocb_nobypass_lim_per_jiffy)
+                       c = nocb_nobypass_lim_per_jiffy;
+       }
+       WRITE_ONCE(rdp->nocb_nobypass_count, c);
+
+       // If there hasn't yet been all that many ->cblist enqueues
+       // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
+       // ->nocb_bypass first.
+       if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
+               rcu_nocb_lock(rdp);
+               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+               if (*was_alldone)
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstQ"));
+               WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
+               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+               return false; // Caller must enqueue the callback.
+       }
+
+       // If ->nocb_bypass has been used too long or is too full,
+       // flush ->nocb_bypass to ->cblist.
+       if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
+           ncbs >= qhimark) {
+               rcu_nocb_lock(rdp);
+               if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
+                       *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+                       if (*was_alldone)
+                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                                   TPS("FirstQ"));
+                       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+                       return false; // Caller must enqueue the callback.
+               }
+               if (j != rdp->nocb_gp_adv_time &&
+                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
+                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
+                       rdp->nocb_gp_adv_time = j;
+               }
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               return true; // Callback already enqueued.
+       }
+
+       // We need to use the bypass.
+       rcu_nocb_wait_contended(rdp);
+       rcu_nocb_bypass_lock(rdp);
+       ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+       rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
+       rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
+       if (!ncbs) {
+               WRITE_ONCE(rdp->nocb_bypass_first, j);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
+       }
+       rcu_nocb_bypass_unlock(rdp);
+       smp_mb(); /* Order enqueue before wake. */
+       if (ncbs) {
+               local_irq_restore(flags);
+       } else {
+               // No-CBs GP kthread might be indefinitely asleep, if so, wake.
+               rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
+               if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstBQwake"));
+                       __call_rcu_nocb_wake(rdp, true, flags);
+               } else {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("FirstBQnoWake"));
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+               }
+       }
+       return true; // Callback already enqueued.
+}
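
Since rcu_nocb_try_bypass() is the longest decision chain in this file, here is a paraphrased summary of its outcomes (a reading aid, not authoritative documentation):

/* rcu_nocb_try_bypass() outcomes, paraphrased:
 *
 *  1. rdp not offloaded          -> false, no lock; caller enqueues
 *  2. (de-)offload in progress   -> false, ->nocb_lock held
 *  3. early boot                 -> false, ->nocb_lock held
 *  4. under the per-jiffy budget -> false, bypass flushed first
 *  5. bypass stale or >= qhimark -> true, rhp flushed into ->cblist
 *                                   (false with lock held if the
 *                                   flush finds the bypass empty)
 *  6. otherwise                  -> true, rhp added to ->nocb_bypass
 *
 * "true" means the callback is already queued; "false" means the
 * caller must enqueue it itself. */
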
+
+/*
+ * Awaken the no-CBs grace-period kthread if needed, either due to it
+ * legitimately being asleep or due to overload conditions.
+ *
+ * If warranted, also wake up the kthread servicing this CPU's queues.
+ */
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
+                                unsigned long flags)
+                                __releases(rdp->nocb_lock)
+{
+       unsigned long cur_gp_seq;
+       unsigned long j;
+       long len;
+       struct task_struct *t;
+
+       // If we are being polled or there is no kthread, just leave.
+       t = READ_ONCE(rdp->nocb_gp_kthread);
+       if (rcu_nocb_poll || !t) {
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                   TPS("WakeNotPoll"));
+               return;
+       }
+       // Need to actually do a wakeup.
+       len = rcu_segcblist_n_cbs(&rdp->cblist);
+       if (was_alldone) {
+               rdp->qlen_last_fqs_check = len;
+               if (!irqs_disabled_flags(flags)) {
+                       /* ... if queue was empty ... */
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp(rdp, false);
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("WakeEmpty"));
+               } else {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
+                                          TPS("WakeEmptyIsDeferred"));
+               }
+       } else if (len > rdp->qlen_last_fqs_check + qhimark) {
+               /* ... or if many callbacks queued. */
+               rdp->qlen_last_fqs_check = len;
+               j = jiffies;
+               if (j != rdp->nocb_gp_adv_time &&
+                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
+                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
+                       rdp->nocb_gp_adv_time = j;
+               }
+               smp_mb(); /* Enqueue before timer_pending(). */
+               if ((rdp->nocb_cb_sleep ||
+                    !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
+                   !timer_pending(&rdp->nocb_timer)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
+                                          TPS("WakeOvfIsDeferred"));
+               } else {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+               }
+       } else {
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
+       }
+       return;
+}
+
+/*
+ * Check whether we should ignore this rdp.
+ *
+ * We check this without holding the nocb lock, but the following
+ * ordering ensures that a freshly offloaded rdp is not missed:
+ *
+ *  rdp_offload_toggle()        nocb_gp_enabled_cb()
+ * -------------------------   ----------------------------
+ *    WRITE flags                 LOCK nocb_gp_lock
+ *    LOCK nocb_gp_lock           READ/WRITE nocb_gp_sleep
+ *    READ/WRITE nocb_gp_sleep    UNLOCK nocb_gp_lock
+ *    UNLOCK nocb_gp_lock         READ flags
+ */
+static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
+{
+       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
+
+       return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
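
The diagram above is the usual lock-mediated publication pattern; spelled out (an informal restatement, not new semantics):

/* Whichever side acquires nocb_gp_lock second observes the other
 * side's earlier accesses.  So for a freshly offloaded rdp, either:
 *
 *   - nocb_gp_enabled_cb()'s caller reads the flags after its lock
 *     section and sees SEGCBLIST_OFFLOADED already set, or
 *   - rdp_offload_toggle() reads nocb_gp_sleep inside the lock,
 *     sees the GP kthread sleeping, and wakes it for a re-scan.
 *
 * Either way the rdp cannot be missed. */
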
+
+static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
+                                                    bool *needwake_state)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+
+       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
+                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+                               *needwake_state = true;
+               }
+               return false;
+       }
+
+       /*
+        * De-offloading. Clear our flag and notify the de-offload worker.
+        * We will ignore this rdp until it gets re-offloaded.
+        */
+       WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+       rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
+       if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+               *needwake_state = true;
+       return true;
+}
+
+/*
+ * No-CBs GP kthreads come here to wait for additional callbacks to show up
+ * or for grace periods to end.
+ */
+static void nocb_gp_wait(struct rcu_data *my_rdp)
+{
+       bool bypass = false;
+       long bypass_ncbs;
+       int __maybe_unused cpu = my_rdp->cpu;
+       unsigned long cur_gp_seq;
+       unsigned long flags;
+       bool gotcbs = false;
+       unsigned long j = jiffies;
+       bool needwait_gp = false; // This prevents actual uninitialized use.
+       bool needwake;
+       bool needwake_gp;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
+       bool wasempty = false;
+
+       /*
+        * Each pass through the following loop checks for CBs and for the
+        * nearest grace period (if any) to wait for next.  The CB kthreads
+        * and the global grace-period kthread are awakened if needed.
+        */
+       WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
+       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
+               bool needwake_state = false;
+
+               if (!nocb_gp_enabled_cb(rdp))
+                       continue;
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
+               rcu_nocb_lock_irqsave(rdp, flags);
+               if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       if (needwake_state)
+                               swake_up_one(&rdp->nocb_state_wq);
+                       continue;
+               }
+               bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+               if (bypass_ncbs &&
+                   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
+                    bypass_ncbs > 2 * qhimark)) {
+                       // Bypass full or old, so flush it.
+                       (void)rcu_nocb_try_flush_bypass(rdp, j);
+                       bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+               } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
+                       rcu_nocb_unlock_irqrestore(rdp, flags);
+                       if (needwake_state)
+                               swake_up_one(&rdp->nocb_state_wq);
+                       continue; /* No callbacks here, try next. */
+               }
+               if (bypass_ncbs) {
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("Bypass"));
+                       bypass = true;
+               }
+               rnp = rdp->mynode;
+
+               // Advance callbacks if helpful and low contention.
+               needwake_gp = false;
+               if (!rcu_segcblist_restempty(&rdp->cblist,
+                                            RCU_NEXT_READY_TAIL) ||
+                   (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                    rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
+                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
+                       needwake_gp = rcu_advance_cbs(rnp, rdp);
+                       wasempty = rcu_segcblist_restempty(&rdp->cblist,
+                                                          RCU_NEXT_READY_TAIL);
+                       raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
+               }
+               // Need to wait on some grace period?
+               WARN_ON_ONCE(wasempty &&
+                            !rcu_segcblist_restempty(&rdp->cblist,
+                                                     RCU_NEXT_READY_TAIL));
+               if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
+                       if (!needwait_gp ||
+                           ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
+                               wait_gp_seq = cur_gp_seq;
+                       needwait_gp = true;
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+                                           TPS("NeedWaitGP"));
+               }
+               if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
+                       needwake = rdp->nocb_cb_sleep;
+                       WRITE_ONCE(rdp->nocb_cb_sleep, false);
+                       smp_mb(); /* CB invocation -after- GP end. */
+               } else {
+                       needwake = false;
+               }
+               rcu_nocb_unlock_irqrestore(rdp, flags);
+               if (needwake) {
+                       swake_up_one(&rdp->nocb_cb_wq);
+                       gotcbs = true;
+               }
+               if (needwake_gp)
+                       rcu_gp_kthread_wake();
+               if (needwake_state)
+                       swake_up_one(&rdp->nocb_state_wq);
+       }
+
+       my_rdp->nocb_gp_bypass = bypass;
+       my_rdp->nocb_gp_gp = needwait_gp;
+       my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
+
+       if (bypass && !rcu_nocb_poll) {
+               // At least one child with non-empty ->nocb_bypass, so set
+               // timer in order to avoid stranding its callbacks.
+               wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
+                                  TPS("WakeBypassIsDeferred"));
+       }
+       if (rcu_nocb_poll) {
+               /* Polling, so trace if first poll in the series. */
+               if (gotcbs)
+                       trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
+               schedule_timeout_idle(1);
+       } else if (!needwait_gp) {
+               /* Wait for callbacks to appear. */
+               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
+               swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
+                               !READ_ONCE(my_rdp->nocb_gp_sleep));
+               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
+       } else {
+               rnp = my_rdp->mynode;
+               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
+               swait_event_interruptible_exclusive(
+                       rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
+                       rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
+                       !READ_ONCE(my_rdp->nocb_gp_sleep));
+               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
+       }
+       if (!rcu_nocb_poll) {
+               raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
+               if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
+                       WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+                       del_timer(&my_rdp->nocb_timer);
+               }
+               WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
+               raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
+       }
+       my_rdp->nocb_gp_seq = -1;
+       WARN_ON(signal_pending(current));
+}
+
+/*
+ * No-CBs grace-period-wait kthread.  There is one of these per group
+ * of CPUs, but it is spawned only once at least one CPU in that group
+ * has come online since boot.  This kthread checks for newly posted
+ * callbacks from any of the CPUs it is responsible for, waits for a
+ * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
+ * that then have callback-invocation work to do.
+ */
+static int rcu_nocb_gp_kthread(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       for (;;) {
+               WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
+               nocb_gp_wait(rdp);
+               cond_resched_tasks_rcu_qs();
+       }
+       return 0;
+}
+
+static inline bool nocb_cb_can_run(struct rcu_data *rdp)
+{
+       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
+       return rcu_segcblist_test_flags(&rdp->cblist, flags);
+}
+
+static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
+{
+       return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+}
+
+/*
+ * Invoke any ready callbacks from the corresponding no-CBs CPU,
+ * then, if there are no more, wait for more to appear.
+ */
+static void nocb_cb_wait(struct rcu_data *rdp)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long cur_gp_seq;
+       unsigned long flags;
+       bool needwake_state = false;
+       bool needwake_gp = false;
+       bool can_sleep = true;
+       struct rcu_node *rnp = rdp->mynode;
+
+       local_irq_save(flags);
+       rcu_momentary_dyntick_idle();
+       local_irq_restore(flags);
+       /*
+        * Disable BH to provide the expected environment.  Also, when
+        * transitioning to/from NOCB mode, a self-requeuing callback might
+        * be invoked from softirq.  A short grace period could cause both
+        * instances of this callback to execute concurrently.
+        */
+       local_bh_disable();
+       rcu_do_batch(rdp);
+       local_bh_enable();
+       lockdep_assert_irqs_enabled();
+       rcu_nocb_lock_irqsave(rdp, flags);
+       if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
+           rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
+           raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
+               needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+       }
+
+       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
+                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
+                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+                               needwake_state = true;
+               }
+               if (rcu_segcblist_ready_cbs(cblist))
+                       can_sleep = false;
+       } else {
+               /*
+                * De-offloading. Clear our flag and notify the de-offload worker.
+                * We won't touch the callbacks and keep sleeping until we ever
+                * get re-offloaded.
+                */
+               WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
+               rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
+               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
+                       needwake_state = true;
+       }
+
+       WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
+
+       if (rdp->nocb_cb_sleep)
+               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
+
+       rcu_nocb_unlock_irqrestore(rdp, flags);
+       if (needwake_gp)
+               rcu_gp_kthread_wake();
+
+       if (needwake_state)
+               swake_up_one(&rdp->nocb_state_wq);
+
+       do {
+               swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+                                                   nocb_cb_wait_cond(rdp));
+
+               // VVV Ensure CB invocation follows _sleep test.
+               if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
+                       WARN_ON(signal_pending(current));
+                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+               }
+       } while (!nocb_cb_can_run(rdp));
+}
+
+/*
+ * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
+ * nocb_cb_wait() to do the dirty work.
+ */
+static int rcu_nocb_cb_kthread(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       // Each pass through this loop does one callback batch, and,
+       // if there are no more ready callbacks, waits for them.
+       for (;;) {
+               nocb_cb_wait(rdp);
+               cond_resched_tasks_rcu_qs();
+       }
+       return 0;
+}
+
+/* Is a deferred wakeup of rcu_nocb_kthread() required? */
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
+{
+       return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread(). */
+static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
+                                          struct rcu_data *rdp, int level,
+                                          unsigned long flags)
+       __releases(rdp_gp->nocb_gp_lock)
+{
+       int ndw;
+       int ret;
+
+       if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
+               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+               return false;
+       }
+
+       ndw = rdp_gp->nocb_defer_wakeup;
+       ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
+
+       return ret;
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
+static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
+{
+       unsigned long flags;
+       struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
+
+       WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
+
+       raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
+       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
+       do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
+}
+
+/*
+ * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
+ * This means we do an inexact common-case check.  Note that if
+ * we miss, ->nocb_timer will eventually clean things up.
+ */
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+       unsigned long flags;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+       if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
+               return false;
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
+}
+
+void rcu_nocb_flush_deferred_wakeup(void)
+{
+       do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
+
+static int rdp_offload_toggle(struct rcu_data *rdp,
+                              bool offload, unsigned long flags)
+       __releases(rdp->nocb_lock)
+{
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+       bool wake_gp = false;
+
+       rcu_segcblist_offload(cblist, offload);
+
+       if (rdp->nocb_cb_sleep)
+               rdp->nocb_cb_sleep = false;
+       rcu_nocb_unlock_irqrestore(rdp, flags);
+
+       /*
+        * Ignore former value of nocb_cb_sleep and force wake up as it could
+        * have been spuriously set to false already.
+        */
+       swake_up_one(&rdp->nocb_cb_wq);
+
+       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+       if (rdp_gp->nocb_gp_sleep) {
+               rdp_gp->nocb_gp_sleep = false;
+               wake_gp = true;
+       }
+       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+
+       if (wake_gp)
+               wake_up_process(rdp_gp->nocb_gp_kthread);
+
+       return 0;
+}
+
+static long rcu_nocb_rdp_deoffload(void *arg)
+{
+       struct rcu_data *rdp = arg;
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long flags;
+       int ret;
+
+       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+
+       pr_info("De-offloading %d\n", rdp->cpu);
+
+       rcu_nocb_lock_irqsave(rdp, flags);
+       /*
+        * Flush once and for all now. This suffices because we are
+        * running on the target CPU holding ->nocb_lock (thus having
+        * interrupts disabled), and because rdp_offload_toggle()
+        * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
+        * Thus future calls to rcu_segcblist_completely_offloaded() will
+        * return false, which means that future calls to rcu_nocb_try_bypass()
+        * will refuse to put anything into the bypass.
+        */
+       WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
+       ret = rdp_offload_toggle(rdp, false, flags);
+       swait_event_exclusive(rdp->nocb_state_wq,
+                             !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
+                                                       SEGCBLIST_KTHREAD_GP));
+       /*
+        * Lock one last time to acquire latest callback updates from kthreads
+        * so we can later handle callbacks locally without locking.
+        */
+       rcu_nocb_lock_irqsave(rdp, flags);
+       /*
+        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
+        * lock is released but how about being paranoid for once?
+        */
+       rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
+       /*
+        * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
+        * rcu_nocb_unlock_irqrestore() anymore.
+        */
+       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+       /* Sanity check */
+       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+
+       return ret;
+}
+
+int rcu_nocb_cpu_deoffload(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       int ret = 0;
+
+       mutex_lock(&rcu_state.barrier_mutex);
+       cpus_read_lock();
+       if (rcu_rdp_is_offloaded(rdp)) {
+               if (cpu_online(cpu)) {
+                       ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
+                       if (!ret)
+                               cpumask_clear_cpu(cpu, rcu_nocb_mask);
+               } else {
+                       pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
+                       ret = -EINVAL;
+               }
+       }
+       cpus_read_unlock();
+       mutex_unlock(&rcu_state.barrier_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
+
+static long rcu_nocb_rdp_offload(void *arg)
+{
+       struct rcu_data *rdp = arg;
+       struct rcu_segcblist *cblist = &rdp->cblist;
+       unsigned long flags;
+       int ret;
+
+       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+       /*
+        * For now we only support re-offload, i.e., the rdp must have
+        * been offloaded at boot first.
+        */
+       if (!rdp->nocb_gp_rdp)
+               return -EINVAL;
+
+       pr_info("Offloading %d\n", rdp->cpu);
+       /*
+        * Can't use rcu_nocb_lock_irqsave() while we are in
+        * SEGCBLIST_SOFTIRQ_ONLY mode.
+        */
+       raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+
+       /*
+        * We didn't take the nocb lock while working on the
+        * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
+        * Every modification previously made to rdp->cblist must be
+        * visible remotely to the nocb kthreads upon wake up, after
+        * they have read the cblist flags.
+        *
+        * Pairing the accesses around nocb_lock enforces that ordering:
+        *
+        *  rcu_nocb_rdp_offload()     nocb_cb_wait()/nocb_gp_wait()
+        * -------------------------   ----------------------------
+        *      WRITE callbacks           rcu_nocb_lock()
+        *      rcu_nocb_lock()           READ flags
+        *      WRITE flags               READ callbacks
+        *      rcu_nocb_unlock()         rcu_nocb_unlock()
+        */
+       ret = rdp_offload_toggle(rdp, true, flags);
+       swait_event_exclusive(rdp->nocb_state_wq,
+                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
+                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+
+       return ret;
+}
+
+int rcu_nocb_cpu_offload(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       int ret = 0;
+
+       mutex_lock(&rcu_state.barrier_mutex);
+       cpus_read_lock();
+       if (!rcu_rdp_is_offloaded(rdp)) {
+               if (cpu_online(cpu)) {
+                       ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+                       if (!ret)
+                               cpumask_set_cpu(cpu, rcu_nocb_mask);
+               } else {
+                       pr_info("NOCB: Can't CB-offload an offline CPU\n");
+                       ret = -EINVAL;
+               }
+       }
+       cpus_read_unlock();
+       mutex_unlock(&rcu_state.barrier_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
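
Both exported toggles sleep (work_on_cpu() plus the swait above) and serialize on the barrier mutex, so they are usable only from process context; rcutorture exercises them in-tree. A hedged sketch of a round trip (nocb_round_trip() is hypothetical):

#include <linux/rcupdate.h>

/* Sketch: de-offload then re-offload one CPU's callbacks at runtime.
 * Either call can fail, e.g. -EINVAL if the CPU is offline. */
static int nocb_round_trip(int cpu)
{
	int ret;

	ret = rcu_nocb_cpu_deoffload(cpu);	/* back to softirq mode */
	if (ret)
		return ret;
	return rcu_nocb_cpu_offload(cpu);	/* and offloaded again */
}
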
+
+void __init rcu_init_nohz(void)
+{
+       int cpu;
+       bool need_rcu_nocb_mask = false;
+       struct rcu_data *rdp;
+
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+               need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
+               if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+                       pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+                       return;
+               }
+       }
+       if (!cpumask_available(rcu_nocb_mask))
+               return;
+
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running)
+               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+               pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
+               cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+                           rcu_nocb_mask);
+       }
+       if (cpumask_empty(rcu_nocb_mask))
+               pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
+       else
+               pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
+                       cpumask_pr_args(rcu_nocb_mask));
+       if (rcu_nocb_poll)
+               pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+       for_each_cpu(cpu, rcu_nocb_mask) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (rcu_segcblist_empty(&rdp->cblist))
+                       rcu_segcblist_init(&rdp->cblist);
+               rcu_segcblist_offload(&rdp->cblist, true);
+               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
+               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
+       }
+       rcu_organize_nocb_kthreads();
+}
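
One consequence of the cpumask_or() above deserves a concrete example (hypothetical command line):

/* Booting with:
 *
 *     nohz_full=2-5 rcu_nocbs=7
 *
 * yields an effective offload set of CPUs 2-5 and 7, because
 * rcu_init_nohz() ORs tick_nohz_full_mask into rcu_nocb_mask --
 * nohz_full CPUs are always callback-offloaded. */
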
+
+/* Initialize per-rcu_data variables for no-CBs CPUs. */
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+       init_swait_queue_head(&rdp->nocb_cb_wq);
+       init_swait_queue_head(&rdp->nocb_gp_wq);
+       init_swait_queue_head(&rdp->nocb_state_wq);
+       raw_spin_lock_init(&rdp->nocb_lock);
+       raw_spin_lock_init(&rdp->nocb_bypass_lock);
+       raw_spin_lock_init(&rdp->nocb_gp_lock);
+       timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
+       rcu_cblist_init(&rdp->nocb_bypass);
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
+ * for this CPU's group has not yet been created, spawn it as well.
+ */
+static void rcu_spawn_one_nocb_kthread(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       struct rcu_data *rdp_gp;
+       struct task_struct *t;
+
+       /*
+        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+        * then nothing to do.
+        */
+       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
+               return;
+
+       /* If we didn't spawn the GP kthread first, reorganize! */
+       rdp_gp = rdp->nocb_gp_rdp;
+       if (!rdp_gp->nocb_gp_kthread) {
+               t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
+                               "rcuog/%d", rdp_gp->cpu);
+               if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
+                       return;
+               WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
+       }
+
+       /* Spawn the kthread for this CPU. */
+       t = kthread_run(rcu_nocb_cb_kthread, rdp,
+                       "rcuo%c/%d", rcu_state.abbr, cpu);
+       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
+               return;
+       WRITE_ONCE(rdp->nocb_cb_kthread, t);
+       WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread, spawn it.
+ */
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
+{
+       if (rcu_scheduler_fully_active)
+               rcu_spawn_one_nocb_kthread(cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs.  This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               rcu_spawn_cpu_nocb_kthread(cpu);
+}
+
+/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
+static int rcu_nocb_gp_stride = -1;
+module_param(rcu_nocb_gp_stride, int, 0444);
+
+/*
+ * Initialize GP-CB relationships for all no-CBs CPUs.
+ */
+static void __init rcu_organize_nocb_kthreads(void)
+{
+       int cpu;
+       bool firsttime = true;
+       bool gotnocbs = false;
+       bool gotnocbscbs = true;
+       int ls = rcu_nocb_gp_stride;
+       int nl = 0;  /* Next GP kthread. */
+       struct rcu_data *rdp;
+       struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
+       struct rcu_data *rdp_prev = NULL;
+
+       if (!cpumask_available(rcu_nocb_mask))
+               return;
+       if (ls == -1) {
+               ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
+               rcu_nocb_gp_stride = ls;
+       }
+
+       /*
+        * Each pass through this loop sets up one rcu_data structure.
+        * Should the corresponding CPU come online in the future, then
+        * we will spawn the needed set of rcu_nocb_kthread() kthreads.
+        */
+       for_each_cpu(cpu, rcu_nocb_mask) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (rdp->cpu >= nl) {
+                       /* New GP kthread, set up for CBs & next GP. */
+                       gotnocbs = true;
+                       nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
+                       rdp->nocb_gp_rdp = rdp;
+                       rdp_gp = rdp;
+                       if (dump_tree) {
+                               if (!firsttime)
+                                       pr_cont("%s\n", gotnocbscbs
+                                                       ? "" : " (self only)");
+                               gotnocbscbs = false;
+                               firsttime = false;
+                               pr_alert("%s: No-CB GP kthread CPU %d:",
+                                        __func__, cpu);
+                       }
+               } else {
+                       /* Another CB kthread, link to previous GP kthread. */
+                       gotnocbscbs = true;
+                       rdp->nocb_gp_rdp = rdp_gp;
+                       rdp_prev->nocb_next_cb_rdp = rdp;
+                       if (dump_tree)
+                               pr_cont(" %d", cpu);
+               }
+               rdp_prev = rdp;
+       }
+       if (gotnocbs && dump_tree)
+               pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
+}
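
A worked example of the default stride, assuming a hypothetical 64-CPU system with all CPUs in rcu_nocb_mask (int_sqrt_() below is a stand-in for the kernel's int_sqrt()):

#include <stdio.h>

/* Integer square root stand-in for the sketch. */
static int int_sqrt_(int x)
{
	int r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	int nr_cpu_ids = 64;				/* assumption */
	int ls = nr_cpu_ids / int_sqrt_(nr_cpu_ids);	/* 64 / 8 = 8 */

	/* Each group of ls CPUs shares one rcuog GP kthread, named
	 * after the group's first CPU as in rcu_spawn_one_nocb_kthread(). */
	for (int cpu = 0; cpu < nr_cpu_ids; cpu += ls)
		printf("rcuog/%d leads CPUs %d-%d\n",
		       cpu, cpu, cpu + ls - 1);
	return 0;
}
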
+
+/*
+ * Bind the current task to the offloaded CPUs.  If there are no offloaded
+ * CPUs, leave the task unbound.  Splat if the bind attempt fails.
+ */
+void rcu_bind_current_to_nocb(void)
+{
+       if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
+               WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
+}
+EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
+
+// The ->on_cpu field is available only in CONFIG_SMP=y, so...
+#ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+       return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
+}
+#else // #ifdef CONFIG_SMP
+static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
+{
+       return "";
+}
+#endif // #else #ifdef CONFIG_SMP
+
+/*
+ * Dump out nocb grace-period kthread state for the specified rcu_data
+ * structure.
+ */
+static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
+{
+       struct rcu_node *rnp = rdp->mynode;
+
+       pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
+               rdp->cpu,
+               "kK"[!!rdp->nocb_gp_kthread],
+               "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "tT"[timer_pending(&rdp->nocb_timer)],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[swait_active(&rdp->nocb_gp_wq)],
+               ".W"[swait_active(&rnp->nocb_gp_wq[0])],
+               ".W"[swait_active(&rnp->nocb_gp_wq[1])],
+               ".B"[!!rdp->nocb_gp_bypass],
+               ".G"[!!rdp->nocb_gp_gp],
+               (long)rdp->nocb_gp_seq,
+               rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
+               rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
+               rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
+               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
+}
+
+/* Dump out nocb kthread state for the specified rcu_data structure. */
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+       char bufw[20];
+       char bufr[20];
+       struct rcu_segcblist *rsclp = &rdp->cblist;
+       bool waslocked;
+       bool wassleep;
+
+       if (rdp->nocb_gp_rdp == rdp)
+               show_rcu_nocb_gp_state(rdp);
+
+       sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
+       sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
+       pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
+               rdp->cpu, rdp->nocb_gp_rdp->cpu,
+               rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
+               "kK"[!!rdp->nocb_cb_kthread],
+               "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
+               "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
+               "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
+               "sS"[!!rdp->nocb_cb_sleep],
+               ".W"[swait_active(&rdp->nocb_cb_wq)],
+               jiffies - rdp->nocb_bypass_first,
+               jiffies - rdp->nocb_nobypass_last,
+               rdp->nocb_nobypass_count,
+               ".D"[rcu_segcblist_ready_cbs(rsclp)],
+               ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
+               rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
+               ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
+               rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
+               ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
+               ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
+               rcu_segcblist_n_cbs(&rdp->cblist),
+               rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
+               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
+               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
+
+       /* It is OK for GP kthreads to have GP state. */
+       if (rdp->nocb_gp_rdp == rdp)
+               return;
+
+       waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
+       wassleep = swait_active(&rdp->nocb_gp_wq);
+       if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
+               return;  /* Nothing untoward. */
+
+       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
+               "lL"[waslocked],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[wassleep]);
+}
+
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+       return 0;
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+       return false;
+}
+
+/* No ->nocb_lock to acquire.  */
+static void rcu_nocb_lock(struct rcu_data *rdp)
+{
+}
+
+/* No ->nocb_lock to release.  */
+static void rcu_nocb_unlock(struct rcu_data *rdp)
+{
+}
+
+/* No ->nocb_lock to release.  */
+static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
+                                      unsigned long flags)
+{
+       local_irq_restore(flags);
+}
+
+/* Lockdep check that ->cblist may be safely accessed. */
+static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
+{
+       lockdep_assert_irqs_disabled();
+}
+
+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
+{
+}
+
+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
+{
+       return NULL;
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
+{
+}
+
+static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                                 unsigned long j)
+{
+       return true;
+}
+
+static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+                               bool *was_alldone, unsigned long flags)
+{
+       return false;
+}
+
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+                                unsigned long flags)
+{
+       WARN_ON_ONCE(1);  /* Should be dead code! */
+}
+
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+}
+
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
+{
+       return false;
+}
+
+static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+       return false;
+}
+
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+}
+
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
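The CONFIG_RCU_NOCB_CPU=n stubs above follow a common kernel idiom: supply empty (or trivially correct) static definitions under the disabled configuration so that call sites never need #ifdef. A minimal sketch of the same idiom for a hypothetical CONFIG_FOO feature (all names here are illustrative, not from this patch):

struct foo;

#ifdef CONFIG_FOO
void foo_lock(struct foo *f);                   /* Real implementation elsewhere. */
#else
static inline void foo_lock(struct foo *f) { }  /* Compiles away entirely. */
#endif

/* Callers stay #ifdef-free either way: */
static void foo_user(struct foo *f)
{
        foo_lock(f);
}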
index de1dc3bb7f70167e2e5405b73e79e4e31c9f0a2c..7a4876a3a882e2454d5a6084d8f439e33973ad60 100644 (file)
 
 #include "../locking/rtmutex_common.h"
 
-#ifdef CONFIG_RCU_NOCB_CPU
-static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
-static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-       return lockdep_is_held(&rdp->nocb_lock);
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
-       /* Race during early boot between thread creation and assignment. */
-       if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
-               return true;
-
-       if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
-               if (in_task())
-                       return true;
-       return false;
-}
-
-#else
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-       return 0;
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
-       return false;
-}
-
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-
 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
 {
        /*
@@ -346,7 +313,7 @@ void rcu_note_context_switch(bool preempt)
 
        trace_rcu_utilization(TPS("Start context switch"));
        lockdep_assert_irqs_disabled();
-       WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+       WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
        if (rcu_preempt_depth() > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
 
@@ -405,17 +372,20 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 static void rcu_preempt_read_enter(void)
 {
-       current->rcu_read_lock_nesting++;
+       WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
 }
 
 static int rcu_preempt_read_exit(void)
 {
-       return --current->rcu_read_lock_nesting;
+       int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;
+
+       WRITE_ONCE(current->rcu_read_lock_nesting, ret);
+       return ret;
 }
 
 static void rcu_preempt_depth_set(int val)
 {
-       current->rcu_read_lock_nesting = val;
+       WRITE_ONCE(current->rcu_read_lock_nesting, val);
 }
 
 /*
@@ -1479,1460 +1449,6 @@ static void rcu_cleanup_after_idle(void)
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#ifdef CONFIG_RCU_NOCB_CPU
-
-/*
- * Offload callback processing from the boot-time-specified set of CPUs
- * given by rcu_nocb_mask.  For each CPU in that set, kthreads are
- * created that pull the callbacks from the corresponding CPU, wait for
- * a grace period to elapse, and invoke the callbacks.  These kthreads
- * come in two flavors: GP kthreads, which manage incoming callbacks,
- * wait for grace periods, and awaken the CB kthreads, and CB kthreads,
- * which only invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
- * do a wake_up() on their GP kthread when they insert a callback into any
- * empty list, unless the rcu_nocb_poll boot parameter has been specified,
- * in which case each kthread actively polls its CPU.  (Which isn't so great
- * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
- *
- * This is intended to be used in conjunction with Frederic Weisbecker's
- * adaptive-idle work, which would seriously reduce OS jitter on CPUs
- * running CPU-bound user-mode computations.
- *
- * Offloading of callbacks can also be used as an energy-efficiency
- * measure because CPUs with no RCU callbacks queued are more aggressive
- * about entering dyntick-idle mode.
- */
-
-
-/*
- * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
- * If the list is invalid, a warning is emitted and all CPUs are offloaded.
- */
-static int __init rcu_nocb_setup(char *str)
-{
-       alloc_bootmem_cpumask_var(&rcu_nocb_mask);
-       if (cpulist_parse(str, rcu_nocb_mask)) {
-               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
-               cpumask_setall(rcu_nocb_mask);
-       }
-       return 1;
-}
-__setup("rcu_nocbs=", rcu_nocb_setup);
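For concreteness, booting with the following command-line fragment would offload callbacks for CPUs 1-7 and switch the offload kthreads to polling (the CPU range is illustrative):

    rcu_nocbs=1-7 rcu_nocb_poll

An unparseable cpulist such as rcu_nocbs=1-x takes the pr_warn() path above and falls back to offloading every CPU.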
-
-static int __init parse_rcu_nocb_poll(char *arg)
-{
-       rcu_nocb_poll = true;
-       return 0;
-}
-early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
-
-/*
- * Don't bother bypassing ->cblist if the call_rcu() rate is low.
- * After all, the main point of bypassing is to avoid lock contention
- * on ->nocb_lock, which only can happen at high call_rcu() rates.
- */
-static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
-module_param(nocb_nobypass_lim_per_jiffy, int, 0);
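The default limit scales with HZ so that the per-second rate stays roughly constant; a worked example of the initializer's integer arithmetic:

/*
 * nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ
 *
 *   HZ = 1000:  16 * 1000 / 1000 =  16 enqueues per jiffy  (~16000/s)
 *   HZ =  250:  16 * 1000 /  250 =  64 enqueues per jiffy  (~16000/s)
 *   HZ =  100:  16 * 1000 /  100 = 160 enqueues per jiffy  (~16000/s)
 */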
-
-/*
- * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
- * lock isn't immediately available, increment ->nocb_lock_contended to
- * flag the contention.
- */
-static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
-       __acquires(&rdp->nocb_bypass_lock)
-{
-       lockdep_assert_irqs_disabled();
-       if (raw_spin_trylock(&rdp->nocb_bypass_lock))
-               return;
-       atomic_inc(&rdp->nocb_lock_contended);
-       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
-       smp_mb__after_atomic(); /* atomic_inc() before lock. */
-       raw_spin_lock(&rdp->nocb_bypass_lock);
-       smp_mb__before_atomic(); /* atomic_dec() after lock. */
-       atomic_dec(&rdp->nocb_lock_contended);
-}
-
-/*
- * Spinwait until the specified rcu_data structure's ->nocb_lock is
- * not contended.  Please note that this is extremely special-purpose,
- * relying on the fact that at most two kthreads and one CPU contend for
- * this lock, and also that the two kthreads are guaranteed to have frequent
- * grace-period-duration time intervals between successive acquisitions
- * of the lock.  This allows us to use an extremely simple throttling
- * mechanism, and further to apply it only to the CPU doing floods of
- * call_rcu() invocations.  Don't try this at home!
- */
-static void rcu_nocb_wait_contended(struct rcu_data *rdp)
-{
-       WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
-       while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
-               cpu_relax();
-}
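A minimal userspace analogue of this flag-the-contention pattern, sketched with pthreads purely for illustration (the kernel code above uses raw spinlocks and per-CPU constraints that this sketch does not model):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int contended;

static void contended_lock(void)
{
        if (pthread_mutex_trylock(&lock) == 0)
                return;                  /* Fast path: uncontended. */
        atomic_fetch_add(&contended, 1); /* Publish the contention. */
        pthread_mutex_lock(&lock);       /* Slow path: wait for it. */
        atomic_fetch_sub(&contended, 1);
}

/* Analogue of rcu_nocb_wait_contended(): throttle while flagged. */
static void wait_uncontended(void)
{
        while (atomic_load(&contended))
                sched_yield();
}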
-
-/*
- * Conditionally acquire the specified rcu_data structure's
- * ->nocb_bypass_lock.
- */
-static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       return raw_spin_trylock(&rdp->nocb_bypass_lock);
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_bypass_lock.
- */
-static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
-       __releases(&rdp->nocb_bypass_lock)
-{
-       lockdep_assert_irqs_disabled();
-       raw_spin_unlock(&rdp->nocb_bypass_lock);
-}
-
-/*
- * Acquire the specified rcu_data structure's ->nocb_lock, but only
- * if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_lock(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       if (!rcu_rdp_is_offloaded(rdp))
-               return;
-       raw_spin_lock(&rdp->nocb_lock);
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_lock, but only
- * if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_unlock(struct rcu_data *rdp)
-{
-       if (rcu_rdp_is_offloaded(rdp)) {
-               lockdep_assert_irqs_disabled();
-               raw_spin_unlock(&rdp->nocb_lock);
-       }
-}
-
-/*
- * Release the specified rcu_data structure's ->nocb_lock and restore
- * interrupts, but only if it corresponds to a no-CBs CPU.
- */
-static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
-                                      unsigned long flags)
-{
-       if (rcu_rdp_is_offloaded(rdp)) {
-               lockdep_assert_irqs_disabled();
-               raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-       } else {
-               local_irq_restore(flags);
-       }
-}
-
-/* Lockdep check that ->cblist may be safely accessed. */
-static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-       if (rcu_rdp_is_offloaded(rdp))
-               lockdep_assert_held(&rdp->nocb_lock);
-}
-
-/*
- * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
- * grace period.
- */
-static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
-{
-       swake_up_all(sq);
-}
-
-static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
-{
-       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
-}
-
-static void rcu_init_one_nocb(struct rcu_node *rnp)
-{
-       init_swait_queue_head(&rnp->nocb_gp_wq[0]);
-       init_swait_queue_head(&rnp->nocb_gp_wq[1]);
-}
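A brief note on the "& 0x1" above, which follows from rcu_seq_ctr() shifting out the low-order state bits of ->gp_seq: the two queues are selected by grace-period parity (a restatement of the code, not new behavior):

/*
 * Sleepers waiting on grace period N use nocb_gp_wq[N & 0x1] and
 * sleepers waiting on grace period N + 1 use the other queue, so
 * waking everyone on the just-ended grace period's queue via
 * rcu_nocb_gp_cleanup() cannot disturb waiters for the next one.
 */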
-
-/* Is the specified CPU a no-CBs CPU? */
-bool rcu_is_nocb_cpu(int cpu)
-{
-       if (cpumask_available(rcu_nocb_mask))
-               return cpumask_test_cpu(cpu, rcu_nocb_mask);
-       return false;
-}
-
-static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
-                          struct rcu_data *rdp,
-                          bool force, unsigned long flags)
-       __releases(rdp_gp->nocb_gp_lock)
-{
-       bool needwake = false;
-
-       if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
-               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                   TPS("AlreadyAwake"));
-               return false;
-       }
-
-       if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
-               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-               del_timer(&rdp_gp->nocb_timer);
-       }
-
-       if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
-               WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
-               needwake = true;
-       }
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-       if (needwake) {
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
-               wake_up_process(rdp_gp->nocb_gp_kthread);
-       }
-
-       return needwake;
-}
-
-/*
- * Kick the GP kthread for this NOCB group.
- */
-static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       return __wake_nocb_gp(rdp_gp, rdp, force, flags);
-}
-
-/*
- * Arrange to wake the GP kthread for this NOCB group at some future
- * time when it is safe to do so.
- */
-static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
-                              const char *reason)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-
-       /*
-        * Bypass wakeup overrides previous deferments. In case
-        * of callback storm, no need to wake up too early.
-        */
-       if (waketype == RCU_NOCB_WAKE_BYPASS) {
-               mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
-               WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
-       } else {
-               if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
-                       mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
-               if (rdp_gp->nocb_defer_wakeup < waketype)
-                       WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
-       }
-
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
-}
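To make the deferment policy concrete, the timer deadlines chosen above work out as follows (a summary of the branches, not new behavior):

/*
 *   RCU_NOCB_WAKE_BYPASS:  timer at jiffies + 2, and the waketype is
 *                          overwritten unconditionally, so callback
 *                          storms are made to wait.
 *   RCU_NOCB_WAKE,
 *   RCU_NOCB_WAKE_FORCE:   timer at jiffies + 1 unless one is already
 *                          armed at RCU_NOCB_WAKE level or above, and
 *                          the waketype only ever escalates.
 */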
-
-/*
- * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
- * However, if there is a callback to be enqueued and if ->nocb_bypass
- * proves to be initially empty, just return false because the no-CB GP
- * kthread may need to be awakened in this case.
- *
- * Note that this function always returns true if rhp is NULL.
- */
-static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                    unsigned long j)
-{
-       struct rcu_cblist rcl;
-
-       WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
-       rcu_lockdep_assert_cblist_protected(rdp);
-       lockdep_assert_held(&rdp->nocb_bypass_lock);
-       if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
-               raw_spin_unlock(&rdp->nocb_bypass_lock);
-               return false;
-       }
-       /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
-       if (rhp)
-               rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
-       rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
-       rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
-       WRITE_ONCE(rdp->nocb_bypass_first, j);
-       rcu_nocb_bypass_unlock(rdp);
-       return true;
-}
-
-/*
- * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
- * However, if there is a callback to be enqueued and if ->nocb_bypass
- * proves to be initially empty, just return false because the no-CB GP
- * kthread may need to be awakened in this case.
- *
- * Note that this function always returns true if rhp is NULL.
- */
-static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                 unsigned long j)
-{
-       if (!rcu_rdp_is_offloaded(rdp))
-               return true;
-       rcu_lockdep_assert_cblist_protected(rdp);
-       rcu_nocb_bypass_lock(rdp);
-       return rcu_nocb_do_flush_bypass(rdp, rhp, j);
-}
-
-/*
- * If the ->nocb_bypass_lock is immediately available, flush the
- * ->nocb_bypass queue into ->cblist.
- */
-static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
-{
-       rcu_lockdep_assert_cblist_protected(rdp);
-       if (!rcu_rdp_is_offloaded(rdp) ||
-           !rcu_nocb_bypass_trylock(rdp))
-               return;
-       WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
-}
-
-/*
- * See whether it is appropriate to use the ->nocb_bypass list in order
- * to control contention on ->nocb_lock.  A limited number of direct
- * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
- * is non-empty, further callbacks must be placed into ->nocb_bypass,
- * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
- * back to direct use of ->cblist.  However, ->nocb_bypass should not be
- * used if ->cblist is empty, because otherwise callbacks can be stranded
- * on ->nocb_bypass because we cannot count on the current CPU ever again
- * invoking call_rcu().  The general rule is that if ->nocb_bypass is
- * non-empty, the corresponding no-CBs grace-period kthread must not be
- * in an indefinite sleep state.
- *
- * Finally, it is not permitted to use the bypass during early boot,
- * as doing so would confuse the auto-initialization code.  Besides
- * which, there is no point in worrying about lock contention while
- * there is only one CPU in operation.
- */
-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                               bool *was_alldone, unsigned long flags)
-{
-       unsigned long c;
-       unsigned long cur_gp_seq;
-       unsigned long j = jiffies;
-       long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-
-       lockdep_assert_irqs_disabled();
-
-       // Pure softirq/rcuc based processing: no bypassing, no
-       // locking.
-       if (!rcu_rdp_is_offloaded(rdp)) {
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false;
-       }
-
-       // In the process of (de-)offloading: no bypassing, but
-       // locking.
-       if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
-               rcu_nocb_lock(rdp);
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false; /* Not offloaded, no bypassing. */
-       }
-
-       // Don't use ->nocb_bypass during early boot.
-       if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-               rcu_nocb_lock(rdp);
-               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               return false;
-       }
-
-       // If we have advanced to a new jiffy, reset counts to allow
-       // moving back from ->nocb_bypass to ->cblist.
-       if (j == rdp->nocb_nobypass_last) {
-               c = rdp->nocb_nobypass_count + 1;
-       } else {
-               WRITE_ONCE(rdp->nocb_nobypass_last, j);
-               c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
-               if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
-                                nocb_nobypass_lim_per_jiffy))
-                       c = 0;
-               else if (c > nocb_nobypass_lim_per_jiffy)
-                       c = nocb_nobypass_lim_per_jiffy;
-       }
-       WRITE_ONCE(rdp->nocb_nobypass_count, c);
-
-       // If there hasn't yet been all that many ->cblist enqueues
-       // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
-       // ->nocb_bypass first.
-       if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
-               rcu_nocb_lock(rdp);
-               *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-               if (*was_alldone)
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstQ"));
-               WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
-               WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-               return false; // Caller must enqueue the callback.
-       }
-
-       // If ->nocb_bypass has been used too long or is too full,
-       // flush ->nocb_bypass to ->cblist.
-       if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
-           ncbs >= qhimark) {
-               rcu_nocb_lock(rdp);
-               if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
-                       *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-                       if (*was_alldone)
-                               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                                   TPS("FirstQ"));
-                       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-                       return false; // Caller must enqueue the callback.
-               }
-               if (j != rdp->nocb_gp_adv_time &&
-                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
-                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
-                       rdp->nocb_gp_adv_time = j;
-               }
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               return true; // Callback already enqueued.
-       }
-
-       // We need to use the bypass.
-       rcu_nocb_wait_contended(rdp);
-       rcu_nocb_bypass_lock(rdp);
-       ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-       rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
-       rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
-       if (!ncbs) {
-               WRITE_ONCE(rdp->nocb_bypass_first, j);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
-       }
-       rcu_nocb_bypass_unlock(rdp);
-       smp_mb(); /* Order enqueue before wake. */
-       if (ncbs) {
-               local_irq_restore(flags);
-       } else {
-               // No-CBs GP kthread might be indefinitely asleep, if so, wake.
-               rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
-               if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstBQwake"));
-                       __call_rcu_nocb_wake(rdp, true, flags);
-               } else {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("FirstBQnoWake"));
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-               }
-       }
-       return true; // Callback already enqueued.
-}
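The decision flow above can be summarized as follows, with return values as the caller sees them (a restatement of the code, not new behavior):

/*
 *   not offloaded             -> return false, no locking; caller enqueues
 *   (de-)offload in progress  -> lock, return false; caller enqueues
 *   early boot                -> lock, return false; bypass not used yet
 *   under the per-jiffy limit -> lock, flush bypass, return false
 *   bypass stale or >= qhimark-> flush bypass and rhp into ->cblist and
 *                                return true, unless the bypass was
 *                                already empty, in which case lock and
 *                                return false so the caller enqueues
 *   otherwise                 -> append rhp to ->nocb_bypass, return true
 */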
-
-/*
- * Awaken the no-CBs grace-period kthread if needed, either due to it
- * legitimately being asleep or due to overload conditions.
- *
- * If warranted, also wake up the kthread servicing this CPU's queues.
- */
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
-                                unsigned long flags)
-                                __releases(rdp->nocb_lock)
-{
-       unsigned long cur_gp_seq;
-       unsigned long j;
-       long len;
-       struct task_struct *t;
-
-       // If we are being polled or there is no kthread, just leave.
-       t = READ_ONCE(rdp->nocb_gp_kthread);
-       if (rcu_nocb_poll || !t) {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                   TPS("WakeNotPoll"));
-               return;
-       }
-       // Need to actually do a wakeup.
-       len = rcu_segcblist_n_cbs(&rdp->cblist);
-       if (was_alldone) {
-               rdp->qlen_last_fqs_check = len;
-               if (!irqs_disabled_flags(flags)) {
-                       /* ... if queue was empty ... */
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp(rdp, false);
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("WakeEmpty"));
-               } else {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
-                                          TPS("WakeEmptyIsDeferred"));
-               }
-       } else if (len > rdp->qlen_last_fqs_check + qhimark) {
-               /* ... or if many callbacks queued. */
-               rdp->qlen_last_fqs_check = len;
-               j = jiffies;
-               if (j != rdp->nocb_gp_adv_time &&
-                   rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                   rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
-                       rcu_advance_cbs_nowake(rdp->mynode, rdp);
-                       rdp->nocb_gp_adv_time = j;
-               }
-               smp_mb(); /* Enqueue before timer_pending(). */
-               if ((rdp->nocb_cb_sleep ||
-                    !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
-                   !timer_pending(&rdp->nocb_timer)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
-                                          TPS("WakeOvfIsDeferred"));
-               } else {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-               }
-       } else {
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
-       }
-       return;
-}
-
-/*
- * Check whether we ignore this rdp.
- *
- * We check this without holding the nocb lock, but
- * we are guaranteed not to miss a freshly offloaded rdp
- * thanks to the following ordering:
- *
- *  rdp_offload_toggle()        nocb_gp_enabled_cb()
- * -------------------------   ----------------------------
- *    WRITE flags                 LOCK nocb_gp_lock
- *    LOCK nocb_gp_lock           READ/WRITE nocb_gp_sleep
- *    READ/WRITE nocb_gp_sleep    UNLOCK nocb_gp_lock
- *    UNLOCK nocb_gp_lock         READ flags
- */
-static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
-{
-       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;
-
-       return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
-static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
-                                                    bool *needwake_state)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-
-       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
-                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-                               *needwake_state = true;
-               }
-               return false;
-       }
-
-       /*
-        * De-offloading. Clear our flag and notify the de-offload worker.
-        * We will ignore this rdp until it is re-offloaded.
-        */
-       WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
-       rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-       if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-               *needwake_state = true;
-       return true;
-}
-
-
-/*
- * No-CBs GP kthreads come here to wait for additional callbacks to show up
- * or for grace periods to end.
- */
-static void nocb_gp_wait(struct rcu_data *my_rdp)
-{
-       bool bypass = false;
-       long bypass_ncbs;
-       int __maybe_unused cpu = my_rdp->cpu;
-       unsigned long cur_gp_seq;
-       unsigned long flags;
-       bool gotcbs = false;
-       unsigned long j = jiffies;
-       bool needwait_gp = false; // This prevents actual uninitialized use.
-       bool needwake;
-       bool needwake_gp;
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-       unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
-       bool wasempty = false;
-
-       /*
-        * Each pass through the following loop checks for CBs and for the
-        * nearest grace period (if any) to wait for next.  The CB kthreads
-        * and the global grace-period kthread are awakened if needed.
-        */
-       WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
-       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
-               bool needwake_state = false;
-
-               if (!nocb_gp_enabled_cb(rdp))
-                       continue;
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
-               rcu_nocb_lock_irqsave(rdp, flags);
-               if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       if (needwake_state)
-                               swake_up_one(&rdp->nocb_state_wq);
-                       continue;
-               }
-               bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-               if (bypass_ncbs &&
-                   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
-                    bypass_ncbs > 2 * qhimark)) {
-                       // Bypass full or old, so flush it.
-                       (void)rcu_nocb_try_flush_bypass(rdp, j);
-                       bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
-               } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
-                       rcu_nocb_unlock_irqrestore(rdp, flags);
-                       if (needwake_state)
-                               swake_up_one(&rdp->nocb_state_wq);
-                       continue; /* No callbacks here, try next. */
-               }
-               if (bypass_ncbs) {
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("Bypass"));
-                       bypass = true;
-               }
-               rnp = rdp->mynode;
-
-               // Advance callbacks if helpful and low contention.
-               needwake_gp = false;
-               if (!rcu_segcblist_restempty(&rdp->cblist,
-                                            RCU_NEXT_READY_TAIL) ||
-                   (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
-                    rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
-                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-                       needwake_gp = rcu_advance_cbs(rnp, rdp);
-                       wasempty = rcu_segcblist_restempty(&rdp->cblist,
-                                                          RCU_NEXT_READY_TAIL);
-                       raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
-               }
-               // Need to wait on some grace period?
-               WARN_ON_ONCE(wasempty &&
-                            !rcu_segcblist_restempty(&rdp->cblist,
-                                                     RCU_NEXT_READY_TAIL));
-               if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
-                       if (!needwait_gp ||
-                           ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
-                               wait_gp_seq = cur_gp_seq;
-                       needwait_gp = true;
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
-                                           TPS("NeedWaitGP"));
-               }
-               if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
-                       needwake = rdp->nocb_cb_sleep;
-                       WRITE_ONCE(rdp->nocb_cb_sleep, false);
-                       smp_mb(); /* CB invocation -after- GP end. */
-               } else {
-                       needwake = false;
-               }
-               rcu_nocb_unlock_irqrestore(rdp, flags);
-               if (needwake) {
-                       swake_up_one(&rdp->nocb_cb_wq);
-                       gotcbs = true;
-               }
-               if (needwake_gp)
-                       rcu_gp_kthread_wake();
-               if (needwake_state)
-                       swake_up_one(&rdp->nocb_state_wq);
-       }
-
-       my_rdp->nocb_gp_bypass = bypass;
-       my_rdp->nocb_gp_gp = needwait_gp;
-       my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
-
-       if (bypass && !rcu_nocb_poll) {
-               // At least one child with non-empty ->nocb_bypass, so set
-               // timer in order to avoid stranding its callbacks.
-               wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
-                                  TPS("WakeBypassIsDeferred"));
-       }
-       if (rcu_nocb_poll) {
-               /* Polling, so trace if first poll in the series. */
-               if (gotcbs)
-                       trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
-               schedule_timeout_idle(1);
-       } else if (!needwait_gp) {
-               /* Wait for callbacks to appear. */
-               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
-               swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
-                               !READ_ONCE(my_rdp->nocb_gp_sleep));
-               trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
-       } else {
-               rnp = my_rdp->mynode;
-               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
-               swait_event_interruptible_exclusive(
-                       rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
-                       rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
-                       !READ_ONCE(my_rdp->nocb_gp_sleep));
-               trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
-       }
-       if (!rcu_nocb_poll) {
-               raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
-               if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
-                       WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-                       del_timer(&my_rdp->nocb_timer);
-               }
-               WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
-               raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
-       }
-       my_rdp->nocb_gp_seq = -1;
-       WARN_ON(signal_pending(current));
-}
-
-/*
- * No-CBs grace-period-wait kthread.  There is one of these per group
- * of CPUs, but it is created only after at least one CPU in that group
- * has come online at least once since boot.  This kthread checks for
- * newly posted callbacks from any of the CPUs it is responsible for, waits for a
- * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
- * that then have callback-invocation work to do.
- */
-static int rcu_nocb_gp_kthread(void *arg)
-{
-       struct rcu_data *rdp = arg;
-
-       for (;;) {
-               WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
-               nocb_gp_wait(rdp);
-               cond_resched_tasks_rcu_qs();
-       }
-       return 0;
-}
-
-static inline bool nocb_cb_can_run(struct rcu_data *rdp)
-{
-       u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
-       return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
-static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
-{
-       return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
-}
-
-/*
- * Invoke any ready callbacks from the corresponding no-CBs CPU,
- * then, if there are no more, wait for more to appear.
- */
-static void nocb_cb_wait(struct rcu_data *rdp)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long cur_gp_seq;
-       unsigned long flags;
-       bool needwake_state = false;
-       bool needwake_gp = false;
-       bool can_sleep = true;
-       struct rcu_node *rnp = rdp->mynode;
-
-       local_irq_save(flags);
-       rcu_momentary_dyntick_idle();
-       local_irq_restore(flags);
-       /*
-        * Disable BH to provide the expected environment.  Also, when
-        * transitioning to/from NOCB mode, a self-requeuing callback might
-        * be invoked from softirq.  A short grace period could cause both
-        * instances of this callback to execute concurrently.
-        */
-       local_bh_disable();
-       rcu_do_batch(rdp);
-       local_bh_enable();
-       lockdep_assert_irqs_enabled();
-       rcu_nocb_lock_irqsave(rdp, flags);
-       if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
-           rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
-           raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
-               needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
-               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
-       }
-
-       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
-                       rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
-                       if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                               needwake_state = true;
-               }
-               if (rcu_segcblist_ready_cbs(cblist))
-                       can_sleep = false;
-       } else {
-               /*
-                * De-offloading. Clear our flag and notify the de-offload worker.
-                * We won't touch the callbacks and will keep sleeping
-                * until we are re-offloaded.
-                */
-               WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
-               rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
-               if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                       needwake_state = true;
-       }
-
-       WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
-
-       if (rdp->nocb_cb_sleep)
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
-
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-       if (needwake_gp)
-               rcu_gp_kthread_wake();
-
-       if (needwake_state)
-               swake_up_one(&rdp->nocb_state_wq);
-
-       do {
-               swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
-                                                   nocb_cb_wait_cond(rdp));
-
-               // VVV Ensure CB invocation follows _sleep test.
-               if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
-                       WARN_ON(signal_pending(current));
-                       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
-               }
-       } while (!nocb_cb_can_run(rdp));
-}
-
-/*
- * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
- * nocb_cb_wait() to do the dirty work.
- */
-static int rcu_nocb_cb_kthread(void *arg)
-{
-       struct rcu_data *rdp = arg;
-
-       // Each pass through this loop does one callback batch, and,
-       // if there are no more ready callbacks, waits for them.
-       for (;;) {
-               nocb_cb_wait(rdp);
-               cond_resched_tasks_rcu_qs();
-       }
-       return 0;
-}
-
-/* Is a deferred wakeup of rcu_nocb_kthread() required? */
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
-{
-       return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
-}
-
-/* Do a deferred wakeup of rcu_nocb_kthread(). */
-static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
-                                          struct rcu_data *rdp, int level,
-                                          unsigned long flags)
-       __releases(rdp_gp->nocb_gp_lock)
-{
-       int ndw;
-       int ret;
-
-       if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
-               raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-               return false;
-       }
-
-       ndw = rdp_gp->nocb_defer_wakeup;
-       ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
-
-       return ret;
-}
-
-/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
-static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
-{
-       unsigned long flags;
-       struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
-
-       WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
-       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
-
-       raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
-       smp_mb__after_spinlock(); /* Timer expire before wakeup. */
-       do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
-}
-
-/*
- * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
- * This means we do an inexact common-case check.  Note that if
- * we miss, ->nocb_timer will eventually clean things up.
- */
-static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
-{
-       unsigned long flags;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-
-       if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
-               return false;
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
-}
-
-void rcu_nocb_flush_deferred_wakeup(void)
-{
-       do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
-
-static int rdp_offload_toggle(struct rcu_data *rdp,
-                              bool offload, unsigned long flags)
-       __releases(rdp->nocb_lock)
-{
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
-       bool wake_gp = false;
-
-       rcu_segcblist_offload(cblist, offload);
-
-       if (rdp->nocb_cb_sleep)
-               rdp->nocb_cb_sleep = false;
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-
-       /*
-        * Ignore former value of nocb_cb_sleep and force wake up as it could
-        * have been spuriously set to false already.
-        */
-       swake_up_one(&rdp->nocb_cb_wq);
-
-       raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
-       if (rdp_gp->nocb_gp_sleep) {
-               rdp_gp->nocb_gp_sleep = false;
-               wake_gp = true;
-       }
-       raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
-
-       if (wake_gp)
-               wake_up_process(rdp_gp->nocb_gp_kthread);
-
-       return 0;
-}
-
-static long rcu_nocb_rdp_deoffload(void *arg)
-{
-       struct rcu_data *rdp = arg;
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long flags;
-       int ret;
-
-       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
-
-       pr_info("De-offloading %d\n", rdp->cpu);
-
-       rcu_nocb_lock_irqsave(rdp, flags);
-       /*
-        * Flush once and for all now. This suffices because we are
-        * running on the target CPU holding ->nocb_lock (thus having
-        * interrupts disabled), and because rdp_offload_toggle()
-        * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
-        * Thus future calls to rcu_segcblist_completely_offloaded() will
-        * return false, which means that future calls to rcu_nocb_try_bypass()
-        * will refuse to put anything into the bypass.
-        */
-       WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
-       ret = rdp_offload_toggle(rdp, false, flags);
-       swait_event_exclusive(rdp->nocb_state_wq,
-                             !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
-                                                       SEGCBLIST_KTHREAD_GP));
-       /*
-        * Lock one last time to acquire latest callback updates from kthreads
-        * so we can later handle callbacks locally without locking.
-        */
-       rcu_nocb_lock_irqsave(rdp, flags);
-       /*
-        * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
-        * lock is released but how about being paranoid for once?
-        */
-       rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
-       /*
-        * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
-        * rcu_nocb_unlock_irqrestore() anymore.
-        */
-       raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-
-       /* Sanity check */
-       WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-
-       return ret;
-}
-
-int rcu_nocb_cpu_deoffload(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       int ret = 0;
-
-       mutex_lock(&rcu_state.barrier_mutex);
-       cpus_read_lock();
-       if (rcu_rdp_is_offloaded(rdp)) {
-               if (cpu_online(cpu)) {
-                       ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
-                       if (!ret)
-                               cpumask_clear_cpu(cpu, rcu_nocb_mask);
-               } else {
-                       pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
-                       ret = -EINVAL;
-               }
-       }
-       cpus_read_unlock();
-       mutex_unlock(&rcu_state.barrier_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
-
-static long rcu_nocb_rdp_offload(void *arg)
-{
-       struct rcu_data *rdp = arg;
-       struct rcu_segcblist *cblist = &rdp->cblist;
-       unsigned long flags;
-       int ret;
-
-       WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
-       /*
-        * For now we only support re-offload, i.e., the rdp must have been
-        * offloaded on boot first.
-        */
-       if (!rdp->nocb_gp_rdp)
-               return -EINVAL;
-
-       pr_info("Offloading %d\n", rdp->cpu);
-       /*
-        * Can't use rcu_nocb_lock_irqsave() while we are in
-        * SEGCBLIST_SOFTIRQ_ONLY mode.
-        */
-       raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-
-       /*
-        * We didn't take the nocb lock while working on the
-        * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode.
-        * Every modification that was previously done on
-        * rdp->cblist must be visible remotely to the nocb kthreads
-        * upon wake up after they read the cblist flags.
-        *
-        * The layout against nocb_lock enforces that ordering:
-        *
-        *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
-        * -------------------------   ----------------------------
-        *      WRITE callbacks           rcu_nocb_lock()
-        *      rcu_nocb_lock()           READ flags
-        *      WRITE flags               READ callbacks
-        *      rcu_nocb_unlock()         rcu_nocb_unlock()
-        */
-       ret = rdp_offload_toggle(rdp, true, flags);
-       swait_event_exclusive(rdp->nocb_state_wq,
-                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
-                             rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
-
-       return ret;
-}
-
-int rcu_nocb_cpu_offload(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       int ret = 0;
-
-       mutex_lock(&rcu_state.barrier_mutex);
-       cpus_read_lock();
-       if (!rcu_rdp_is_offloaded(rdp)) {
-               if (cpu_online(cpu)) {
-                       ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
-                       if (!ret)
-                               cpumask_set_cpu(cpu, rcu_nocb_mask);
-               } else {
-                       pr_info("NOCB: Can't CB-offload an offline CPU\n");
-                       ret = -EINVAL;
-               }
-       }
-       cpus_read_unlock();
-       mutex_unlock(&rcu_state.barrier_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
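Since both toggles are EXPORT_SYMBOL_GPL, a GPL module can flip offloading at run time. A minimal sketch, assuming the declarations are visible via <linux/rcupdate.h> (the module and parameter names are hypothetical):

#include <linux/module.h>
#include <linux/rcupdate.h>

static int cpu = 1;
module_param(cpu, int, 0444);

static int __init nocb_toggle_init(void)
{
        /* Returns 0 on success, -EINVAL for an offline CPU. */
        return rcu_nocb_cpu_offload(cpu);
}

static void __exit nocb_toggle_exit(void)
{
        rcu_nocb_cpu_deoffload(cpu);
}

module_init(nocb_toggle_init);
module_exit(nocb_toggle_exit);
MODULE_LICENSE("GPL");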
-
-void __init rcu_init_nohz(void)
-{
-       int cpu;
-       bool need_rcu_nocb_mask = false;
-       struct rcu_data *rdp;
-
-#if defined(CONFIG_NO_HZ_FULL)
-       if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
-               need_rcu_nocb_mask = true;
-#endif /* #if defined(CONFIG_NO_HZ_FULL) */
-
-       if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
-               if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
-                       pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
-                       return;
-               }
-       }
-       if (!cpumask_available(rcu_nocb_mask))
-               return;
-
-#if defined(CONFIG_NO_HZ_FULL)
-       if (tick_nohz_full_running)
-               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
-#endif /* #if defined(CONFIG_NO_HZ_FULL) */
-
-       if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-               pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
-               cpumask_and(rcu_nocb_mask, cpu_possible_mask,
-                           rcu_nocb_mask);
-       }
-       if (cpumask_empty(rcu_nocb_mask))
-               pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
-       else
-               pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
-                       cpumask_pr_args(rcu_nocb_mask));
-       if (rcu_nocb_poll)
-               pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
-
-       for_each_cpu(cpu, rcu_nocb_mask) {
-               rdp = per_cpu_ptr(&rcu_data, cpu);
-               if (rcu_segcblist_empty(&rdp->cblist))
-                       rcu_segcblist_init(&rdp->cblist);
-               rcu_segcblist_offload(&rdp->cblist, true);
-               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
-               rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-       }
-       rcu_organize_nocb_kthreads();
-}
-
-/* Initialize per-rcu_data variables for no-CBs CPUs. */
-static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
-{
-       init_swait_queue_head(&rdp->nocb_cb_wq);
-       init_swait_queue_head(&rdp->nocb_gp_wq);
-       init_swait_queue_head(&rdp->nocb_state_wq);
-       raw_spin_lock_init(&rdp->nocb_lock);
-       raw_spin_lock_init(&rdp->nocb_bypass_lock);
-       raw_spin_lock_init(&rdp->nocb_gp_lock);
-       timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
-       rcu_cblist_init(&rdp->nocb_bypass);
-}
-
-/*
- * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
- * for this CPU's group has not yet been created, spawn it as well.
- */
-static void rcu_spawn_one_nocb_kthread(int cpu)
-{
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       struct rcu_data *rdp_gp;
-       struct task_struct *t;
-
-       /*
-        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
-        * then nothing to do.
-        */
-       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
-               return;
-
-       /* If we didn't spawn the GP kthread first, reorganize! */
-       rdp_gp = rdp->nocb_gp_rdp;
-       if (!rdp_gp->nocb_gp_kthread) {
-               t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
-                               "rcuog/%d", rdp_gp->cpu);
-               if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
-                       return;
-               WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
-       }
-
-       /* Spawn the kthread for this CPU. */
-       t = kthread_run(rcu_nocb_cb_kthread, rdp,
-                       "rcuo%c/%d", rcu_state.abbr, cpu);
-       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
-               return;
-       WRITE_ONCE(rdp->nocb_cb_kthread, t);
-       WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
-}
-
-/*
- * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthread, spawn it.
- */
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
-{
-       if (rcu_scheduler_fully_active)
-               rcu_spawn_one_nocb_kthread(cpu);
-}
-
-/*
- * Once the scheduler is running, spawn rcuo kthreads for all online
- * no-CBs CPUs.  This assumes that the early_initcall()s happen before
- * non-boot CPUs come online -- if this changes, we will need to add
- * some mutual exclusion.
- */
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-       int cpu;
-
-       for_each_online_cpu(cpu)
-               rcu_spawn_cpu_nocb_kthread(cpu);
-}
-
-/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
-static int rcu_nocb_gp_stride = -1;
-module_param(rcu_nocb_gp_stride, int, 0444);
-
-/*
- * Initialize GP-CB relationships for all no-CBs CPUs.
- */
-static void __init rcu_organize_nocb_kthreads(void)
-{
-       int cpu;
-       bool firsttime = true;
-       bool gotnocbs = false;
-       bool gotnocbscbs = true;
-       int ls = rcu_nocb_gp_stride;
-       int nl = 0;  /* Next GP kthread. */
-       struct rcu_data *rdp;
-       struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
-       struct rcu_data *rdp_prev = NULL;
-
-       if (!cpumask_available(rcu_nocb_mask))
-               return;
-       if (ls == -1) {
-               ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
-               rcu_nocb_gp_stride = ls;
-       }
-
-       /*
-        * Each pass through this loop sets up one rcu_data structure.
-        * Should the corresponding CPU come online in the future, then
-        * we will spawn the needed set of rcu_nocb_kthread() kthreads.
-        */
-       for_each_cpu(cpu, rcu_nocb_mask) {
-               rdp = per_cpu_ptr(&rcu_data, cpu);
-               if (rdp->cpu >= nl) {
-                       /* New GP kthread, set up for CBs & next GP. */
-                       gotnocbs = true;
-                       nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
-                       rdp->nocb_gp_rdp = rdp;
-                       rdp_gp = rdp;
-                       if (dump_tree) {
-                               if (!firsttime)
-                                       pr_cont("%s\n", gotnocbscbs
-                                                       ? "" : " (self only)");
-                               gotnocbscbs = false;
-                               firsttime = false;
-                               pr_alert("%s: No-CB GP kthread CPU %d:",
-                                        __func__, cpu);
-                       }
-               } else {
-                       /* Another CB kthread, link to previous GP kthread. */
-                       gotnocbscbs = true;
-                       rdp->nocb_gp_rdp = rdp_gp;
-                       rdp_prev->nocb_next_cb_rdp = rdp;
-                       if (dump_tree)
-                               pr_cont(" %d", cpu);
-               }
-               rdp_prev = rdp;
-       }
-       if (gotnocbs && dump_tree)
-               pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
-}
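A worked example of the grouping arithmetic, assuming nr_cpu_ids = 16 and every CPU present in rcu_nocb_mask:

/*
 *   ls = rcu_nocb_gp_stride = 16 / int_sqrt(16) = 4
 *
 *   CPU  0:   0 >= nl (0) -> new rcuog leader, nl = DIV_ROUND_UP(1, 4) * 4 = 4
 *   CPUs 1-3:              -> CB kthreads linked to CPU 0's leader
 *   CPU  4:   4 >= nl (4) -> new rcuog leader, nl = DIV_ROUND_UP(5, 4) * 4 = 8
 *   ...
 *
 * yielding the groups {0-3}, {4-7}, {8-11}, and {12-15}, each led by
 * its lowest-numbered no-CBs CPU.
 */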
-
-/*
- * Bind the current task to the offloaded CPUs.  If there are no offloaded
- * CPUs, leave the task unbound.  Splat if the bind attempt fails.
- */
-void rcu_bind_current_to_nocb(void)
-{
-       if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
-               WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
-}
-EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
-
-// The ->on_cpu field is available only in CONFIG_SMP=y, so...
-#ifdef CONFIG_SMP
-static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
-{
-       return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
-}
-#else // #ifdef CONFIG_SMP
-static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
-{
-       return "";
-}
-#endif // #else #ifdef CONFIG_SMP
-
-/*
- * Dump out nocb grace-period kthread state for the specified rcu_data
- * structure.
- */
-static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
-{
-       struct rcu_node *rnp = rdp->mynode;
-
-       pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
-               rdp->cpu,
-               "kK"[!!rdp->nocb_gp_kthread],
-               "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
-               "dD"[!!rdp->nocb_defer_wakeup],
-               "tT"[timer_pending(&rdp->nocb_timer)],
-               "sS"[!!rdp->nocb_gp_sleep],
-               ".W"[swait_active(&rdp->nocb_gp_wq)],
-               ".W"[swait_active(&rnp->nocb_gp_wq[0])],
-               ".W"[swait_active(&rnp->nocb_gp_wq[1])],
-               ".B"[!!rdp->nocb_gp_bypass],
-               ".G"[!!rdp->nocb_gp_gp],
-               (long)rdp->nocb_gp_seq,
-               rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
-               rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
-               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
-               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
-}
-
-/* Dump out nocb kthread state for the specified rcu_data structure. */
-static void show_rcu_nocb_state(struct rcu_data *rdp)
-{
-       char bufw[20];
-       char bufr[20];
-       struct rcu_segcblist *rsclp = &rdp->cblist;
-       bool waslocked;
-       bool wassleep;
-
-       if (rdp->nocb_gp_rdp == rdp)
-               show_rcu_nocb_gp_state(rdp);
-
-       sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
-       sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
-       pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
-               rdp->cpu, rdp->nocb_gp_rdp->cpu,
-               rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
-               "kK"[!!rdp->nocb_cb_kthread],
-               "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
-               "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
-               "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
-               "sS"[!!rdp->nocb_cb_sleep],
-               ".W"[swait_active(&rdp->nocb_cb_wq)],
-               jiffies - rdp->nocb_bypass_first,
-               jiffies - rdp->nocb_nobypass_last,
-               rdp->nocb_nobypass_count,
-               ".D"[rcu_segcblist_ready_cbs(rsclp)],
-               ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
-               rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
-               ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
-               rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
-               ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
-               ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
-               rcu_segcblist_n_cbs(&rdp->cblist),
-               rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
-               rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
-               show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
-
-       /* It is OK for GP kthreads to have GP state. */
-       if (rdp->nocb_gp_rdp == rdp)
-               return;
-
-       waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
-       wassleep = swait_active(&rdp->nocb_gp_wq);
-       if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
-               return;  /* Nothing untoward. */
-
-       pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
-               "lL"[waslocked],
-               "dD"[!!rdp->nocb_defer_wakeup],
-               "sS"[!!rdp->nocb_gp_sleep],
-               ".W"[wassleep]);
-}
-
-#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-
-/* No ->nocb_lock to acquire.  */
-static void rcu_nocb_lock(struct rcu_data *rdp)
-{
-}
-
-/* No ->nocb_lock to release.  */
-static void rcu_nocb_unlock(struct rcu_data *rdp)
-{
-}
-
-/* No ->nocb_lock to release.  */
-static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
-                                      unsigned long flags)
-{
-       local_irq_restore(flags);
-}
-
-/* Lockdep check that ->cblist may be safely accessed. */
-static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
-{
-       lockdep_assert_irqs_disabled();
-}
-
-static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
-{
-}
-
-static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
-{
-       return NULL;
-}
-
-static void rcu_init_one_nocb(struct rcu_node *rnp)
-{
-}
-
-static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                                 unsigned long j)
-{
-       return true;
-}
-
-static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
-                               bool *was_alldone, unsigned long flags)
-{
-       return false;
-}
-
-static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
-                                unsigned long flags)
-{
-       WARN_ON_ONCE(1);  /* Should be dead code! */
-}
-
-static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
-{
-}
-
-static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
-{
-       return false;
-}
-
-static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
-{
-       return false;
-}
-
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
-{
-}
-
-static void __init rcu_spawn_nocb_kthreads(void)
-{
-}
-
-static void show_rcu_nocb_state(struct rcu_data *rdp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-
 /*
  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
  * grace-period kthread will do force_quiescent_state() processing?
@@ -2982,17 +1498,17 @@ static void noinstr rcu_dynticks_task_exit(void)
 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
 static void rcu_dynticks_task_trace_enter(void)
 {
-#ifdef CONFIG_TASKS_RCU_TRACE
+#ifdef CONFIG_TASKS_TRACE_RCU
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
                current->trc_reader_special.b.need_mb = true;
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
 
 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
 static void rcu_dynticks_task_trace_exit(void)
 {
-#ifdef CONFIG_TASKS_RCU_TRACE
+#ifdef CONFIG_TASKS_TRACE_RCU
        if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
                current->trc_reader_special.b.need_mb = false;
-#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
index 6c76988cc019f6b8db077c442804ebf450e84bf4..677ee3d8671bf02daf9d8712105f7f8611bcf15f 100644 (file)
@@ -7,6 +7,8 @@
  * Author: Paul E. McKenney <paulmck@linux.ibm.com>
  */
 
+#include <linux/kvm_para.h>
+
 //////////////////////////////////////////////////////////////////////////////
 //
 // Controlling CPU stall warnings, including delay calculation.
@@ -117,17 +119,14 @@ static void panic_on_rcu_stall(void)
 }
 
 /**
- * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
- *
- * Set the stall-warning timeout way off into the future, thus preventing
- * any RCU CPU stall-warning messages from appearing in the current set of
- * RCU grace periods.
+ * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
  *
  * The caller must disable hard irqs.
  */
 void rcu_cpu_stall_reset(void)
 {
-       WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
+       WRITE_ONCE(rcu_state.jiffies_stall,
+                  jiffies + rcu_jiffies_till_stall_check());
 }
 
 //////////////////////////////////////////////////////////////////////////////
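The rewrite above changes the meaning of rcu_cpu_stall_reset(): instead of pushing ->jiffies_stall so far out (ULONG_MAX / 2 jiffies) that no warning can fire for the remainder of the current grace periods, it simply re-arms the timeout one stall-check interval from now. A two-line sketch of the before/after deadlines; the variable names are illustrative only, not from the patch:

        unsigned long old_deadline = jiffies + ULONG_MAX / 2;                  /* effectively never */
        unsigned long new_deadline = jiffies + rcu_jiffies_till_stall_check(); /* one interval out */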
@@ -267,8 +266,10 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
        struct task_struct *ts[8];
 
        lockdep_assert_irqs_disabled();
-       if (!rcu_preempt_blocked_readers_cgp(rnp))
+       if (!rcu_preempt_blocked_readers_cgp(rnp)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return 0;
+       }
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
        t = list_entry(rnp->gp_tasks->prev,
@@ -280,8 +281,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
                        break;
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       for (i--; i; i--) {
-               t = ts[i];
+       while (i) {
+               t = ts[--i];
                if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
                        pr_cont(" P%d", t->pid);
                else
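The two hunks above fix a lock leak (the early return in rcu_print_task_stall() left the caller holding the rcu_node lock) and an off-by-one (the old for (i--; i; i--) loop stopped before visiting ts[0]). A minimal sketch of the lock-leak shape, with nothing_to_do() as a hypothetical predicate:

static int consume_locked(struct rcu_node *rnp, unsigned long flags)
{
        /* Called with rnp's lock held and documented to release it. */
        if (nothing_to_do(rnp)) {       /* hypothetical predicate */
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return 0;               /* early exits must unlock too */
        }
        /* ... report the blocked tasks ... */
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        return 1;
}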
@@ -350,7 +351,7 @@ static void rcu_dump_cpu_stacks(void)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
        sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
@@ -464,9 +465,10 @@ static void rcu_check_gp_kthread_starvation(void)
                pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
                       rcu_state.name, j,
                       (long)rcu_seq_current(&rcu_state.gp_seq),
-                      data_race(rcu_state.gp_flags),
-                      gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
-                      gpk ? gpk->__state : ~0, cpu);
+                      data_race(READ_ONCE(rcu_state.gp_flags)),
+                      gp_state_getname(rcu_state.gp_state),
+                      data_race(READ_ONCE(rcu_state.gp_state)),
+                      gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
                if (gpk) {
                        pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
                        pr_err("RCU grace-period kthread stack dump:\n");
@@ -509,7 +511,7 @@ static void rcu_check_gp_kthread_expired_fqs_timer(void)
                       (long)rcu_seq_current(&rcu_state.gp_seq),
                       data_race(rcu_state.gp_flags),
                       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
-                      gpk->__state);
+                      data_race(READ_ONCE(gpk->__state)));
                pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
                       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
        }
@@ -568,11 +570,11 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
-                       gpa = data_race(rcu_state.gp_activity);
+                       gpa = data_race(READ_ONCE(rcu_state.gp_activity));
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rcu_state.name, j - gpa, j, gpa,
-                              data_race(jiffies_till_next_fqs),
-                              rcu_get_root()->qsmask);
+                              data_race(READ_ONCE(jiffies_till_next_fqs)),
+                              data_race(READ_ONCE(rcu_get_root()->qsmask)));
                }
        }
        /* Rewrite if needed in case of slow consoles. */
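The repeated data_race(READ_ONCE(...)) wrappers added above and below combine two orthogonal annotations: READ_ONCE() keeps the compiler from tearing or refetching the load, while data_race() tells KCSAN that this diagnostic read is intentionally racy. The idiom in isolation:

        /* Load racy state for printing: no load tearing, no KCSAN splat. */
        unsigned long gpa = data_race(READ_ONCE(rcu_state.gp_activity));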
@@ -646,6 +648,7 @@ static void print_cpu_stall(unsigned long gps)
 
 static void check_cpu_stall(struct rcu_data *rdp)
 {
+       bool didstall = false;
        unsigned long gs1;
        unsigned long gs2;
        unsigned long gps;
@@ -691,24 +694,46 @@ static void check_cpu_stall(struct rcu_data *rdp)
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
-       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+       jn = jiffies + ULONG_MAX / 2;
        if (rcu_gp_in_progress() &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
            cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like an RCU stall. Check to see if the host
+                * stopped the vm.
+                */
+               if (kvm_check_and_clear_guest_paused())
+                       return;
+
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);
+               didstall = true;
 
        } else if (rcu_gp_in_progress() &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
                   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
 
+               /*
+                * If a virtual machine is stopped by the host it can look to
+                * the watchdog like an RCU stall. Check to see if the host
+                * stopped the vm.
+                */
+               if (kvm_check_and_clear_guest_paused())
+                       return;
+
                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2, gps);
                if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
                        rcu_ftrace_dump(DUMP_ALL);
+               didstall = true;
+       }
+       if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
+               jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+               WRITE_ONCE(rcu_state.jiffies_stall, jn);
        }
 }
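The kvm_check_and_clear_guest_paused() calls added above apply the usual watchdog false-positive filter: if the host paused the entire guest, the apparent stall is an artifact of lost wall-clock time rather than a wedged CPU. A minimal sketch of the pattern; watchdog_should_report() is a hypothetical helper, not part of this patch:

#include <linux/jiffies.h>
#include <linux/kvm_para.h>

static bool watchdog_should_report(unsigned long deadline)
{
        if (time_before(jiffies, deadline))
                return false;   /* not overdue yet */
        if (kvm_check_and_clear_guest_paused())
                return false;   /* host paused the VM: not a real stall */
        return true;            /* genuinely overdue: complain */
}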
 
@@ -742,7 +767,7 @@ bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
 
        rcu_for_each_leaf_node(rnp) {
                if (!cpup) {
-                       if (READ_ONCE(rnp->qsmask)) {
+                       if (data_race(READ_ONCE(rnp->qsmask))) {
                                return false;
                        } else {
                                if (READ_ONCE(rnp->gp_tasks))
@@ -791,32 +816,34 @@ void show_rcu_gp_kthreads(void)
        struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
 
        j = jiffies;
-       ja = j - data_race(rcu_state.gp_activity);
-       jr = j - data_race(rcu_state.gp_req_activity);
-       js = j - data_race(rcu_state.gp_start);
-       jw = j - data_race(rcu_state.gp_wake_time);
+       ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
+       jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
+       js = j - data_race(READ_ONCE(rcu_state.gp_start));
+       jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
        pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
-               rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
-               js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
-               (long)data_race(rcu_state.gp_seq),
-               (long)data_race(rcu_get_root()->gp_seq_needed),
-               data_race(rcu_state.gp_max),
-               data_race(rcu_state.gp_flags));
+               data_race(READ_ONCE(rcu_state.gp_state)),
+               t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
+               js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
+               (long)data_race(READ_ONCE(rcu_state.gp_seq)),
+               (long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
+               data_race(READ_ONCE(rcu_state.gp_max)),
+               data_race(READ_ONCE(rcu_state.gp_flags)));
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
-                   !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
-                   !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
+                   !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
+                   !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
                        continue;
                pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
                        rnp->grplo, rnp->grphi,
-                       (long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
-                       data_race(rnp->qsmask),
-                       ".b"[!!data_race(rnp->boost_kthread_task)],
-                       ".B"[!!data_race(rnp->boost_tasks)],
-                       ".E"[!!data_race(rnp->exp_tasks)],
-                       ".G"[!!data_race(rnp->gp_tasks)],
-                       data_race(rnp->n_boosts));
+                       (long)data_race(READ_ONCE(rnp->gp_seq)),
+                       (long)data_race(READ_ONCE(rnp->gp_seq_needed)),
+                       data_race(READ_ONCE(rnp->qsmask)),
+                       ".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
+                       ".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
+                       ".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
+                       ".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
+                       data_race(READ_ONCE(rnp->n_boosts)));
                if (!rcu_is_leaf_node(rnp))
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
@@ -826,12 +853,12 @@ void show_rcu_gp_kthreads(void)
                                         READ_ONCE(rdp->gp_seq_needed)))
                                continue;
                        pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-                               cpu, (long)data_race(rdp->gp_seq_needed));
+                               cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
                }
        }
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
-               cbs += data_race(rdp->n_cbs_invoked);
+               cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
                if (rcu_segcblist_is_offloaded(&rdp->cblist))
                        show_rcu_nocb_state(rdp);
        }
@@ -913,11 +940,11 @@ void rcu_fwd_progress_check(unsigned long j)
 
        if (rcu_gp_in_progress()) {
                pr_info("%s: GP age %lu jiffies\n",
-                       __func__, jiffies - rcu_state.gp_start);
+                       __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
                show_rcu_gp_kthreads();
        } else {
                pr_info("%s: Last GP end %lu jiffies ago\n",
-                       __func__, jiffies - rcu_state.gp_end);
+                       __func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
                preempt_disable();
                rdp = this_cpu_ptr(&rcu_data);
                rcu_check_gp_start_stall(rdp->mynode, rdp, j);
index 29e8fc5d91a7bc457883cd9329d562d0d1eacbd3..64a08288b1a6d21a270fc4ff48f4fb7fc4ab66bc 100644 (file)
@@ -64,6 +64,7 @@ torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU
 torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
 torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
 torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
+torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
 torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
 torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
 torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU operations.");
@@ -86,6 +87,8 @@ struct scf_statistics {
        long long n_resched;
        long long n_single;
        long long n_single_ofl;
+       long long n_single_rpc;
+       long long n_single_rpc_ofl;
        long long n_single_wait;
        long long n_single_wait_ofl;
        long long n_many;
@@ -101,14 +104,17 @@ static DEFINE_PER_CPU(long long, scf_invoked_count);
 // Data for random primitive selection
 #define SCF_PRIM_RESCHED       0
 #define SCF_PRIM_SINGLE                1
-#define SCF_PRIM_MANY          2
-#define SCF_PRIM_ALL           3
-#define SCF_NPRIMS             7 // Need wait and no-wait versions of each,
-                                 //  except for SCF_PRIM_RESCHED.
+#define SCF_PRIM_SINGLE_RPC    2
+#define SCF_PRIM_MANY          3
+#define SCF_PRIM_ALL           4
+#define SCF_NPRIMS             8 // Need wait and no-wait versions of each,
+                                 //  except for SCF_PRIM_RESCHED and
+                                 //  SCF_PRIM_SINGLE_RPC.
 
 static char *scf_prim_name[] = {
        "resched_cpu",
        "smp_call_function_single",
+       "smp_call_function_single_rpc",
        "smp_call_function_many",
        "smp_call_function",
 };
@@ -128,6 +134,8 @@ struct scf_check {
        bool scfc_out;
        int scfc_cpu; // -1 for not _single().
        bool scfc_wait;
+       bool scfc_rpc;
+       struct completion scfc_completion;
 };
 
 // Use to wait for all threads to start.
@@ -158,6 +166,7 @@ static void scf_torture_stats_print(void)
                scfs.n_resched += scf_stats_p[i].n_resched;
                scfs.n_single += scf_stats_p[i].n_single;
                scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
+               scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
                scfs.n_single_wait += scf_stats_p[i].n_single_wait;
                scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
                scfs.n_many += scf_stats_p[i].n_many;
@@ -168,9 +177,10 @@ static void scf_torture_stats_print(void)
        if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
            atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
                bangstr = "!!! ";
-       pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld many: %lld/%lld all: %lld/%lld ",
+       pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
                 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
                 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
+                scfs.n_single_rpc, scfs.n_single_rpc_ofl,
                 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
        torture_onoff_stats();
        pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
@@ -282,10 +292,13 @@ static void scf_handler(void *scfc_in)
 out:
        if (unlikely(!scfcp))
                return;
-       if (scfcp->scfc_wait)
+       if (scfcp->scfc_wait) {
                WRITE_ONCE(scfcp->scfc_out, true);
-       else
+               if (scfcp->scfc_rpc)
+                       complete(&scfcp->scfc_completion);
+       } else {
                kfree(scfcp);
+       }
 }
 
 // As above, but check for correct CPU.
@@ -319,6 +332,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
                        scfcp->scfc_cpu = -1;
                        scfcp->scfc_wait = scfsp->scfs_wait;
                        scfcp->scfc_out = false;
+                       scfcp->scfc_rpc = false;
                }
        }
        switch (scfsp->scfs_prim) {
@@ -350,6 +364,34 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
                        scfcp = NULL;
                }
                break;
+       case SCF_PRIM_SINGLE_RPC:
+               if (!scfcp)
+                       break;
+               cpu = torture_random(trsp) % nr_cpu_ids;
+               scfp->n_single_rpc++;
+               scfcp->scfc_cpu = cpu;
+               scfcp->scfc_wait = true;
+               init_completion(&scfcp->scfc_completion);
+               scfcp->scfc_rpc = true;
+               barrier(); // Prevent race-reduction compiler optimizations.
+               scfcp->scfc_in = true;
+               ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
+               if (!ret) {
+                       if (use_cpus_read_lock)
+                               cpus_read_unlock();
+                       else
+                               preempt_enable();
+                       wait_for_completion(&scfcp->scfc_completion);
+                       if (use_cpus_read_lock)
+                               cpus_read_lock();
+                       else
+                               preempt_disable();
+               } else {
+                       scfp->n_single_rpc_ofl++;
+                       kfree(scfcp);
+                       scfcp = NULL;
+               }
+               break;
        case SCF_PRIM_MANY:
                if (scfsp->scfs_wait)
                        scfp->n_many_wait++;
@@ -379,10 +421,12 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
        }
        if (scfcp && scfsp->scfs_wait) {
                if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
-                                !scfcp->scfc_out))
+                                !scfcp->scfc_out)) {
+                       pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
                        atomic_inc(&n_mb_out_errs); // Leak rather than trash!
-               else
+               } else {
                        kfree(scfcp);
+               }
                barrier(); // Prevent race-reduction compiler optimizations.
        }
        if (use_cpus_read_lock)
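The new SCF_PRIM_SINGLE_RPC case above turns a no-wait cross-CPU call into a round trip: the caller fires smp_call_function_single() with wait=0, then blocks on a completion that the handler signals. A self-contained sketch of the same pattern, using hypothetical rpc_* names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/smp.h>

struct rpc_req {                        /* hypothetical request */
        struct completion done;
        int result;
};

static void rpc_handler(void *arg)      /* runs on the target CPU */
{
        struct rpc_req *req = arg;

        req->result = smp_processor_id();       /* stand-in for remote work */
        complete(&req->done);                   /* wake the waiting caller */
}

static int rpc_call(int cpu)
{
        struct rpc_req req;

        init_completion(&req.done);
        if (smp_call_function_single(cpu, rpc_handler, &req, 0))
                return -ENXIO;                  /* target CPU offline */
        wait_for_completion(&req.done);         /* the RPC "return" path */
        return req.result;
}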
@@ -453,8 +497,8 @@ static void
 scftorture_print_module_parms(const char *tag)
 {
        pr_alert(SCFTORT_FLAG
-                "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
-                verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
+                "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
+                verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
 }
 
 static void scf_cleanup_handler(void *unused)
@@ -469,7 +513,7 @@ static void scf_torture_cleanup(void)
                return;
 
        WRITE_ONCE(scfdone, true);
-       if (nthreads)
+       if (nthreads && scf_stats_p)
                for (i = 0; i < nthreads; i++)
                        torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
        else
@@ -497,6 +541,7 @@ static int __init scf_torture_init(void)
        int firsterr = 0;
        unsigned long weight_resched1 = weight_resched;
        unsigned long weight_single1 = weight_single;
+       unsigned long weight_single_rpc1 = weight_single_rpc;
        unsigned long weight_single_wait1 = weight_single_wait;
        unsigned long weight_many1 = weight_many;
        unsigned long weight_many_wait1 = weight_many_wait;
@@ -508,11 +553,13 @@ static int __init scf_torture_init(void)
 
        scftorture_print_module_parms("Start of test");
 
-       if (weight_resched == -1 && weight_single == -1 && weight_single_wait == -1 &&
+       if (weight_resched == -1 &&
+           weight_single == -1 && weight_single_rpc == -1 && weight_single_wait == -1 &&
            weight_many == -1 && weight_many_wait == -1 &&
            weight_all == -1 && weight_all_wait == -1) {
                weight_resched1 = 2 * nr_cpu_ids;
                weight_single1 = 2 * nr_cpu_ids;
+               weight_single_rpc1 = 2 * nr_cpu_ids;
                weight_single_wait1 = 2 * nr_cpu_ids;
                weight_many1 = 2;
                weight_many_wait1 = 2;
@@ -523,6 +570,8 @@ static int __init scf_torture_init(void)
                        weight_resched1 = 0;
                if (weight_single == -1)
                        weight_single1 = 0;
+               if (weight_single_rpc == -1)
+                       weight_single_rpc1 = 0;
                if (weight_single_wait == -1)
                        weight_single_wait1 = 0;
                if (weight_many == -1)
@@ -534,7 +583,7 @@ static int __init scf_torture_init(void)
                if (weight_all_wait == -1)
                        weight_all_wait1 = 0;
        }
-       if (weight_single1 == 0 && weight_single_wait1 == 0 &&
+       if (weight_single1 == 0 && weight_single_rpc1 == 0 && weight_single_wait1 == 0 &&
            weight_many1 == 0 && weight_many_wait1 == 0 &&
            weight_all1 == 0 && weight_all_wait1 == 0) {
                VERBOSE_SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
@@ -546,6 +595,7 @@ static int __init scf_torture_init(void)
        else if (weight_resched1)
                VERBOSE_SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
        scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
+       scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
        scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
        scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
        scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
index 20ffcc04413449d6eedc709e1dc8096f50a1cc65..2b9ed1172533e2c8a4d3e2063db3f6c2efa06c1e 100644 (file)
@@ -237,9 +237,30 @@ static DEFINE_MUTEX(sched_core_mutex);
 static atomic_t sched_core_count;
 static struct cpumask sched_core_mask;
 
+static void sched_core_lock(int cpu, unsigned long *flags)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       int t, i = 0;
+
+       local_irq_save(*flags);
+       for_each_cpu(t, smt_mask)
+               raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
+}
+
+static void sched_core_unlock(int cpu, unsigned long *flags)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       int t;
+
+       for_each_cpu(t, smt_mask)
+               raw_spin_unlock(&cpu_rq(t)->__lock);
+       local_irq_restore(*flags);
+}
+
 static void __sched_core_flip(bool enabled)
 {
-       int cpu, t, i;
+       unsigned long flags;
+       int cpu, t;
 
        cpus_read_lock();
 
@@ -250,19 +271,12 @@ static void __sched_core_flip(bool enabled)
        for_each_cpu(cpu, &sched_core_mask) {
                const struct cpumask *smt_mask = cpu_smt_mask(cpu);
 
-               i = 0;
-               local_irq_disable();
-               for_each_cpu(t, smt_mask) {
-                       /* supports up to SMT8 */
-                       raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
-               }
+               sched_core_lock(cpu, &flags);
 
                for_each_cpu(t, smt_mask)
                        cpu_rq(t)->core_enabled = enabled;
 
-               for_each_cpu(t, smt_mask)
-                       raw_spin_unlock(&cpu_rq(t)->__lock);
-               local_irq_enable();
+               sched_core_unlock(cpu, &flags);
 
                cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
        }
@@ -5736,35 +5750,109 @@ void queue_core_balance(struct rq *rq)
        queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
 }
 
-static inline void sched_core_cpu_starting(unsigned int cpu)
+static void sched_core_cpu_starting(unsigned int cpu)
 {
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
-       struct rq *rq, *core_rq = NULL;
-       int i;
+       struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
+       unsigned long flags;
+       int t;
+
+       sched_core_lock(cpu, &flags);
+
+       WARN_ON_ONCE(rq->core != rq);
 
-       core_rq = cpu_rq(cpu)->core;
+       /* if we're the first, we'll be our own leader */
+       if (cpumask_weight(smt_mask) == 1)
+               goto unlock;
 
-       if (!core_rq) {
-               for_each_cpu(i, smt_mask) {
-                       rq = cpu_rq(i);
-                       if (rq->core && rq->core == rq)
-                               core_rq = rq;
+       /* find the leader */
+       for_each_cpu(t, smt_mask) {
+               if (t == cpu)
+                       continue;
+               rq = cpu_rq(t);
+               if (rq->core == rq) {
+                       core_rq = rq;
+                       break;
                }
+       }
 
-               if (!core_rq)
-                       core_rq = cpu_rq(cpu);
+       if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
+               goto unlock;
 
-               for_each_cpu(i, smt_mask) {
-                       rq = cpu_rq(i);
+       /* install and validate core_rq */
+       for_each_cpu(t, smt_mask) {
+               rq = cpu_rq(t);
 
-                       WARN_ON_ONCE(rq->core && rq->core != core_rq);
+               if (t == cpu)
                        rq->core = core_rq;
-               }
+
+               WARN_ON_ONCE(rq->core != core_rq);
        }
+
+unlock:
+       sched_core_unlock(cpu, &flags);
 }
+
+static void sched_core_cpu_deactivate(unsigned int cpu)
+{
+       const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+       struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
+       unsigned long flags;
+       int t;
+
+       sched_core_lock(cpu, &flags);
+
+       /* if we're the last man standing, nothing to do */
+       if (cpumask_weight(smt_mask) == 1) {
+               WARN_ON_ONCE(rq->core != rq);
+               goto unlock;
+       }
+
+       /* if we're not the leader, nothing to do */
+       if (rq->core != rq)
+               goto unlock;
+
+       /* find a new leader */
+       for_each_cpu(t, smt_mask) {
+               if (t == cpu)
+                       continue;
+               core_rq = cpu_rq(t);
+               break;
+       }
+
+       if (WARN_ON_ONCE(!core_rq)) /* impossible */
+               goto unlock;
+
+       /* copy the shared state to the new leader */
+       core_rq->core_task_seq      = rq->core_task_seq;
+       core_rq->core_pick_seq      = rq->core_pick_seq;
+       core_rq->core_cookie        = rq->core_cookie;
+       core_rq->core_forceidle     = rq->core_forceidle;
+       core_rq->core_forceidle_seq = rq->core_forceidle_seq;
+
+       /* install new leader */
+       for_each_cpu(t, smt_mask) {
+               rq = cpu_rq(t);
+               rq->core = core_rq;
+       }
+
+unlock:
+       sched_core_unlock(cpu, &flags);
+}
+
+static inline void sched_core_cpu_dying(unsigned int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+
+       if (rq->core != rq)
+               rq->core = rq;
+}
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline void sched_core_cpu_starting(unsigned int cpu) {}
+static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
+static inline void sched_core_cpu_dying(unsigned int cpu) {}
 
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
@@ -7761,6 +7849,17 @@ int __sched __cond_resched(void)
                preempt_schedule_common();
                return 1;
        }
+       /*
+        * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
+        * whether the current CPU is in an RCU read-side critical section,
+        * so the tick can report quiescent states even for CPUs looping
+        * in kernel context.  In contrast, in non-preemptible kernels,
+        * RCU readers leave no in-memory hints, which means that CPU-bound
+        * processes executing in kernel context might never report an
+        * RCU quiescent state.  Therefore, the following code causes
+        * cond_resched() to report a quiescent state, but only when RCU
+        * is in urgent need of one.
+        */
 #ifndef CONFIG_PREEMPT_RCU
        rcu_all_qs();
 #endif
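The comment block added above is worth a concrete illustration: on a CONFIG_PREEMPT_RCU=n build, a CPU-bound kernel loop would never report a quiescent state were it not for the rcu_all_qs() reached through cond_resched(). A sketch under that assumption; struct item and process_item() are hypothetical:

#include <linux/list.h>
#include <linux/sched.h>

struct item {                           /* hypothetical */
        struct list_head node;
};

static void process_item(struct item *it)
{
        /* CPU-bound work elided */
}

static void process_all(struct list_head *items)
{
        struct item *it;

        list_for_each_entry(it, items, node) {
                process_item(it);
                cond_resched(); /* reports an RCU QS when one is urgently needed */
        }
}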
@@ -8707,6 +8806,8 @@ int sched_cpu_deactivate(unsigned int cpu)
         */
        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
                static_branch_dec_cpuslocked(&sched_smt_present);
+
+       sched_core_cpu_deactivate(cpu);
 #endif
 
        if (!sched_smp_initialized)
@@ -8811,6 +8912,7 @@ int sched_cpu_dying(unsigned int cpu)
        calc_load_migrate(rq);
        update_max_interval();
        hrtick_clear(rq);
+       sched_core_cpu_dying(cpu);
        return 0;
 }
 #endif
@@ -9022,7 +9124,7 @@ void __init sched_init(void)
                atomic_set(&rq->nr_iowait, 0);
 
 #ifdef CONFIG_SCHED_CORE
-               rq->core = NULL;
+               rq->core = rq;
                rq->core_pick = NULL;
                rq->core_enabled = 0;
                rq->core_tree = RB_ROOT;
index 14a41a243f7baf308cfbda57c3d3a8c6b3e0d9a3..ddefb0419d7ae476b7fe6552366d657e2df4d354 100644 (file)
@@ -1093,7 +1093,7 @@ struct rq {
        unsigned int            core_sched_seq;
        struct rb_root          core_tree;
 
-       /* shared state */
+       /* shared state -- careful with sched_core_cpu_deactivate() */
        unsigned int            core_task_seq;
        unsigned int            core_pick_seq;
        unsigned long           core_cookie;
@@ -2255,6 +2255,9 @@ static inline struct task_struct *get_push_task(struct rq *rq)
        if (p->nr_cpus_allowed == 1)
                return NULL;
 
+       if (p->migration_disabled)
+               return NULL;
+
        rq->push_busy = true;
        return get_task_struct(p);
 }
index 057e17f3215d5633fc18df103fae36702a63bba4..6469eca8078ca01eb3e0407d741964a25e88d2d1 100644 (file)
@@ -602,7 +602,7 @@ static inline void seccomp_sync_threads(unsigned long flags)
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);
                atomic_set(&thread->seccomp.filter_count,
-                          atomic_read(&thread->seccomp.filter_count));
+                          atomic_read(&caller->seccomp.filter_count));
 
                /*
                 * Don't let an unprivileged task work around
index 0a315c387bedb25a630a61577d4e7b01471d1466..bb8f411c974b8d51297708f51dedc41c46c11757 100644 (file)
@@ -521,11 +521,11 @@ static void torture_shuffle_tasks(void)
        struct shuffle_task *stp;
 
        cpumask_setall(shuffle_tmp_mask);
-       get_online_cpus();
+       cpus_read_lock();
 
        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
-               put_online_cpus();
+               cpus_read_unlock();
                return;
        }
 
@@ -541,7 +541,7 @@ static void torture_shuffle_tasks(void)
                set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
        mutex_unlock(&shuffle_task_mutex);
 
-       put_online_cpus();
+       cpus_read_unlock();
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
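get_online_cpus()/put_online_cpus() are deprecated aliases for cpus_read_lock()/cpus_read_unlock(), so the replacement above is purely mechanical; the semantics of the read-side critical section are unchanged:

        cpus_read_lock();
        /* no CPU can come online or go offline in here */
        cpus_read_unlock();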
index d567b1717c4c85adaad9cda48dd6aa173816a51a..3ee23f4d437fed538729362502ae3a219700869a 100644 (file)
@@ -219,6 +219,11 @@ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        depends on DYNAMIC_FTRACE_WITH_REGS
        depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 
+config DYNAMIC_FTRACE_WITH_ARGS
+       def_bool y
+       depends on DYNAMIC_FTRACE
+       depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
 config FUNCTION_PROFILER
        bool "Kernel function profiler"
        depends on FUNCTION_TRACER
index b4916ef388ad1fbdd476e04de9e6e45c0e3e369b..fdd14072fc3bd4ad4bea4afa10cd73dd44b8de72 100644 (file)
@@ -990,28 +990,29 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_numa_node_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
-       case BPF_FUNC_probe_write_user:
-               return bpf_get_probe_write_proto();
        case BPF_FUNC_current_task_under_cgroup:
                return &bpf_current_task_under_cgroup_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
+       case BPF_FUNC_probe_write_user:
+               return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+                      NULL : bpf_get_probe_write_proto();
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
-               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+               return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS
index 7b180f61e6d3ccfa736a925a6cf9731f58be25f8..7efbc8aaf7f647dfc32bad4f98533d5ab2cd3273 100644 (file)
@@ -3100,6 +3100,7 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 
 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
+       bool init_nop = ftrace_need_init_nop();
        struct ftrace_page *pg;
        struct dyn_ftrace *p;
        u64 start, stop;
@@ -3138,8 +3139,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
                         * Do the initial record conversion from mcount jump
                         * to the NOP instructions.
                         */
-                       if (!__is_defined(CC_USING_NOP_MCOUNT) &&
-                           !ftrace_nop_initialize(mod, p))
+                       if (init_nop && !ftrace_nop_initialize(mod, p))
                                break;
 
                        update_cnt++;
index 33899a71fdc19648ad099dc307c10f4d990d0682..a1adb29ef5c1802916fb84f45ba34b8a48549d02 100644 (file)
@@ -2897,14 +2897,26 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
 
 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 {
+       enum event_trigger_type tt = ETT_NONE;
+       struct trace_event_file *file = fbuffer->trace_file;
+
+       if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
+                       fbuffer->entry, &tt))
+               goto discard;
+
        if (static_key_false(&tracepoint_printk_key.key))
                output_printk(fbuffer);
 
        if (static_branch_unlikely(&trace_event_exports_enabled))
                ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
-       event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
-                                   fbuffer->event, fbuffer->entry,
-                                   fbuffer->trace_ctx, fbuffer->regs);
+
+       trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
+                       fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
+
+discard:
+       if (tt)
+               event_triggers_post_call(file, tt);
+
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
index a180abf76d4e1a3291e0c53dd064773a711e9fee..4a0e693000c6ccf667f481bd52ff6e1a1095ed54 100644 (file)
@@ -1389,38 +1389,6 @@ event_trigger_unlock_commit(struct trace_event_file *file,
                event_triggers_post_call(file, tt);
 }
 
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer associated with the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @trace_ctx: The tracing context flags.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * checks whether the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
-                                struct trace_buffer *buffer,
-                                struct ring_buffer_event *event,
-                                void *entry, unsigned int trace_ctx,
-                                struct pt_regs *regs)
-{
-       enum event_trigger_type tt = ETT_NONE;
-
-       if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-               trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-                                               trace_ctx, regs);
-
-       if (tt)
-               event_triggers_post_call(file, tt);
-}
-
 #define FILTER_PRED_INVALID    ((unsigned short)-1)
 #define FILTER_PRED_IS_RIGHT   (1 << 15)
 #define FILTER_PRED_FOLD       (1 << 15)
index 949ef09dc53799af3fcc9a176ca5966b1381e636..a48aa2a2875b5044c493c2b18a46abc3a3acd239 100644 (file)
@@ -3430,6 +3430,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
                        event = data->match_data.event;
                }
 
+               if (!event)
+                       goto free;
                /*
                 * At this point, we're looking at a field on another
                 * event.  Because we can't modify a hist trigger on
index a7e3c24dee13f03f576a3eb5445ab1849c7288a6..b61eefe5ccf53134044821f09940cd58cc96b8de 100644 (file)
@@ -253,10 +253,40 @@ static struct osnoise_data {
  */
 static bool osnoise_busy;
 
+#ifdef CONFIG_PREEMPT_RT
 /*
  * Print the osnoise header info.
  */
 static void print_osnoise_headers(struct seq_file *s)
+{
+       if (osnoise_data.tainted)
+               seq_puts(s, "# osnoise is tainted!\n");
+
+       seq_puts(s, "#                                _-------=> irqs-off\n");
+       seq_puts(s, "#                               / _------=> need-resched\n");
+       seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
+       seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
+       seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
+       seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
+       seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
+
+       seq_puts(s, "#                              |||||| /          ");
+       seq_puts(s, "                                     MAX\n");
+
+       seq_puts(s, "#                              ||||| /                         ");
+       seq_puts(s, "                    SINGLE      Interference counters:\n");
+
+       seq_puts(s, "#                              |||||||               RUNTIME   ");
+       seq_puts(s, "   NOISE  %% OF CPU  NOISE    +-----------------------------+\n");
+
+       seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    IN US    ");
+       seq_puts(s, "   IN US  AVAILABLE  IN US     HW    NMI    IRQ   SIRQ THREAD\n");
+
+       seq_puts(s, "#              | |         |   |||||||      |           |      ");
+       seq_puts(s, "       |    |            |      |      |      |      |      |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
+static void print_osnoise_headers(struct seq_file *s)
 {
        if (osnoise_data.tainted)
                seq_puts(s, "# osnoise is tainted!\n");
@@ -279,6 +309,7 @@ static void print_osnoise_headers(struct seq_file *s)
        seq_puts(s, "#              | |         |   ||||      |           |      ");
        seq_puts(s, "       |    |            |      |      |      |      |      |\n");
 }
+#endif /* CONFIG_PREEMPT_RT */
 
 /*
  * osnoise_taint - report an osnoise error.
@@ -323,6 +354,24 @@ static void trace_osnoise_sample(struct osnoise_sample *sample)
 /*
  * Print the timerlat header info.
  */
+#ifdef CONFIG_PREEMPT_RT
+static void print_timerlat_headers(struct seq_file *s)
+{
+       seq_puts(s, "#                                _-------=> irqs-off\n");
+       seq_puts(s, "#                               / _------=> need-resched\n");
+       seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
+       seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
+       seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
+       seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
+       seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
+       seq_puts(s, "#                              |||||| /\n");
+       seq_puts(s, "#                              |||||||             ACTIVATION\n");
+       seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    ID     ");
+       seq_puts(s, "       CONTEXT                LATENCY\n");
+       seq_puts(s, "#              | |         |   |||||||      |         |      ");
+       seq_puts(s, "            |                       |\n");
+}
+#else /* CONFIG_PREEMPT_RT */
 static void print_timerlat_headers(struct seq_file *s)
 {
        seq_puts(s, "#                                _-----=> irqs-off\n");
@@ -336,6 +385,7 @@ static void print_timerlat_headers(struct seq_file *s)
        seq_puts(s, "#              | |         |   ||||      |         |      ");
        seq_puts(s, "            |                       |\n");
 }
+#endif /* CONFIG_PREEMPT_RT */
 
 /*
 * Record a timerlat_sample into the tracer buffer.
@@ -1025,9 +1075,13 @@ diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *
 /*
  * osnoise_stop_tracing - Stop tracing and the tracer.
  */
-static void osnoise_stop_tracing(void)
+static __always_inline void osnoise_stop_tracing(void)
 {
        struct trace_array *tr = osnoise_trace;
+
+       trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+                       "stop tracing hit on cpu %d\n", smp_processor_id());
+
        tracer_tracing_off(tr);
 }
 
index 77be3bbe3cc4b443cc3c6b0fb1dcb111edc14b7c..bb51849e6375288493d1429e07a49ee8de925986 100644 (file)
@@ -58,14 +58,17 @@ static struct ctl_table_root set_root = {
        .permissions = set_permissions,
 };
 
-#define UCOUNT_ENTRY(name)                             \
-       {                                               \
-               .procname       = name,                 \
-               .maxlen         = sizeof(int),          \
-               .mode           = 0644,                 \
-               .proc_handler   = proc_dointvec_minmax, \
-               .extra1         = SYSCTL_ZERO,          \
-               .extra2         = SYSCTL_INT_MAX,       \
+static long ue_zero = 0;
+static long ue_int_max = INT_MAX;
+
+#define UCOUNT_ENTRY(name)                                     \
+       {                                                       \
+               .procname       = name,                         \
+               .maxlen         = sizeof(long),                 \
+               .mode           = 0644,                         \
+               .proc_handler   = proc_doulongvec_minmax,       \
+               .extra1         = &ue_zero,                     \
+               .extra2         = &ue_int_max,                  \
        }
 static struct ctl_table user_table[] = {
        UCOUNT_ENTRY("max_user_namespaces"),
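Because the ucount limits are now long-sized, the table switches to proc_doulongvec_minmax() with long-typed bounds; the handler, .maxlen, and .extra1/.extra2 must all agree on the element width, or the handler will read and write past int-sized storage. A matching hypothetical entry for a standalone long-valued knob:

#include <linux/limits.h>
#include <linux/sysctl.h>

static long demo_limit;                 /* hypothetical knob */
static long demo_min, demo_max = LONG_MAX;

static struct ctl_table demo_table[] = {
        {
                .procname       = "demo_limit",
                .data           = &demo_limit,          /* long-sized storage */
                .maxlen         = sizeof(long),         /* matches the handler */
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = &demo_min,            /* long-typed bounds */
                .extra2         = &demo_max,
        },
        { }
};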
index 14c032de276e6bf71b745ea25bb820f5c28a5580..545ccbddf6a1da69f56dda720ab396012ce0a81d 100644 (file)
@@ -128,3 +128,6 @@ config CRYPTO_LIB_CHACHA20POLY1305
 
 config CRYPTO_LIB_SHA256
        tristate
+
+config CRYPTO_LIB_SM4
+       tristate
index 3a435629d9ce9c4806f75068d1891dad411556e6..73205ed269bad635d2d057272d01abca4b2dd3f6 100644 (file)
@@ -38,6 +38,9 @@ libpoly1305-y                                 += poly1305.o
 obj-$(CONFIG_CRYPTO_LIB_SHA256)                        += libsha256.o
 libsha256-y                                    := sha256.o
 
+obj-$(CONFIG_CRYPTO_LIB_SM4)                   += libsm4.o
+libsm4-y                                       := sm4.o
+
 ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
 libblake2s-y                                   += blake2s-selftest.o
 libchacha20poly1305-y                          += chacha20poly1305-selftest.o
index c64ac8bfb6a97b7c78acb5d8cca570c4c9bfe71f..4055aa593ec498606fdf0196056dd6c37c29ff94 100644 (file)
@@ -73,7 +73,7 @@ void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
 }
 EXPORT_SYMBOL(blake2s256_hmac);
 
-static int __init mod_init(void)
+static int __init blake2s_mod_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!blake2s_selftest()))
@@ -81,12 +81,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit blake2s_mod_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(blake2s_mod_init);
+module_exit(blake2s_mod_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("BLAKE2s hash function");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index c2fcdb98cc02c2d2bb9002d12014d56bed3f9e12..fa6a9440fc95e3e9344514e4797153b40758b69b 100644 (file)
@@ -354,7 +354,7 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
 }
 EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
 
-static int __init mod_init(void)
+static int __init chacha20poly1305_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!chacha20poly1305_selftest()))
@@ -362,12 +362,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit chacha20poly1305_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(chacha20poly1305_init);
+module_exit(chacha20poly1305_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index fb29739e8c290262b911e18771bea7fc3f3f7f05..064b352c6907b168c77f656399af65586965332d 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
-static int __init mod_init(void)
+static int __init curve25519_init(void)
 {
        if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
            WARN_ON(!curve25519_selftest()))
@@ -21,12 +21,12 @@ static int __init mod_init(void)
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit curve25519_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(curve25519_init);
+module_exit(curve25519_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Curve25519 scalar multiplication");
diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c
new file mode 100644 (file)
index 0000000..633b59f
--- /dev/null
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4, as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <crypto/sm4.h>
+
+static const u32 fk[4] = {
+       0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
+};
+
+static const u32 __cacheline_aligned ck[32] = {
+       0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
+       0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
+       0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+       0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
+       0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
+       0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+       0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
+       0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
+};
+
+static const u8 __cacheline_aligned sbox[256] = {
+       0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
+       0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
+       0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
+       0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+       0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
+       0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
+       0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
+       0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
+       0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
+       0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
+       0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
+       0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
+       0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
+       0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
+       0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
+       0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
+       0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
+       0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
+       0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
+       0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
+       0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
+       0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
+       0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
+       0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
+       0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
+       0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
+       0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
+       0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
+       0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
+       0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
+       0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
+       0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
+};
+
+static inline u32 sm4_t_non_lin_sub(u32 x)
+{
+       u32 out;
+
+       out  = (u32)sbox[x & 0xff];
+       out |= (u32)sbox[(x >> 8) & 0xff] << 8;
+       out |= (u32)sbox[(x >> 16) & 0xff] << 16;
+       out |= (u32)sbox[(x >> 24) & 0xff] << 24;
+
+       return out;
+}
+
+static inline u32 sm4_key_lin_sub(u32 x)
+{
+       return x ^ rol32(x, 13) ^ rol32(x, 23);
+}
+
+static inline u32 sm4_enc_lin_sub(u32 x)
+{
+       return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
+}
+
+static inline u32 sm4_key_sub(u32 x)
+{
+       return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_enc_sub(u32 x)
+{
+       return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
+{
+       return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
+}
+
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx:       The location where the computed key will be stored.
+ * @in_key:    The supplied key.
+ * @key_len:   The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size is
+ * supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
+                         unsigned int key_len)
+{
+       u32 rk[4];
+       const u32 *key = (u32 *)in_key;
+       int i;
+
+       if (key_len != SM4_KEY_SIZE)
+               return -EINVAL;
+
+       rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];
+       rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];
+       rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];
+       rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];
+
+       for (i = 0; i < 32; i += 4) {
+               rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);
+               rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);
+               rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);
+               rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]);
+
+               ctx->rkey_enc[i + 0] = rk[0];
+               ctx->rkey_enc[i + 1] = rk[1];
+               ctx->rkey_enc[i + 2] = rk[2];
+               ctx->rkey_enc[i + 3] = rk[3];
+               ctx->rkey_dec[31 - 0 - i] = rk[0];
+               ctx->rkey_dec[31 - 1 - i] = rk[1];
+               ctx->rkey_dec[31 - 2 - i] = rk[2];
+               ctx->rkey_dec[31 - 3 - i] = rk[3];
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sm4_expandkey);
+
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk:                The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out:       Buffer to store output data
+ * @in:        Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in)
+{
+       u32 x[4], i;
+
+       x[0] = get_unaligned_be32(in + 0 * 4);
+       x[1] = get_unaligned_be32(in + 1 * 4);
+       x[2] = get_unaligned_be32(in + 2 * 4);
+       x[3] = get_unaligned_be32(in + 3 * 4);
+
+       for (i = 0; i < 32; i += 4) {
+               x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
+               x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
+               x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
+               x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
+       }
+
+       put_unaligned_be32(x[3 - 0], out + 0 * 4);
+       put_unaligned_be32(x[3 - 1], out + 1 * 4);
+       put_unaligned_be32(x[3 - 2], out + 2 * 4);
+       put_unaligned_be32(x[3 - 3], out + 3 * 4);
+}
+EXPORT_SYMBOL_GPL(sm4_crypt_block);
+
+MODULE_DESCRIPTION("Generic SM4 library");
+MODULE_LICENSE("GPL v2");
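For users of the new library the flow is: expand the key once, then pass ctx.rkey_enc (encrypt) or ctx.rkey_dec (decrypt) to sm4_crypt_block(). A minimal sketch of a round-trip check (the helper name and calling context are illustrative, not part of this patch):

    #include <crypto/sm4.h>
    #include <linux/string.h>

    static int sm4_roundtrip_one_block(const u8 *key, const u8 *plain)
    {
            struct sm4_ctx ctx;
            u8 enc[SM4_BLOCK_SIZE], dec[SM4_BLOCK_SIZE];
            int err;

            /* Only a 16-byte (SM4_KEY_SIZE) key is accepted. */
            err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
            if (err)
                    return err;

            sm4_crypt_block(ctx.rkey_enc, enc, plain);  /* encrypt one block */
            sm4_crypt_block(ctx.rkey_dec, dec, enc);    /* decrypt it again */

            return memcmp(dec, plain, SM4_BLOCK_SIZE) ? -EINVAL : 0;
    }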
index c0d67c541849a28f70ab265141cc6a001f9bce4d..60be9e24bd576ac410f52a894e09d9df4ad5ebea 100644 (file)
@@ -19,7 +19,7 @@
  */
 int devmem_is_allowed(unsigned long pfn)
 {
-       if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+       if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
index ced5c15d3f048105d374a064b9126d4c4cb8e405..a1a7dfa881def894cc8cce8368cb6e9c21dcd6b1 100644 (file)
@@ -241,5 +241,36 @@ int linear_range_get_selector_high(const struct linear_range *r,
 }
 EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
 
+/**
+ * linear_range_get_selector_within - return linear range selector for value
+ * @r:         pointer to linear range where selector is looked from
+ * @val:       value for which the selector is searched
+ * @selector:  address where found selector value is updated
+ *
+ * Return the selector whose range value is the closest match for the given
+ * input value. A value matches if it is equal to or lower than the given
+ * value; if the given value is higher than the maximum range value, the
+ * maximum selector is returned.
+ */
+void linear_range_get_selector_within(const struct linear_range *r,
+                                     unsigned int val, unsigned int *selector)
+{
+       if (r->min > val) {
+               *selector = r->min_sel;
+               return;
+       }
+
+       if (linear_range_get_max_value(r) < val) {
+               *selector = r->max_sel;
+               return;
+       }
+
+       if (r->step == 0)
+               *selector = r->min_sel;
+       else
+               *selector = (val - r->min) / r->step + r->min_sel;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
+
 MODULE_DESCRIPTION("linear-ranges helper");
 MODULE_LICENSE("GPL");
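As a quick illustration of the rounding-down behaviour (the range values below are made up): a range starting at 800 at selector 0, stepping by 50 up to selector 10, maps a request of 1060 to selector 5, i.e. value 1050, the closest value that does not exceed the request.

    #include <linux/linear_range.h>

    /* Illustrative range: values 800..1300 in steps of 50, selectors 0..10. */
    static const struct linear_range example_range = {
            .min = 800, .min_sel = 0, .max_sel = 10, .step = 50,
    };

    static unsigned int example_pick(unsigned int val)
    {
            unsigned int sel;

            /* val = 1060 yields sel = 5 (1050); val > 1300 yields sel = 10. */
            linear_range_get_selector_within(&example_range, val, &sel);
            return sel;
    }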
index 9a75ca3f7edf95e6956d82ff9f1b00ce66f3fac2..bc81419f400c5576e50b0afca66a9188b3b52d68 100644 (file)
@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
                return 0;       /* no need to do it */
 
        if (a->d) {
-               p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+               p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
                memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
index 8b7d6235217ee3bcea4612e4d9cbd713ff12e74b..59149bf3bfb4a97e4fa7febee737155d700bae48 100644 (file)
@@ -3,10 +3,12 @@
 #include <linux/spinlock.h>
 #include <linux/once.h>
 #include <linux/random.h>
+#include <linux/module.h>
 
 struct once_work {
        struct work_struct work;
        struct static_key_true *key;
+       struct module *module;
 };
 
 static void once_deferred(struct work_struct *w)
@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
        work = container_of(w, struct once_work, work);
        BUG_ON(!static_key_enabled(work->key));
        static_branch_disable(work->key);
+       module_put(work->module);
        kfree(work);
 }
 
-static void once_disable_jump(struct static_key_true *key)
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
 {
        struct once_work *w;
 
@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
 
        INIT_WORK(&w->work, once_deferred);
        w->key = key;
+       w->module = mod;
+       __module_get(mod);
        schedule_work(&w->work);
 }
 
@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
 EXPORT_SYMBOL(__do_once_start);
 
 void __do_once_done(bool *done, struct static_key_true *once_key,
-                   unsigned long *flags)
+                   unsigned long *flags, struct module *mod)
        __releases(once_lock)
 {
        *done = true;
        spin_unlock_irqrestore(&once_lock, *flags);
-       once_disable_jump(once_key);
+       once_disable_jump(once_key, mod);
 }
 EXPORT_SYMBOL(__do_once_done);
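The practical effect is for modular callers of DO_ONCE() and its wrappers: the deferred work that patches out the static branch now holds a reference on the owning module, so an rmmod can no longer race with it. A hedged sketch of the usual pattern (the function and seed names are illustrative):

    #include <linux/once.h>
    #include <linux/random.h>

    static u32 hash_seed;

    static u32 example_hash(u32 x)
    {
            /*
             * get_random_once() expands to DO_ONCE(); with this fix the
             * deferred static-branch disable pins THIS_MODULE until the
             * work has actually run.
             */
            get_random_once(&hash_seed, sizeof(hash_seed));
            return x ^ hash_seed;
    }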
index 77bd0b1d32967c92a30c70df9afceb244587d0e5..b2de45a581f4f56fc83a9dcdc83ad56cda94c1e2 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 
+#include <asm/unaligned.h>
 #include <asm/byteorder.h>
 #include <asm/word-at-a-time.h>
 #include <asm/page.h>
@@ -935,6 +936,21 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
        const unsigned char *su1, *su2;
        int res = 0;
 
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (count >= sizeof(unsigned long)) {
+               const unsigned long *u1 = cs;
+               const unsigned long *u2 = ct;
+               do {
+                       if (get_unaligned(u1) != get_unaligned(u2))
+                               break;
+                       u1++;
+                       u2++;
+                       count -= sizeof(unsigned long);
+               } while (count >= sizeof(unsigned long));
+               cs = u1;
+               ct = u2;
+       }
+#endif
        for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
                if ((res = *su1 - *su2) != 0)
                        break;
index d1458ecf2f51ec2100eae25e3c53801e96acc0e5..0fad08331cf441a22323e54fefdeb4608720e4fa 100644 (file)
@@ -76,8 +76,9 @@
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->i_pages lock
  *
- *  ->i_mutex
- *    ->i_mmap_rwsem           (truncate->unmap_mapping_range)
+ *  ->i_rwsem
+ *    ->invalidate_lock                (acquired by fs in truncate path)
+ *      ->i_mmap_rwsem         (truncate->unmap_mapping_range)
  *
  *  ->mmap_lock
  *    ->i_mmap_rwsem
  *        ->i_pages lock       (arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_lock
- *    ->lock_page              (access_process_vm)
+ *    ->invalidate_lock                (filemap_fault)
+ *      ->lock_page            (filemap_fault, access_process_vm)
  *
- *  ->i_mutex                  (generic_perform_write)
+ *  ->i_rwsem                  (generic_perform_write)
  *    ->mmap_lock              (fault_in_pages_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
@@ -1007,6 +1009,44 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
+/**
+ * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
+ * @mapping1: the first mapping to lock
+ * @mapping2: the second mapping to lock
+ *
+ * Exclusively lock the invalidate_lock of any passed mapping that is
+ * not NULL.
+ */
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+                                struct address_space *mapping2)
+{
+       if (mapping1 > mapping2)
+               swap(mapping1, mapping2);
+       if (mapping1)
+               down_write(&mapping1->invalidate_lock);
+       if (mapping2 && mapping1 != mapping2)
+               down_write_nested(&mapping2->invalidate_lock, 1);
+}
+EXPORT_SYMBOL(filemap_invalidate_lock_two);
+
+/**
+ * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
+ * @mapping1: the first mapping to unlock
+ * @mapping2: the second mapping to unlock
+ *
+ * Unlock the exclusively held invalidate_lock of any passed mapping
+ * that is not NULL.
+ */
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+                                  struct address_space *mapping2)
+{
+       if (mapping1)
+               up_write(&mapping1->invalidate_lock);
+       if (mapping2 && mapping1 != mapping2)
+               up_write(&mapping2->invalidate_lock);
+}
+EXPORT_SYMBOL(filemap_invalidate_unlock_two);
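Note that the lock helper sorts the two mappings by address before taking the locks, so every caller gets the same lock order. A sketch of an intended cross-inode caller, e.g. a remap/dedupe-style operation (the function is illustrative, not from this patch):

    static void example_cross_inode_op(struct inode *src, struct inode *dst)
    {
            filemap_invalidate_lock_two(src->i_mapping, dst->i_mapping);

            /* ... evict page cache and remap blocks on both inodes ... */

            filemap_invalidate_unlock_two(src->i_mapping, dst->i_mapping);
    }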
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -2368,20 +2408,30 @@ static int filemap_update_page(struct kiocb *iocb,
 {
        int error;
 
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (!filemap_invalidate_trylock_shared(mapping))
+                       return -EAGAIN;
+       } else {
+               filemap_invalidate_lock_shared(mapping);
+       }
+
        if (!trylock_page(page)) {
+               error = -EAGAIN;
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
-                       return -EAGAIN;
+                       goto unlock_mapping;
                if (!(iocb->ki_flags & IOCB_WAITQ)) {
+                       filemap_invalidate_unlock_shared(mapping);
                        put_and_wait_on_page_locked(page, TASK_KILLABLE);
                        return AOP_TRUNCATED_PAGE;
                }
                error = __lock_page_async(page, iocb->ki_waitq);
                if (error)
-                       return error;
+                       goto unlock_mapping;
        }
 
+       error = AOP_TRUNCATED_PAGE;
        if (!page->mapping)
-               goto truncated;
+               goto unlock;
 
        error = 0;
        if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page))
@@ -2392,15 +2442,13 @@ static int filemap_update_page(struct kiocb *iocb,
                goto unlock;
 
        error = filemap_read_page(iocb->ki_filp, mapping, page);
-       if (error == AOP_TRUNCATED_PAGE)
-               put_page(page);
-       return error;
-truncated:
-       unlock_page(page);
-       put_page(page);
-       return AOP_TRUNCATED_PAGE;
+       goto unlock_mapping;
 unlock:
        unlock_page(page);
+unlock_mapping:
+       filemap_invalidate_unlock_shared(mapping);
+       if (error == AOP_TRUNCATED_PAGE)
+               put_page(page);
        return error;
 }
 
@@ -2415,6 +2463,19 @@ static int filemap_create_page(struct file *file,
        if (!page)
                return -ENOMEM;
 
+       /*
+        * Protect against truncate / hole punch. Grabbing invalidate_lock here
+        * ensures we cannot instantiate new pagecache pages and bring them
+        * uptodate after evicting page cache during truncate and before actually
+        * freeing blocks.  Note that we could release invalidate_lock after
+        * inserting the page into page cache as the locked page would then be
+        * enough to synchronize with hole punching. But there are code paths
+        * such as filemap_update_page() filling in partially uptodate pages or
+        * ->readpages() that need to hold invalidate_lock while mapping blocks
+        * for IO so let's hold the lock here as well to keep locking rules
+        * simple.
+        */
+       filemap_invalidate_lock_shared(mapping);
        error = add_to_page_cache_lru(page, mapping, index,
                        mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (error == -EEXIST)
@@ -2426,9 +2487,11 @@ static int filemap_create_page(struct file *file,
        if (error)
                goto error;
 
+       filemap_invalidate_unlock_shared(mapping);
        pagevec_add(pvec, page);
        return 0;
 error:
+       filemap_invalidate_unlock_shared(mapping);
        put_page(page);
        return error;
 }
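The comment above describes the fault/read side; for completeness, the truncate side of the same protocol looks roughly like this in a filesystem (a sketch, assuming the filemap_invalidate_lock()/unlock() helpers added elsewhere in this series):

    filemap_invalidate_lock(inode->i_mapping);
    truncate_setsize(inode, newsize);
    /* ... free the filesystem blocks backing the truncated range ... */
    filemap_invalidate_unlock(inode->i_mapping);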
@@ -2967,6 +3030,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
        pgoff_t max_off;
        struct page *page;
        vm_fault_t ret = 0;
+       bool mapping_locked = false;
 
        max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(offset >= max_off))
@@ -2976,25 +3040,39 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
         * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
-       if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
+       if (likely(page)) {
                /*
-                * We found the page, so try async readahead before
-                * waiting for the lock.
+                * We found the page, so try async readahead before waiting for
+                * the lock.
                 */
-               fpin = do_async_mmap_readahead(vmf, page);
-       } else if (!page) {
+               if (!(vmf->flags & FAULT_FLAG_TRIED))
+                       fpin = do_async_mmap_readahead(vmf, page);
+               if (unlikely(!PageUptodate(page))) {
+                       filemap_invalidate_lock_shared(mapping);
+                       mapping_locked = true;
+               }
+       } else {
                /* No page in the page cache at all */
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
                fpin = do_sync_mmap_readahead(vmf);
 retry_find:
+               /*
+                * See the comment in filemap_create_page() for why we need
+                * invalidate_lock
+                */
+               if (!mapping_locked) {
+                       filemap_invalidate_lock_shared(mapping);
+                       mapping_locked = true;
+               }
                page = pagecache_get_page(mapping, offset,
                                          FGP_CREAT|FGP_FOR_MMAP,
                                          vmf->gfp_mask);
                if (!page) {
                        if (fpin)
                                goto out_retry;
+                       filemap_invalidate_unlock_shared(mapping);
                        return VM_FAULT_OOM;
                }
        }
@@ -3014,8 +3092,20 @@ retry_find:
         * We have a locked page in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error.
         */
-       if (unlikely(!PageUptodate(page)))
+       if (unlikely(!PageUptodate(page))) {
+               /*
+                * The page was in cache and uptodate and now it is not.
+                * Strange but possible since we didn't hold the page lock all
+                * the time. Let's drop everything, get the invalidate lock and
+                * try again.
+                */
+               if (!mapping_locked) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto retry_find;
+               }
                goto page_not_uptodate;
+       }
 
        /*
         * We've made it this far and we had to drop our mmap_lock, now is the
@@ -3026,6 +3116,8 @@ retry_find:
                unlock_page(page);
                goto out_retry;
        }
+       if (mapping_locked)
+               filemap_invalidate_unlock_shared(mapping);
 
        /*
         * Found the page and have a reference on it.
@@ -3056,6 +3148,7 @@ page_not_uptodate:
 
        if (!error || error == AOP_TRUNCATED_PAGE)
                goto retry_find;
+       filemap_invalidate_unlock_shared(mapping);
 
        return VM_FAULT_SIGBUS;
 
@@ -3067,6 +3160,8 @@ out_retry:
         */
        if (page)
                put_page(page);
+       if (mapping_locked)
+               filemap_invalidate_unlock_shared(mapping);
        if (fpin)
                fput(fpin);
        return ret | VM_FAULT_RETRY;
@@ -3437,6 +3532,8 @@ out:
  *
  * If the page does not get brought uptodate, return -EIO.
  *
+ * The function expects mapping->invalidate_lock to be already held.
+ *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page(struct address_space *mapping,
@@ -3460,6 +3557,8 @@ EXPORT_SYMBOL(read_cache_page);
  *
  * If the page does not get brought uptodate, return -EIO.
  *
+ * The function expects mapping->invalidate_lock to be already held.
+ *
  * Return: up to date page on success, ERR_PTR() on failure.
  */
 struct page *read_cache_page_gfp(struct address_space *mapping,
@@ -3704,12 +3803,12 @@ EXPORT_SYMBOL(generic_perform_write);
  * modification times and calls proper subroutines depending on whether we
  * do direct IO or a standard buffered write.
  *
- * It expects i_mutex to be grabbed unless we work on a block device or similar
+ * It expects i_rwsem to be grabbed unless we work on a block device or similar
  * object which does not need locking at all.
  *
  * This function does *not* take care of syncing data in case of O_SYNC write.
  * A caller has to handle it. This is mainly due to the fact that we want to
- * avoid syncing under i_mutex.
+ * avoid syncing under i_rwsem.
  *
  * Return:
  * * number of bytes written, even for truncated writes
@@ -3797,7 +3896,7 @@ EXPORT_SYMBOL(__generic_file_write_iter);
  *
  * This is a wrapper around __generic_file_write_iter() to be used by most
  * filesystems. It takes care of syncing the file in case of O_SYNC file
- * and acquires i_mutex as needed.
+ * and acquires i_rwsem as needed.
  * Return:
  * * negative error code if no data has been written at all of
  *   vfs_fsync_range() failed for a synchronous write
index 42b8b1fa65218b8c3034b1ba672be295c7d4ea00..b94717977d178abae26c6ed2619a3e99667d861c 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1558,9 +1558,12 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
                gup_flags |= FOLL_WRITE;
 
        /*
-        * See check_vma_flags(): Will return -EFAULT on incompatible mappings
-        * or with insufficient permissions.
+        * We want to report -EINVAL instead of -EFAULT for any permission
+        * problems or incompatible mappings.
         */
+       if (check_vma_flags(vma, gup_flags))
+               return -EINVAL;
+
        return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
 }
index dfc940d5221dca12017307f4a6c905b68dea0257..8ea35ba6699f2608b4ec82c09f07d41c838cb6ae 100644 (file)
@@ -2476,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                if (!rc) {
                        /*
                         * This indicates there is an entry in the reserve map
-                        * added by alloc_huge_page.  We know it was added
+                        * not added by alloc_huge_page.  We know it was added
                         * before the alloc_huge_page call, otherwise
                         * HPageRestoreReserve would be set on the page.
                         * Remove the entry so that a subsequent allocation
@@ -4660,7 +4660,9 @@ retry_avoidcopy:
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(&range);
 out_release_all:
-       restore_reserve_on_error(h, vma, haddr, new_page);
+       /* No restore in case of successful pagetable update (Break COW) */
+       if (new_page != old_page)
+               restore_reserve_on_error(h, vma, haddr, new_page);
        put_page(new_page);
 out_release_old:
        put_page(old_page);
@@ -4776,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        pte_t new_pte;
        spinlock_t *ptl;
        unsigned long haddr = address & huge_page_mask(h);
-       bool new_page = false;
+       bool new_page, new_pagecache_page = false;
 
        /*
         * Currently, we are forced to kill the process in the event the
@@ -4799,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
                goto out;
 
 retry:
+       new_page = false;
        page = find_lock_page(mapping, idx);
        if (!page) {
                /* Check for page in userfault range */
@@ -4842,6 +4845,7 @@ retry:
                                        goto retry;
                                goto out;
                        }
+                       new_pagecache_page = true;
                } else {
                        lock_page(page);
                        if (unlikely(anon_vma_prepare(vma))) {
@@ -4926,7 +4930,9 @@ backout:
        spin_unlock(ptl);
 backout_unlocked:
        unlock_page(page);
-       restore_reserve_on_error(h, vma, haddr, page);
+       /* restore reserve for newly allocated pages not in page cache */
+       if (new_page && !new_pagecache_page)
+               restore_reserve_on_error(h, vma, haddr, page);
        put_page(page);
        goto out;
 }
@@ -5135,6 +5141,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        int ret = -ENOMEM;
        struct page *page;
        int writable;
+       bool new_pagecache_page = false;
 
        if (is_continue) {
                ret = -EFAULT;
@@ -5228,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                ret = huge_add_to_page_cache(page, mapping, idx);
                if (ret)
                        goto out_release_nounlock;
+               new_pagecache_page = true;
        }
 
        ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5291,7 +5299,8 @@ out_release_unlock:
        if (vm_shared || is_continue)
                unlock_page(page);
 out_release_nounlock:
-       restore_reserve_on_error(h, dst_vma, dst_addr, page);
+       if (!new_pagecache_page)
+               restore_reserve_on_error(h, dst_vma, dst_addr, page);
        put_page(page);
        goto out;
 }
index 942cbc16ad26ba8bc6032ce4b7416d8725b750ad..eb6307c199eae2ab7bb31d041fecd58cf7efd628 100644 (file)
 #include <linux/tracepoint.h>
 #include <trace/events/printk.h>
 
+#include <asm/kfence.h>
+
 #include "kfence.h"
 
+/* May be overridden by <asm/kfence.h>. */
+#ifndef arch_kfence_test_address
+#define arch_kfence_test_address(addr) (addr)
+#endif
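An architecture whose KFENCE reports print a different alias of the faulting address can override this hook from its <asm/kfence.h>. A purely illustrative sketch of such an override (the mask is made up):

    #include <linux/bits.h>

    /* Hypothetical override: strip a top-byte address tag for comparison. */
    #define arch_kfence_test_address(addr)  ((addr) & GENMASK(55, 0))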
+
 /* Report as observed from console. */
 static struct {
        spinlock_t lock;
@@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
 /* Check observed report matches information in @r. */
 static bool report_matches(const struct expect_report *r)
 {
+       unsigned long addr = (unsigned long)r->addr;
        bool ret = false;
        unsigned long flags;
        typeof(observed.lines) expect;
@@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
        switch (r->type) {
        case KFENCE_ERROR_OOB:
                cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_UAF:
                cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_CORRUPTION:
                cur += scnprintf(cur, end - cur, "Corrupted memory at");
                break;
        case KFENCE_ERROR_INVALID:
                cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
+               addr = arch_kfence_test_address(addr);
                break;
        case KFENCE_ERROR_INVALID_FREE:
                cur += scnprintf(cur, end - cur, "Invalid free of");
                break;
        }
 
-       cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);
+       cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
 
        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
index 228a2fbe065746933a4eb52fdc411c878eb3845a..73d46d16d5755e2a1181e0b5ac19dc15e102af9f 100644 (file)
@@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq,
        warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
-                            HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+                            HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
        kasan_enable_current();
 }
 
@@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object)
 
        kasan_disable_current();
        kcsan_disable_current();
-       object->checksum = crc32(0, (void *)object->pointer, object->size);
+       object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
        kasan_enable_current();
        kcsan_enable_current();
 
@@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end,
                        break;
 
                kasan_disable_current();
-               pointer = *ptr;
+               pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
                kasan_enable_current();
 
                untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
index 6d3d348b17f47d15e3880eecbcc6f52f2798ec44..56324a3dbc4e98f9cf1283901c95617b03087b4e 100644 (file)
@@ -862,10 +862,12 @@ static long madvise_populate(struct vm_area_struct *vma,
                        switch (pages) {
                        case -EINTR:
                                return -EINTR;
-                       case -EFAULT: /* Incompatible mappings / permissions. */
+                       case -EINVAL: /* Incompatible mappings / permissions. */
                                return -EINVAL;
                        case -EHWPOISON:
                                return -EHWPOISON;
+                       case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
+                               return -EFAULT;
                        default:
                                pr_warn_once("%s: unhandled return value: %ld\n",
                                             __func__, pages);
@@ -910,7 +912,7 @@ static long madvise_remove(struct vm_area_struct *vma,
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
        /*
-        * Filesystem's fallocate may need to take i_mutex.  We need to
+        * Filesystem's fallocate may need to take i_rwsem.  We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_lock.
index eb8e87c4833fc261327e32066ca97d914174a988..702a81dfe72dc412077890587dca82019e374983 100644 (file)
@@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                stock->cached_pgdat = pgdat;
        } else if (stock->cached_pgdat != pgdat) {
                /* Flush the existing cached vmstat data */
+               struct pglist_data *oldpg = stock->cached_pgdat;
+
                if (stock->nr_slab_reclaimable_b) {
-                       mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+                       mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
                                          stock->nr_slab_reclaimable_b);
                        stock->nr_slab_reclaimable_b = 0;
                }
                if (stock->nr_slab_unreclaimable_b) {
-                       mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+                       mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
                                          stock->nr_slab_unreclaimable_b);
                        stock->nr_slab_unreclaimable_b = 0;
                }
index eefd823deb679ad0b9d84b5b374614aace672851..e1f87cf13235ff5f705a60e5198150019c1e00a1 100644 (file)
@@ -866,7 +866,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
        /*
         * Truncation is a bit tricky. Enable it per file system for now.
         *
-        * Open: to take i_mutex or not for this? Right now we don't.
+        * Open: to take i_rwsem or not for this? Right now we don't.
         */
        ret = truncate_error_page(p, pfn, mapping);
 out:
@@ -1146,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page)
         * unexpected races caused by taking a page refcount.
         */
        if (!HWPoisonHandlable(head))
-               return 0;
+               return -EBUSY;
 
        if (PageTransHuge(head)) {
                /*
@@ -1199,9 +1199,15 @@ try_again:
                        }
                        goto out;
                } else if (ret == -EBUSY) {
-                       /* We raced with freeing huge page to buddy, retry. */
-                       if (pass++ < 3)
+                       /*
+                        * We raced with (possibly temporary) unhandlable
+                        * page, retry.
+                        */
+                       if (pass++ < 3) {
+                               shake_page(p, 1);
                                goto try_again;
+                       }
+                       ret = -EIO;
                        goto out;
                }
        }
index 8cb75b26ea4ff8805b5b22e85a212c8a4992df68..86c3af79e874e9ff986276e1b812d2892fdc863b 100644 (file)
@@ -1731,6 +1731,7 @@ failed_removal_isolated:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal_pcplists_disabled:
+       lru_cache_enable();
        zone_pcp_enable(zone);
 failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
index ca54d36d203ae4c598b53d4bb9b2df75e2d00e92..181a113b545df6502f3da4cd7b9b09e19321041e 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1517,12 +1517,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       /*
-                        * Make sure there are no mandatory locks on the file.
-                        */
-                       if (locks_verify_locked(file))
-                               return -EAGAIN;
-
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
index 3a93d40548104e8e438dbb0b63b6047c8784aea3..9d0ad98f838cfb85532a8098ee1a7419fed33dac 100644 (file)
@@ -826,9 +826,6 @@ static int validate_mmap_request(struct file *file,
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (locks_verify_locked(file))
-                               return -EAGAIN;
-
                        if (!(capabilities & NOMMU_MAP_DIRECT))
                                return -ENODEV;
 
index 856b175c15a4fa2c7590526d4941e4b1a2dd39e5..eeb3a9cb36bb4ff417247501d53c7b0796db6045 100644 (file)
@@ -3453,19 +3453,10 @@ void free_unref_page_list(struct list_head *list)
                 * comment in free_unref_page.
                 */
                migratetype = get_pcppage_migratetype(page);
-               if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
-                       if (unlikely(is_migrate_isolate(migratetype))) {
-                               list_del(&page->lru);
-                               free_one_page(page_zone(page), page, pfn, 0,
-                                                       migratetype, FPI_NONE);
-                               continue;
-                       }
-
-                       /*
-                        * Non-isolated types over MIGRATE_PCPTYPES get added
-                        * to the MIGRATE_MOVABLE pcp list.
-                        */
-                       set_pcppage_migratetype(page, MIGRATE_MOVABLE);
+               if (unlikely(is_migrate_isolate(migratetype))) {
+                       list_del(&page->lru);
+                       free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+                       continue;
                }
 
                set_page_private(page, pfn);
@@ -3475,7 +3466,15 @@ void free_unref_page_list(struct list_head *list)
        list_for_each_entry_safe(page, next, list, lru) {
                pfn = page_private(page);
                set_page_private(page, 0);
+
+               /*
+                * Non-isolated types over MIGRATE_PCPTYPES get added
+                * to the MIGRATE_MOVABLE pcp list.
+                */
                migratetype = get_pcppage_migratetype(page);
+               if (unlikely(migratetype >= MIGRATE_PCPTYPES))
+                       migratetype = MIGRATE_MOVABLE;
+
                trace_mm_page_free_batched(page);
                free_unref_page_commit(page, pfn, migratetype, 0);
 
index d589f147f4c2f6792d5061ae0fabce35e7efe789..41b75d76d36e1bf07c0f7a0449ebf78761a93227 100644 (file)
@@ -192,6 +192,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         */
        unsigned int nofs = memalloc_nofs_save();
 
+       filemap_invalidate_lock_shared(mapping);
        /*
         * Preallocate as many pages as we will need.
         */
@@ -236,6 +237,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         * will then handle the error.
         */
        read_pages(ractl, &page_pool, false);
+       filemap_invalidate_unlock_shared(mapping);
        memalloc_nofs_restore(nofs);
 }
 EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
index b9eb5c12f3fe10bc83ce0d26d3f804d7f252f517..2d29a57d29e8d2021588b17c306bc6f5199c7d97 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
 /*
  * Lock ordering in mm:
  *
- * inode->i_mutex      (while writing or truncating, not reading or faulting)
+ * inode->i_rwsem      (while writing or truncating, not reading or faulting)
  *   mm->mmap_lock
- *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
- *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
- *         mapping->i_mmap_rwsem
- *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
- *           anon_vma->rwsem
- *             mm->page_table_lock or pte_lock
- *               swap_lock (in swap_duplicate, swap_info_get)
- *                 mmlist_lock (in mmput, drain_mmlist and others)
- *                 mapping->private_lock (in __set_page_dirty_buffers)
- *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
- *                     i_pages lock (widely used)
- *                       lruvec->lru_lock (in lock_page_lruvec_irq)
- *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
- *                   sb_lock (within inode_lock in fs/fs-writeback.c)
- *                   i_pages lock (widely used, in set_page_dirty,
- *                             in arch-dependent flush_dcache_mmap_lock,
- *                             within bdi.wb->list_lock in __sync_single_inode)
+ *     mapping->invalidate_lock (in filemap_fault)
+ *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
+ *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
+ *           mapping->i_mmap_rwsem
+ *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
+ *             anon_vma->rwsem
+ *               mm->page_table_lock or pte_lock
+ *                 swap_lock (in swap_duplicate, swap_info_get)
+ *                   mmlist_lock (in mmput, drain_mmlist and others)
+ *                   mapping->private_lock (in __set_page_dirty_buffers)
+ *                     lock_page_memcg move_lock (in __set_page_dirty_buffers)
+ *                       i_pages lock (widely used)
+ *                         lruvec->lru_lock (in lock_page_lruvec_irq)
+ *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
+ *                     sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                     i_pages lock (widely used, in set_page_dirty,
+ *                               in arch-dependent flush_dcache_mmap_lock,
+ *                               within bdi.wb->list_lock in __sync_single_inode)
  *
- * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
  *   ->tasklist_lock
  *     pte map lock
  *
index 70d9ce294bb4975644ec041fe5f523d9f45e9aa2..3107acee4f71828271d19b50794c9e7c8a7d557d 100644 (file)
@@ -96,7 +96,7 @@ static struct vfsmount *shm_mnt;
 
 /*
  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
- * inode->i_private (with i_mutex making sure that it has only one user at
+ * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
@@ -774,7 +774,7 @@ static int shmem_free_swap(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given offsets are swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
@@ -806,7 +806,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given vma is swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
@@ -1069,7 +1069,7 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
@@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-       struct swap_info_struct *si;
-       struct page *page = NULL;
+       struct page *page;
        swp_entry_t swap;
        int error;
 
@@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        swap = radix_to_swp_entry(*pagep);
        *pagep = NULL;
 
-       /* Prevent swapoff from happening to us. */
-       si = get_swap_device(swap);
-       if (!si) {
-               error = EINVAL;
-               goto failed;
-       }
        /* Look it up and read it in.. */
        page = lookup_swap_cache(swap, NULL, 0);
        if (!page) {
@@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
        swap_free(swap);
 
        *pagep = page;
-       if (si)
-               put_swap_device(si);
        return 0;
 failed:
        if (!shmem_confirm_swap(mapping, index, swap))
@@ -1784,9 +1775,6 @@ unlock:
                put_page(page);
        }
 
-       if (si)
-               put_swap_device(si);
-
        return error;
 }
 
@@ -2071,7 +2059,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
        /*
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
-        * locks writers out with its hold on i_mutex.  So refrain from
+        * locks writers out with its hold on i_rwsem.  So refrain from
         * faulting pages into the hole while it's being punched.  Although
         * shmem_undo_range() does remove the additions, it may be unable to
         * keep up, as each new page needs its own unmap_mapping_range() call,
@@ -2082,7 +2070,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
         * we just need to make racing faults a rare case.
         *
         * The implementation below would be much simpler if we just used a
-        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * standard mutex or completion: but we cannot take i_rwsem in fault,
         * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
@@ -2482,7 +2470,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t index = pos >> PAGE_SHIFT;
 
-       /* i_mutex is held by caller */
+       /* i_rwsem is held by caller */
        if (unlikely(info->seals & (F_SEAL_GROW |
                                   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
@@ -2582,7 +2570,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 
                /*
                 * We must evaluate after, since reads (unlike writes)
-                * are called without i_mutex protection against truncate
+                * are called without i_rwsem protection against truncate
                 */
                nr = PAGE_SIZE;
                i_size = i_size_read(inode);
@@ -2652,7 +2640,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
                return -ENXIO;
 
        inode_lock(inode);
-       /* We're holding i_mutex so we can access i_size directly */
+       /* We're holding i_rwsem so we can access i_size directly */
        offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
        if (offset >= 0)
                offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
@@ -2681,7 +2669,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
                DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
                        error = -EPERM;
                        goto out;
index af984e4990e8fc8efc6e6c264267e330f5b20178..f77d8cd79ef7f7d6855df7ae9bb4659001cee387 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -576,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr,
                          unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
-                       16, 1, addr, length, 1);
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+                       16, 1, kasan_reset_tag((void *)addr), length, 1);
        metadata_access_disable();
 }
 
@@ -1400,12 +1400,13 @@ check_slabs:
 static int __init setup_slub_debug(char *str)
 {
        slab_flags_t flags;
+       slab_flags_t global_flags;
        char *saved_str;
        char *slab_list;
        bool global_slub_debug_changed = false;
        bool slab_list_specified = false;
 
-       slub_debug = DEBUG_DEFAULT_FLAGS;
+       global_flags = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
@@ -1417,7 +1418,7 @@ static int __init setup_slub_debug(char *str)
                str = parse_slub_debug_flags(str, &flags, &slab_list, true);
 
                if (!slab_list) {
-                       slub_debug = flags;
+                       global_flags = flags;
                        global_slub_debug_changed = true;
                } else {
                        slab_list_specified = true;
@@ -1426,16 +1427,18 @@ static int __init setup_slub_debug(char *str)
 
        /*
         * For backwards compatibility, a single list of flags with list of
-        * slabs means debugging is only enabled for those slabs, so the global
-        * slub_debug should be 0. We can extended that to multiple lists as
+        * slabs means debugging is only changed for those slabs, so the global
+        * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+        * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
         * long as there is no option specifying flags without a slab list.
         */
        if (slab_list_specified) {
                if (!global_slub_debug_changed)
-                       slub_debug = 0;
+                       global_flags = slub_debug;
                slub_debug_string = saved_str;
        }
 out:
+       slub_debug = global_flags;
        if (slub_debug != 0 || slub_debug_string)
                static_branch_enable(&slub_debug_enabled);
        else
@@ -3236,12 +3239,12 @@ struct detached_freelist {
        struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page)
+static inline void free_nonslab_page(struct page *page, void *object)
 {
        unsigned int order = compound_order(page);
 
        VM_BUG_ON_PAGE(!PageCompound(page), page);
-       kfree_hook(page_address(page));
+       kfree_hook(object);
        mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
        __free_pages(page, order);
 }
@@ -3282,7 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        if (!s) {
                /* Handle kalloc'ed objects */
                if (unlikely(!PageSlab(page))) {
-                       free_nonslab_page(page);
+                       free_nonslab_page(page, object);
                        p[size] = NULL; /* mark object processed */
                        return size;
                }
@@ -4258,7 +4261,7 @@ void kfree(const void *x)
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
-               free_nonslab_page(page);
+               free_nonslab_page(page, object);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
index c56aa9ac050dd2f96dd8c9582ee5044d2d7fddb2..bc7cee6b2ec54c7654676a180e0265355d814524 100644 (file)
@@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
        if (!mask)
                goto skip;
 
-       /* Test swap type to make sure the dereference is safe */
-       if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
-               struct inode *inode = si->swap_file->f_mapping->host;
-               if (inode_read_congested(inode))
-                       goto skip;
-       }
-
        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
index 234ddd879caa1e4db6d64c7b97ce7887276ff0c1..44ad5e515140e7f6aaf157bf9d9467824091c77c 100644 (file)
@@ -412,7 +412,8 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
  *
- * Called under (and serialised by) inode->i_mutex.
+ * Called under (and serialised by) inode->i_rwsem and
+ * mapping->invalidate_lock.
  *
  * Note: When this function returns, there can be a page in the process of
  * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
@@ -429,7 +430,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
  * truncate_inode_pages_final - truncate *all* pages before inode dies
  * @mapping: mapping to truncate
  *
- * Called under (and serialized by) inode->i_mutex.
+ * Called under (and serialized by) inode->i_rwsem.
  *
  * Filesystems have to use this in the .evict_inode path to inform the
  * VM that this is the final truncate and the inode is going away.
@@ -748,7 +749,7 @@ EXPORT_SYMBOL(truncate_pagecache);
  * setattr function when ATTR_SIZE is passed in.
  *
  * Must be called with a lock serializing truncates and writes (generally
- * i_mutex but e.g. xfs uses a different lock) and before all filesystem
+ * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
  * specific block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
@@ -777,7 +778,7 @@ EXPORT_SYMBOL(truncate_setsize);
  *
  * The function must be called after i_size is updated so that page fault
  * coming after we unlock the page will already see the new i_size.
- * The function must be called while we still hold i_mutex - this not only
+ * The function must be called while we still hold i_rwsem - this not only
  * makes sure i_size is stable but also that userspace cannot observe new
  * i_size value before we are prepared to store mmap writes at new inode size.
  */
index 4620df62f0ffa1e6e631c041f8f236af389619f4..eeae2f6bc53203926678fb515ec6d1a96b2d6b3f 100644 (file)
@@ -100,9 +100,12 @@ struct scan_control {
        unsigned int may_swap:1;
 
        /*
-        * Cgroups are not reclaimed below their configured memory.low,
-        * unless we threaten to OOM. If any cgroups are skipped due to
-        * memory.low and nothing was reclaimed, go back for memory.low.
+        * Cgroup memory below memory.low is protected as long as we
+        * don't threaten to OOM. If any cgroup is reclaimed at
+        * reduced force or passed over entirely due to its memory.low
+        * setting (memcg_low_skipped), and nothing is reclaimed as a
+        * result, then go back for one more cycle that reclaims the protected
+        * memory (memcg_low_reclaim) to avert OOM.
         */
        unsigned int memcg_low_reclaim:1;
        unsigned int memcg_low_skipped:1;
@@ -2537,15 +2540,14 @@ out:
        for_each_evictable_lru(lru) {
                int file = is_file_lru(lru);
                unsigned long lruvec_size;
+               unsigned long low, min;
                unsigned long scan;
-               unsigned long protection;
 
                lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-               protection = mem_cgroup_protection(sc->target_mem_cgroup,
-                                                  memcg,
-                                                  sc->memcg_low_reclaim);
+               mem_cgroup_protection(sc->target_mem_cgroup, memcg,
+                                     &min, &low);
 
-               if (protection) {
+               if (min || low) {
                        /*
                         * Scale a cgroup's reclaim pressure by proportioning
                         * its current usage to its memory.low or memory.min
@@ -2576,6 +2578,15 @@ out:
                         * hard protection.
                         */
                        unsigned long cgroup_size = mem_cgroup_size(memcg);
+                       unsigned long protection;
+
+                       /* memory.low scaling, make sure we retry before OOM */
+                       if (!sc->memcg_low_reclaim && low > min) {
+                               protection = low;
+                               sc->memcg_low_skipped = 1;
+                       } else {
+                               protection = min;
+                       }
 
                        /* Avoid TOCTOU with earlier protection check */
                        cgroup_size = max(cgroup_size, protection);
@@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
                .may_swap = 1,
                .reclaim_idx = gfp_zone(gfp_mask),
        };
+       unsigned long pflags;
 
        trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
                                           sc.gfp_mask);
 
        cond_resched();
+       psi_memstall_enter(&pflags);
        fs_reclaim_acquire(sc.gfp_mask);
        /*
         * We need to be able to allocate from the reserves for RECLAIM_UNMAP
@@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
        current->flags &= ~PF_SWAPWRITE;
        memalloc_noreclaim_restore(noreclaim_flag);
        fs_reclaim_release(sc.gfp_mask);
+       psi_memstall_leave(&pflags);
 
        trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
 
index 1cc75c811e247bc6cc9f6083be0d17fe7e84ee4d..caa16bf30fb5598753b42080434fca9767dc975c 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
 #include <linux/filter.h>
+#include <linux/rcupdate_trace.h>
 #include <linux/sched/signal.h>
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>
@@ -951,7 +952,10 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                        goto out;
                }
        }
+
+       rcu_read_lock_trace();
        retval = bpf_prog_run_pin_on_cpu(prog, ctx);
+       rcu_read_unlock_trace();
 
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
                err = -EFAULT;
index bbab9984f24e5f60e81611ae1603c6c3b46a7eec..ef743f94254d76362ba15f60bccddd57e3fb6798 100644 (file)
@@ -166,8 +166,7 @@ static int br_switchdev_event(struct notifier_block *unused,
        case SWITCHDEV_FDB_ADD_TO_BRIDGE:
                fdb_info = ptr;
                err = br_fdb_external_learn_add(br, p, fdb_info->addr,
-                                               fdb_info->vid,
-                                               fdb_info->is_local, false);
+                                               fdb_info->vid, false);
                if (err) {
                        err = notifier_from_errno(err);
                        break;
index 835cec1e5a0357dc9faae2e2141665074032d236..5dee30966ed355fda199533104ccf2e1a591ac1a 100644 (file)
@@ -1044,10 +1044,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                                           "FDB entry towards bridge must be permanent");
                        return -EINVAL;
                }
-
-               err = br_fdb_external_learn_add(br, p, addr, vid,
-                                               ndm->ndm_state & NUD_PERMANENT,
-                                               true);
+               err = br_fdb_external_learn_add(br, p, addr, vid, true);
        } else {
                spin_lock_bh(&br->hash_lock);
                err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
@@ -1275,7 +1272,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
 }
 
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-                             const unsigned char *addr, u16 vid, bool is_local,
+                             const unsigned char *addr, u16 vid,
                              bool swdev_notify)
 {
        struct net_bridge_fdb_entry *fdb;
@@ -1293,7 +1290,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                if (swdev_notify)
                        flags |= BIT(BR_FDB_ADDED_BY_USER);
 
-               if (is_local)
+               if (!p)
                        flags |= BIT(BR_FDB_LOCAL);
 
                fdb = fdb_create(br, p, addr, vid, flags);
@@ -1322,7 +1319,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
                if (swdev_notify)
                        set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
-               if (is_local)
+               if (!p)
                        set_bit(BR_FDB_LOCAL, &fdb->flags);
 
                if (modified)
index 6e4a32354a138f96c96055ddbc2f25351b7fb945..14cd6ef9611171e4af12202d543a206c8cbe97a8 100644 (file)
@@ -616,6 +616,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
 
        err = dev_set_allmulti(dev, 1);
        if (err) {
+               br_multicast_del_port(p);
                kfree(p);       /* kobject not yet init'd, manually free */
                goto err1;
        }
@@ -729,6 +730,7 @@ err4:
 err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
+       br_multicast_del_port(p);
        kobject_put(&p->kobj);
        dev_set_allmulti(dev, -1);
 err1:
index aa64d8d63ca3fb03ed3578d54d8a3a3e87ab5039..2b48b204205e68dd2b092d61578b7b6ccb228487 100644 (file)
@@ -711,7 +711,7 @@ int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
-                             const unsigned char *addr, u16 vid, bool is_local,
+                             const unsigned char *addr, u16 vid,
                              bool swdev_notify);
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid,
index 8d033a75a766ef529e201bc04060c2b14e7709cb..fdbed315855536d0825981ef4497af6cdf4ce51e 100644 (file)
@@ -88,6 +88,12 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 
                        skb = ip_fraglist_next(&iter);
                }
+
+               if (!err)
+                       return 0;
+
+               kfree_skb_list(iter.frag);
+
                return err;
        }
 slow_path:
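
The added kfree_skb_list() call above closes a leak: when the fraglist
walk stops on error, the fragments not yet handed to output are still
chained on iter.frag. A hedged sketch of the rule, with output() as a
hypothetical per-fragment transmit hook:

    #include <linux/skbuff.h>
    #include <net/ip.h>

    static int xmit_fraglist(struct sk_buff *skb,
                             struct ip_fraglist_iter *iter,
                             int (*output)(struct sk_buff *skb))
    {
            int err;

            for (;;) {
                    err = output(skb);              /* consumes skb */
                    if (err || !iter->frag)
                            break;
                    skb = ip_fraglist_next(iter);
            }
            if (err)
                    kfree_skb_list(iter->frag);     /* free the unsent rest */
            return err;
    }
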
index 75431ca9300fb9c486404da3dda0d344440d4fbc..1a455847da54fc06802598a4631c277e6a2eeeed 100644 (file)
@@ -158,7 +158,7 @@ static void linkwatch_do_dev(struct net_device *dev)
        clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
 
        rfc2863_policy(dev);
-       if (dev->flags & IFF_UP && netif_device_present(dev)) {
+       if (dev->flags & IFF_UP) {
                if (netif_carrier_ok(dev))
                        dev_activate(dev);
                else
@@ -204,7 +204,8 @@ static void __linkwatch_run_queue(int urgent_only)
                dev = list_first_entry(&wrk, struct net_device, link_watch_list);
                list_del_init(&dev->link_watch_list);
 
-               if (urgent_only && !linkwatch_urgent_event(dev)) {
+               if (!netif_device_present(dev) ||
+                   (urgent_only && !linkwatch_urgent_event(dev))) {
                        list_add_tail(&dev->link_watch_list, &lweventlist);
                        continue;
                }
index 5e4eb45b139c7138ad3fe2d223dc1ac1d405cdd1..8ab7b402244c00567232103948574df749c350d4 100644 (file)
@@ -634,7 +634,15 @@ bool page_pool_return_skb_page(struct page *page)
        struct page_pool *pp;
 
        page = compound_head(page);
-       if (unlikely(page->pp_magic != PP_SIGNATURE))
+
+       /* page->pp_magic is OR'ed with PP_SIGNATURE at allocation time
+        * to preserve any pre-existing low bits, such as bit 0 for the
+        * head page of a compound page and bit 1 for a pfmemalloc page.
+        * Mask those bits off on the freeing side before doing the check
+        * below; page_is_pfmemalloc() is checked in __page_pool_put_page()
+        * to avoid recycling a pfmemalloc page.
+        */
+       if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
                return false;
 
        pp = page->pp;
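
A sketch of the masked signature test, restating the comment above:
PP_SIGNATURE is OR'ed into page->pp_magic at allocation, so the low two
bits (compound-head and pfmemalloc encodings in struct page) may also be
set and must be ignored on the free side:

    #include <linux/mm.h>           /* PP_SIGNATURE, struct page */

    static bool page_from_page_pool(const struct page *page)
    {
            return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
    }
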
index f6af3e74fc445a1db118a01d39aeb614c26580c5..662eb1c37f47b9dbdd238b34012bca6a559976f6 100644 (file)
@@ -2608,6 +2608,7 @@ static int do_setlink(const struct sk_buff *skb,
                return err;
 
        if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
+               const char *pat = ifname && ifname[0] ? ifname : NULL;
                struct net *net;
                int new_ifindex;
 
@@ -2623,7 +2624,7 @@ static int do_setlink(const struct sk_buff *skb,
                else
                        new_ifindex = 0;
 
-               err = __dev_change_net_namespace(dev, net, ifname, new_ifindex);
+               err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
                put_net(net);
                if (err)
                        goto errout;
index 9cc9d1ee6cdb9a6af1696caae05e26d8289eede5..c5c1d2b8045e8efd9bf32a2db1e679c13cbf1852 100644 (file)
@@ -41,9 +41,9 @@ extern bool dccp_debug;
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)            dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
 #else
-#define dccp_pr_debug(format, a...)
-#define dccp_pr_debug_cat(format, a...)
-#define dccp_debug(format, a...)
+#define dccp_pr_debug(format, a...)      do {} while (0)
+#define dccp_pr_debug_cat(format, a...)          do {} while (0)
+#define dccp_debug(format, a...)         do {} while (0)
 #endif
 
 extern struct inet_hashinfo dccp_hashinfo;
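
The do {} while (0) stubs above silence clang's -Wempty-body and keep the
macros usable as ordinary statements. A sketch of the warning class being
fixed, with dbg_empty()/dbg_stmt() as hypothetical names:

    #define dbg_empty(fmt, a...)                            /* problematic */
    #define dbg_stmt(fmt, a...)     do {} while (0)         /* safe */

    static void example(int cond)
    {
            if (cond)
                    dbg_empty("hit\n");     /* -Wempty-body fires: the `;`
                                             * is an empty branch body */
            if (cond)
                    dbg_stmt("hit\n");      /* compiles cleanly */
    }
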
index 532085da8d8fb131e28204101b783309182d2968..23be8e01026bfd1c17eba2d86290119fdf214c36 100644 (file)
@@ -2291,8 +2291,8 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
 static void
 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
 {
+       struct switchdev_notifier_fdb_info info = {};
        struct dsa_switch *ds = switchdev_work->ds;
-       struct switchdev_notifier_fdb_info info;
        struct dsa_port *dp;
 
        if (!dsa_is_user_port(ds, switchdev_work->port))
index a45a0401adc50b00091de6ec8d041a48a1c03385..c25f7617770c8e0e9693c6116cdec0573422a1b3 100644 (file)
@@ -984,6 +984,11 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .sendpage          = sock_no_sendpage,
 };
 
+static void ieee802154_sock_destruct(struct sock *sk)
+{
+       skb_queue_purge(&sk->sk_receive_queue);
+}
+
 /* Create a socket. Initialise the socket, blank the addresses
  * set the state.
  */
@@ -1024,7 +1029,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
        sock->ops = ops;
 
        sock_init_data(sock, sk);
-       /* FIXME: sk->sk_destruct */
+       sk->sk_destruct = ieee802154_sock_destruct;
        sk->sk_family = PF_IEEE802154;
 
        /* Checksums on by default */
index 099259fc826aa228cd3ea8999341357558a101d9..7fbd0b532f5291df4f14cab9cb5e77436b45bd6e 100644 (file)
@@ -465,14 +465,16 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
        if (!doi_def)
                return;
 
-       switch (doi_def->type) {
-       case CIPSO_V4_MAP_TRANS:
-               kfree(doi_def->map.std->lvl.cipso);
-               kfree(doi_def->map.std->lvl.local);
-               kfree(doi_def->map.std->cat.cipso);
-               kfree(doi_def->map.std->cat.local);
-               kfree(doi_def->map.std);
-               break;
+       if (doi_def->map.std) {
+               switch (doi_def->type) {
+               case CIPSO_V4_MAP_TRANS:
+                       kfree(doi_def->map.std->lvl.cipso);
+                       kfree(doi_def->map.std->lvl.local);
+                       kfree(doi_def->map.std->cat.cipso);
+                       kfree(doi_def->map.std->cat.local);
+                       kfree(doi_def->map.std);
+                       break;
+               }
        }
        kfree(doi_def);
 }
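
The new NULL check above hardens the destructor: netlbl setup can fail
before doi_def->map.std is allocated, yet still reach cipso_v4_doi_free().
A sketch of the general defensive-destructor shape, with struct obj/sub as
hypothetical types:

    #include <linux/slab.h>

    struct sub;
    struct obj {
            struct sub *std;        /* may be NULL on error paths */
    };

    static void obj_free(struct obj *o)
    {
            if (!o)
                    return;
            if (o->std)             /* guard before any o->std->... use */
                    kfree(o->std);
            kfree(o);
    }
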
index 6b3c558a4f232652b97a078d48f302864e60a866..00576bae183d30518e376ad3846d4fe6025aaea7 100644 (file)
@@ -803,10 +803,17 @@ static void igmp_gq_timer_expire(struct timer_list *t)
 static void igmp_ifc_timer_expire(struct timer_list *t)
 {
        struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
+       u32 mr_ifc_count;
 
        igmpv3_send_cr(in_dev);
-       if (in_dev->mr_ifc_count) {
-               in_dev->mr_ifc_count--;
+restart:
+       mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);
+
+       if (mr_ifc_count) {
+               if (cmpxchg(&in_dev->mr_ifc_count,
+                           mr_ifc_count,
+                           mr_ifc_count - 1) != mr_ifc_count)
+                       goto restart;
                igmp_ifc_start_timer(in_dev,
                                     unsolicited_report_interval(in_dev));
        }
@@ -818,7 +825,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
        struct net *net = dev_net(in_dev->dev);
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
                return;
-       in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+       WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
        igmp_ifc_start_timer(in_dev, 1);
 }
 
@@ -957,7 +964,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                                in_dev->mr_qri;
                }
                /* cancel the interface change timer */
-               in_dev->mr_ifc_count = 0;
+               WRITE_ONCE(in_dev->mr_ifc_count, 0);
                if (del_timer(&in_dev->mr_ifc_timer))
                        __in_dev_put(in_dev);
                /* clear deleted report items */
@@ -1724,7 +1731,7 @@ void ip_mc_down(struct in_device *in_dev)
                igmp_group_dropped(pmc);
 
 #ifdef CONFIG_IP_MULTICAST
-       in_dev->mr_ifc_count = 0;
+       WRITE_ONCE(in_dev->mr_ifc_count, 0);
        if (del_timer(&in_dev->mr_ifc_timer))
                __in_dev_put(in_dev);
        in_dev->mr_gq_running = 0;
@@ -1941,7 +1948,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                pmc->sfmode = MCAST_INCLUDE;
 #ifdef CONFIG_IP_MULTICAST
                pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               in_dev->mr_ifc_count = pmc->crcount;
+               WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                igmp_ifc_event(pmc->interface);
@@ -2120,7 +2127,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                /* else no filters; keep old mode for reports */
 
                pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               in_dev->mr_ifc_count = pmc->crcount;
+               WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
                for (psf = pmc->sources; psf; psf = psf->sf_next)
                        psf->sf_crcount = 0;
                igmp_ifc_event(in_dev);
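
The READ_ONCE()/WRITE_ONCE()/cmpxchg() conversion above lets the timer
decrement mr_ifc_count without taking a lock against concurrent writers.
A sketch of that lockless decrement in isolation:

    #include <linux/atomic.h>

    static void dec_if_nonzero(u32 *counter)
    {
            u32 old;

            do {
                    old = READ_ONCE(*counter);
                    if (!old)
                            return;         /* already zero, nothing to do */
            } while (cmpxchg(counter, old, old - 1) != old);
            /* the loop retries when another CPU changed *counter
             * between the read and the update
             */
    }
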
index 12dca0c85f3c38302a76eda64ef8a03dd2d80318..95419b7adf5ce0c4f8d390983e61811d1991db87 100644 (file)
@@ -473,6 +473,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
+       if (csum && skb_checksum_start(skb) < skb->data)
+               return -EINVAL;
        return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
index 99c06944501ab1a8de0960acfdc9f1825b7079b1..a6f20ee3533554b210d27c4ab6637ca7a05b148b 100644 (file)
@@ -600,14 +600,14 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
        return oldest;
 }
 
-static inline u32 fnhe_hashfun(__be32 daddr)
+static u32 fnhe_hashfun(__be32 daddr)
 {
-       static u32 fnhe_hashrnd __read_mostly;
-       u32 hval;
+       static siphash_key_t fnhe_hash_key __read_mostly;
+       u64 hval;
 
-       net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
-       hval = jhash_1word((__force u32)daddr, fnhe_hashrnd);
-       return hash_32(hval, FNHE_HASH_SHIFT);
+       net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
+       hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
+       return hash_64(hval, FNHE_HASH_SHIFT);
 }
 
 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
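
Both this hunk and the rt6_exception_hash one below move from jhash with a
32-bit seed to siphash with a full random key, so off-path attackers can
no longer predict exception-cache buckets. A sketch of the keyed pattern:

    #include <linux/hash.h>
    #include <linux/net.h>          /* net_get_random_once() */
    #include <linux/siphash.h>

    static u32 exception_bucket(__be32 daddr, unsigned int shift)
    {
            static siphash_key_t key __read_mostly;
            u64 h;

            net_get_random_once(&key, sizeof(key));
            h = siphash_1u32((__force u32)daddr, &key);
            return hash_64(h, shift);       /* fold 64 bits to a bucket */
    }
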
index 6ea3dc2e421946c2d384a16e022efc1a7ab1874a..6274462b86b4b9dd4ed821f0789d57ae3ce3cbf8 100644 (file)
@@ -1041,7 +1041,7 @@ static void bbr_init(struct sock *sk)
        bbr->prior_cwnd = 0;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        bbr->rtt_cnt = 0;
-       bbr->next_rtt_delivered = 0;
+       bbr->next_rtt_delivered = tp->delivered;
        bbr->prev_ca_state = TCP_CA_Open;
        bbr->packet_conservation = 0;
 
index 2d650dc24349b3369c271328c70a2214b9651fda..ef75c9b05f17e4d0db615539bc39693be321cc43 100644 (file)
@@ -1341,7 +1341,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
        struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
                                lockdep_is_held(&rt->fib6_table->tb6_lock));
 
-       /* paired with smp_rmb() in rt6_get_cookie_safe() */
+       /* paired with smp_rmb() in fib6_get_cookie_safe() */
        smp_wmb();
        while (fn) {
                fn->fn_sernum = sernum;
index bc224f917bbd53beb9b8af5bdef3fb9794b8ee44..7a5e90e093630177a778cd71cc115f8776e771a6 100644 (file)
@@ -629,6 +629,8 @@ drop:
 
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
+       if (csum && skb_checksum_start(skb) < skb->data)
+               return -EINVAL;
        return iptunnel_handle_offloads(skb,
                                        csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
index b6ddf23d38330ded88509b8507998ce82a72799b..c5e8ecb96426bda619fe242351e40dcf6ff68bcf 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/net_namespace.h>
 #include <net/snmp.h>
 #include <net/ipv6.h>
@@ -1484,17 +1485,24 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
 static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
 {
-       static u32 seed __read_mostly;
-       u32 val;
+       static siphash_key_t rt6_exception_key __read_mostly;
+       struct {
+               struct in6_addr dst;
+               struct in6_addr src;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .dst = *dst,
+       };
+       u64 val;
 
-       net_get_random_once(&seed, sizeof(seed));
-       val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
+       net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
 
 #ifdef CONFIG_IPV6_SUBTREES
        if (src)
-               val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
+               combined.src = *src;
 #endif
-       return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
+       val = siphash(&combined, sizeof(combined), &rt6_exception_key);
+
+       return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
 }
 
 /* Helper function to find the cached rt in the hash table
index 05f4c3c72619f7647f3c3e1465b652bac3108403..fcae76ddd586cf5dbac7aa17a53656245e0eaefa 100644 (file)
@@ -260,6 +260,8 @@ static void ieee80211_restart_work(struct work_struct *work)
        flush_work(&local->radar_detected_work);
 
        rtnl_lock();
+       /* we might do interface manipulations, so need both */
+       wiphy_lock(local->hw.wiphy);
 
        WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
             "%s called with hardware scan in progress\n", __func__);
index 4452455aef7fa801ea8d5318867804477d78c9f3..7adcbc1f7d49e5161a6393dc6ce104a2a8aab80c 100644 (file)
@@ -885,20 +885,16 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
                return subflow->mp_capable;
        }
 
-       if (mp_opt->dss && mp_opt->use_ack) {
+       if ((mp_opt->dss && mp_opt->use_ack) ||
+           (mp_opt->add_addr && !mp_opt->echo)) {
                /* subflows are fully established as soon as we get any
-                * additional ack.
+                * additional ack, including ADD_ADDR.
                 */
                subflow->fully_established = 1;
                WRITE_ONCE(msk->fully_established, true);
                goto fully_established;
        }
 
-       if (mp_opt->add_addr) {
-               WRITE_ONCE(msk->fully_established, true);
-               return true;
-       }
-
        /* If the first established packet does not contain MP_CAPABLE + data
         * then fallback to TCP. Fallback scenarios requires a reset for
         * MP_JOIN subflows.
index 56263c2c4014cd81359a5bd84bccd227f56c60c5..7b379445978332f65dde7f5bf960601400dec8ba 100644 (file)
@@ -1135,36 +1135,12 @@ next:
        return 0;
 }
 
-struct addr_entry_release_work {
-       struct rcu_work rwork;
-       struct mptcp_pm_addr_entry *entry;
-};
-
-static void mptcp_pm_release_addr_entry(struct work_struct *work)
+/* caller must ensure the RCU grace period is already elapsed */
+static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
 {
-       struct addr_entry_release_work *w;
-       struct mptcp_pm_addr_entry *entry;
-
-       w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork);
-       entry = w->entry;
-       if (entry) {
-               if (entry->lsk)
-                       sock_release(entry->lsk);
-               kfree(entry);
-       }
-       kfree(w);
-}
-
-static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry)
-{
-       struct addr_entry_release_work *w;
-
-       w = kmalloc(sizeof(*w), GFP_ATOMIC);
-       if (w) {
-               INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry);
-               w->entry = entry;
-               queue_rcu_work(system_wq, &w->rwork);
-       }
+       if (entry->lsk)
+               sock_release(entry->lsk);
+       kfree(entry);
 }
 
 static int mptcp_nl_remove_id_zero_address(struct net *net,
@@ -1244,7 +1220,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
        spin_unlock_bh(&pernet->lock);
 
        mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
-       mptcp_pm_free_addr_entry(entry);
+       synchronize_rcu();
+       __mptcp_pm_release_addr_entry(entry);
 
        return ret;
 }
@@ -1297,6 +1274,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
        }
 }
 
+/* caller must ensure the RCU grace period is already elapsed */
 static void __flush_addrs(struct list_head *list)
 {
        while (!list_empty(list)) {
@@ -1305,7 +1283,7 @@ static void __flush_addrs(struct list_head *list)
                cur = list_entry(list->next,
                                 struct mptcp_pm_addr_entry, list);
                list_del_rcu(&cur->list);
-               mptcp_pm_free_addr_entry(cur);
+               __mptcp_pm_release_addr_entry(cur);
        }
 }
 
@@ -1329,6 +1307,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
        bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
        spin_unlock_bh(&pernet->lock);
        mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
+       synchronize_rcu();
        __flush_addrs(&free_list);
        return 0;
 }
@@ -1939,7 +1918,8 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
                struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
 
                /* net is removed from namespace list, can't race with
-                * other modifiers
+                * other modifiers; also, the netns core has already
+                * waited for an RCU grace period.
                 */
                __flush_addrs(&pernet->local_addr_list);
        }
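
The rework above drops the per-entry rcu_work (whose GFP_ATOMIC allocation
could fail and leak the entry) in favour of one synchronize_rcu() before a
synchronous free. A sketch of the resulting flush shape, with types
abbreviated from the pm_netlink code:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    static void flush_all_entries(struct pm_nl_pernet *pernet)
    {
            LIST_HEAD(free_list);

            spin_lock_bh(&pernet->lock);
            list_splice_init(&pernet->local_addr_list, &free_list);
            spin_unlock_bh(&pernet->lock);

            synchronize_rcu();              /* existing readers drain */
            __flush_addrs(&free_list);      /* plain free is now safe */
    }
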
index d1bef23fd4f582698f44429e19a31b85e5a4bfa6..dd30c03d5a23f5b3e4aabb41d20ccae5295a376a 100644 (file)
@@ -132,8 +132,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
                        return ret;
-               if (ip > ip_to)
+               if (ip > ip_to) {
+                       if (ip_to == 0)
+                               return -IPSET_ERR_HASH_ELEM;
                        swap(ip, ip_to);
+               }
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -144,6 +147,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
+       /* 64-bit division is not allowed on 32-bit architectures */
+       if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried) {
                ip = ntohl(h->next.ip);
                e.ip = htonl(ip);
index 18346d18aa16c4ca6e5c290260237f810cfb65c3..153de3457423e58601f309bb797da33cc1a40229 100644 (file)
@@ -121,6 +121,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
        e.mark &= h->markmask;
+       if (e.mark == 0 && e.ip == 0)
+               return -IPSET_ERR_HASH_ELEM;
 
        if (adt == IPSET_TEST ||
            !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
@@ -133,8 +135,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
                        return ret;
-               if (ip > ip_to)
+               if (ip > ip_to) {
+                       if (e.mark == 0 && ip_to == 0)
+                               return -IPSET_ERR_HASH_ELEM;
                        swap(ip, ip_to);
+               }
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -143,6 +148,9 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
 
+       if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index e1ca111965158345bc7250851a046968367ad50b..7303138e46be1bdae05869a1d96df3ec6fe273fb 100644 (file)
@@ -173,6 +173,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index ab179e064597c5e8efc1b32494fd569c5ec2d6bd..334fb1ad0e86cfca13fa5c3e69b5ae84ffc0fc90 100644 (file)
@@ -180,6 +180,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
index 8f075b44cf64e0ccd6c50bf6bd7e2d056f0333f3..7df94f437f600f98e48a62dcfb3078b8b01da6e2 100644 (file)
@@ -253,6 +253,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
+       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        ip2_to = ip2_from;
        if (tb[IPSET_ATTR_IP2_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
index c1a11f041ac6b11b948657ee55bcab6daedfe17f..1422739d9aa255fb0b7620b7057eabc137b00d84 100644 (file)
@@ -140,7 +140,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = { .cidr = HOST_MASK };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0;
+       u32 ip = 0, ip_to = 0, ipn, n = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -188,6 +188,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (ip + UINT_MAX == ip_to)
                        return -IPSET_ERR_HASH_RANGE;
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n > IPSET_MAX_RANGE)
+               return -ERANGE;
+
        if (retried)
                ip = ntohl(h->next.ip);
        do {
index ddd51c2e1cb36a0f9185b2e6bbbe374081bfdaba..9810f5bf63f5e42d5715d0419049c0c097f2e587 100644 (file)
@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0;
+       u32 ip = 0, ip_to = 0, ipn, n = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -256,6 +256,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried)
                ip = ntohl(h->next.ip);
index 6532f0505e66f8cab2f0f43f38a390354334899d..3d09eefe998a7f82f9549d141daeb13e311f3486 100644 (file)
@@ -168,7 +168,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0;
-       u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
+       u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
+       u64 n = 0, m = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -244,6 +245,19 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+               n++;
+       } while (ipn++ < ip_to);
+       ipn = ip2_from;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+               m++;
+       } while (ipn++ < ip2_to);
+
+       if (n*m > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
index ec1564a1cb5a59c36401bc4484ceaebac19689d3..09cf72eb37f8d2549b3527530f1e4ca9e7d10efa 100644 (file)
@@ -158,7 +158,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 port, port_to, p = 0, ip = 0, ip_to = 0;
+       u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
+       u64 n = 0;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -235,6 +236,14 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
+               n++;
+       } while (ipn++ < ip_to);
+
+       if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip);
index 0e91d1e82f1cfd3e0de59a3327125e723a6c9c31..19bcdb3141f6e6f17d061477c8517f0f94ef0a55 100644 (file)
@@ -182,7 +182,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netportnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-       u32 ip2_from = 0, ip2_to = 0, ip2;
+       u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
+       u64 n = 0, m = 0;
        bool with_ports = false;
        int ret;
 
@@ -284,6 +285,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
+       ipn = ip;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+               n++;
+       } while (ipn++ < ip_to);
+       ipn = ip2_from;
+       do {
+               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+               m++;
+       } while (ipn++ < ip2_to);
+
+       if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
+               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
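
All of the ipset hunks above share one idea: compute the requested range
size in 64-bit and reject anything over IPSET_MAX_RANGE, so a hostile
range can neither wrap 32-bit arithmetic nor stall the kernel inserting
millions of elements. A sketch of the overflow-safe bound check:

    #include <linux/errno.h>
    #include <linux/types.h>

    #define IPSET_MAX_RANGE (1 << 20)       /* cap used by the series */

    static int check_add_range(u32 ip, u32 ip_to, u32 port, u32 port_to)
    {
            /* widen before the math: in u32, ip_to - ip + 1 wraps to 0
             * for the full 0.0.0.0-255.255.255.255 range
             */
            u64 n = ((u64)ip_to - ip + 1) * (u64)(port_to - port + 1);

            return n > IPSET_MAX_RANGE ? -ERANGE : 0;
    }
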
index 5c03e510675193c1812bb9b8291963d3c35d748b..d31dbccbe7bd457a28fb10d9a2ea969d496fd4ca 100644 (file)
@@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 
 struct conntrack_gc_work {
        struct delayed_work     dwork;
-       u32                     last_bucket;
+       u32                     next_bucket;
        bool                    exiting;
        bool                    early_drop;
-       long                    next_gc_run;
 };
 
 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV     128u
-/* upper bound of full table scan */
-#define GC_MAX_SCAN_JIFFIES    (16u * HZ)
-/* desired ratio of entries found to be expired */
-#define GC_EVICT_RATIO 50u
+#define GC_SCAN_INTERVAL       (120u * HZ)
+#define GC_SCAN_MAX_DURATION   msecs_to_jiffies(10)
 
 static struct conntrack_gc_work conntrack_gc_work;
 
@@ -1363,17 +1358,13 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
 
 static void gc_worker(struct work_struct *work)
 {
-       unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
-       unsigned int i, goal, buckets = 0, expired_count = 0;
-       unsigned int nf_conntrack_max95 = 0;
+       unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+       unsigned int i, hashsz, nf_conntrack_max95 = 0;
+       unsigned long next_run = GC_SCAN_INTERVAL;
        struct conntrack_gc_work *gc_work;
-       unsigned int ratio, scanned = 0;
-       unsigned long next_run;
-
        gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
 
-       goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
-       i = gc_work->last_bucket;
+       i = gc_work->next_bucket;
        if (gc_work->early_drop)
                nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
 
@@ -1381,15 +1372,15 @@ static void gc_worker(struct work_struct *work)
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_head *ct_hash;
                struct hlist_nulls_node *n;
-               unsigned int hashsz;
                struct nf_conn *tmp;
 
-               i++;
                rcu_read_lock();
 
                nf_conntrack_get_ht(&ct_hash, &hashsz);
-               if (i >= hashsz)
-                       i = 0;
+               if (i >= hashsz) {
+                       rcu_read_unlock();
+                       break;
+               }
 
                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        struct nf_conntrack_net *cnet;
@@ -1397,7 +1388,6 @@ static void gc_worker(struct work_struct *work)
 
                        tmp = nf_ct_tuplehash_to_ctrack(h);
 
-                       scanned++;
                        if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
                                nf_ct_offload_timeout(tmp);
                                continue;
@@ -1405,7 +1395,6 @@ static void gc_worker(struct work_struct *work)
 
                        if (nf_ct_is_expired(tmp)) {
                                nf_ct_gc_expired(tmp);
-                               expired_count++;
                                continue;
                        }
 
@@ -1438,7 +1427,14 @@ static void gc_worker(struct work_struct *work)
                 */
                rcu_read_unlock();
                cond_resched();
-       } while (++buckets < goal);
+               i++;
+
+               if (time_after(jiffies, end_time) && i < hashsz) {
+                       gc_work->next_bucket = i;
+                       next_run = 0;
+                       break;
+               }
+       } while (i < hashsz);
 
        if (gc_work->exiting)
                return;
@@ -1449,40 +1445,17 @@ static void gc_worker(struct work_struct *work)
         *
         * This worker is only here to reap expired entries when system went
         * idle after a busy period.
-        *
-        * The heuristics below are supposed to balance conflicting goals:
-        *
-        * 1. Minimize time until we notice a stale entry
-        * 2. Maximize scan intervals to not waste cycles
-        *
-        * Normally, expire ratio will be close to 0.
-        *
-        * As soon as a sizeable fraction of the entries have expired
-        * increase scan frequency.
         */
-       ratio = scanned ? expired_count * 100 / scanned : 0;
-       if (ratio > GC_EVICT_RATIO) {
-               gc_work->next_gc_run = min_interval;
-       } else {
-               unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
-
-               BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
-
-               gc_work->next_gc_run += min_interval;
-               if (gc_work->next_gc_run > max)
-                       gc_work->next_gc_run = max;
+       if (next_run) {
+               gc_work->early_drop = false;
+               gc_work->next_bucket = 0;
        }
-
-       next_run = gc_work->next_gc_run;
-       gc_work->last_bucket = i;
-       gc_work->early_drop = false;
        queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
 }
 
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
        INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
-       gc_work->next_gc_run = HZ;
        gc_work->exiting = false;
 }
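
The gc_worker() rewrite above replaces the ratio heuristics with a simple
time budget: scan until roughly 10ms have elapsed, remember the bucket
reached, and requeue with zero delay to resume; a completed pass sleeps
for the long interval. A sketch of that time-sliced shape, with
scan_bucket() and table_size() as hypothetical stand-ins for the hash
walk:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct my_gc {
            struct delayed_work dwork;
            unsigned int next_bucket;       /* resume point */
    };

    static unsigned int table_size(void);
    static void scan_bucket(unsigned int i);

    static void budgeted_gc(struct work_struct *work)
    {
            struct my_gc *gc = container_of(to_delayed_work(work),
                                            struct my_gc, dwork);
            unsigned long deadline = jiffies + msecs_to_jiffies(10);
            unsigned long next_run = 120 * HZ;
            unsigned int i = gc->next_bucket;

            while (i < table_size()) {
                    scan_bucket(i++);
                    if (time_after(jiffies, deadline) && i < table_size()) {
                            next_run = 0;   /* out of budget: resume ASAP */
                            break;
                    }
            }
            gc->next_bucket = (i < table_size()) ? i : 0;
            queue_delayed_work(system_power_efficient_wq, &gc->dwork,
                               next_run);
    }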
 
index 3259416f2ea4b1a85066c48ddb8f50478f99bae6..af5115e127cfd75e09ba8dc9e1bf70719c8dc645 100644 (file)
@@ -1478,7 +1478,6 @@ void nf_conntrack_tcp_init_net(struct net *net)
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        tn->offload_timeout = 30 * HZ;
-       tn->offload_pickup = 120 * HZ;
 #endif
 }
 
index 698fee49e7324929ffa38ddf2d65fa931e09dce8..f8e3c0d2602f689ca35fc1ac4c6845e6a32d5ae4 100644 (file)
@@ -271,7 +271,6 @@ void nf_conntrack_udp_init_net(struct net *net)
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        un->offload_timeout = 30 * HZ;
-       un->offload_pickup = 30 * HZ;
 #endif
 }
 
index 214d9f9e499bd0c8feecb9c4f151da1a3f3f78f5..e84b499b7bfa3fc7107387b10f1bb3d76080d941 100644 (file)
@@ -575,7 +575,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP,
 #endif
        NF_SYSCTL_CT_PROTO_TCP_LOOSE,
        NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
@@ -585,7 +584,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP,
 #endif
        NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
        NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
@@ -776,12 +774,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = {
-               .procname       = "nf_flowtable_tcp_pickup",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
                .procname       = "nf_conntrack_tcp_loose",
@@ -832,12 +824,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = {
-               .procname       = "nf_flowtable_udp_pickup",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
                .procname       = "nf_conntrack_icmp_timeout",
@@ -1018,7 +1004,6 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
-       table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup;
 #endif
 
 }
@@ -1111,7 +1096,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
-       table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup;
 #endif
 
        nf_conntrack_standalone_init_tcp_sysctl(net, table);
index 551976e4284cad8f373c9c23236a7096375974aa..8788b519255e8edbb6c9158a125b42f02abc4d33 100644 (file)
@@ -183,7 +183,7 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        const struct nf_conntrack_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);
        int l4num = nf_ct_protonum(ct);
-       unsigned int timeout;
+       s32 timeout;
 
        l4proto = nf_ct_l4proto_find(l4num);
        if (!l4proto)
@@ -192,15 +192,20 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        if (l4num == IPPROTO_TCP) {
                struct nf_tcp_net *tn = nf_tcp_pernet(net);
 
-               timeout = tn->offload_pickup;
+               timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+               timeout -= tn->offload_timeout;
        } else if (l4num == IPPROTO_UDP) {
                struct nf_udp_net *tn = nf_udp_pernet(net);
 
-               timeout = tn->offload_pickup;
+               timeout = tn->timeouts[UDP_CT_REPLIED];
+               timeout -= tn->offload_timeout;
        } else {
                return;
        }
 
+       if (timeout < 0)
+               timeout = 0;
+
        if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
                ct->timeout = nfct_time_stamp + timeout;
 }
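
With the pickup sysctls gone, the fixup above derives the remaining
conntrack lifetime as "protocol timeout minus offload timeout", which can
go negative once the sysctls are tuned. A sketch of why the math must be
signed:

    #include <linux/types.h>

    static u32 fixup_offload_timeout(u32 proto_timeout, u32 offload_timeout)
    {
            s32 t = proto_timeout - offload_timeout;

            /* as unsigned, a negative result would wrap to a huge value */
            return t < 0 ? 0 : t;
    }
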
index 202f57d17bab8474aea378a3955a76a29796874f..f554e2ea32eed6347bf6cd066cc26430e2efde10 100644 (file)
@@ -89,11 +89,15 @@ static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
        if (!nest2)
                goto cancel_nest;
 
-       ret = nla_put_string(nlskb, NFTA_CHAIN_TABLE, chain->table->name);
+       ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name);
        if (ret)
                goto cancel_nest;
 
-       ret = nla_put_string(nlskb, NFTA_CHAIN_NAME, chain->name);
+       ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name);
+       if (ret)
+               goto cancel_nest;
+
+       ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family);
        if (ret)
                goto cancel_nest;
 
@@ -109,18 +113,19 @@ cancel_nest:
 static int nfnl_hook_dump_one(struct sk_buff *nlskb,
                              const struct nfnl_dump_hook_data *ctx,
                              const struct nf_hook_ops *ops,
-                             unsigned int seq)
+                             int family, unsigned int seq)
 {
        u16 event = nfnl_msg_type(NFNL_SUBSYS_HOOK, NFNL_MSG_HOOK_GET);
        unsigned int portid = NETLINK_CB(nlskb).portid;
        struct nlmsghdr *nlh;
        int ret = -EMSGSIZE;
+       u32 hooknum;
 #ifdef CONFIG_KALLSYMS
        char sym[KSYM_SYMBOL_LEN];
        char *module_name;
 #endif
        nlh = nfnl_msg_put(nlskb, portid, seq, event,
-                          NLM_F_MULTI, ops->pf, NFNETLINK_V0, 0);
+                          NLM_F_MULTI, family, NFNETLINK_V0, 0);
        if (!nlh)
                goto nla_put_failure;
 
@@ -135,6 +140,7 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
        if (module_name) {
                char *end;
 
+               *module_name = '\0';
                module_name += 2;
                end = strchr(module_name, ']');
                if (end) {
@@ -151,7 +157,12 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
                goto nla_put_failure;
 #endif
 
-       ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(ops->hooknum));
+       if (ops->pf == NFPROTO_INET && ops->hooknum == NF_INET_INGRESS)
+               hooknum = NF_NETDEV_INGRESS;
+       else
+               hooknum = ops->hooknum;
+
+       ret = nla_put_be32(nlskb, NFNLA_HOOK_HOOKNUM, htonl(hooknum));
        if (ret)
                goto nla_put_failure;
 
@@ -259,7 +270,8 @@ static int nfnl_hook_dump(struct sk_buff *nlskb,
        ops = nf_hook_entries_get_hook_ops(e);
 
        for (; i < e->num_hook_entries; i++) {
-               err = nfnl_hook_dump_one(nlskb, ctx, ops[i], cb->seq);
+               err = nfnl_hook_dump_one(nlskb, ctx, ops[i], family,
+                                        cb->nlh->nlmsg_seq);
                if (err)
                        break;
        }
index e586424d8b04a377d7b1b3649f95168771fd6feb..9713035b89e3ab2a20f9826621063a66c2f4c994 100644 (file)
@@ -293,14 +293,14 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
 }
 
 /**
- * Parse vlan tag from vlan header.
+ * parse_vlan_tag - Parse vlan tag from vlan header.
  * @skb: skb containing frame to parse
  * @key_vh: pointer to parsed vlan tag
  * @untag_vlan: should the vlan header be removed from the frame
  *
- * Returns ERROR on memory error.
- * Returns 0 if it encounters a non-vlan or incomplete packet.
- * Returns 1 after successfully parsing vlan tag.
+ * Return: ERROR on memory error.
+ * %0 if it encounters a non-vlan or incomplete packet.
+ * %1 after successfully parsing vlan tag.
  */
 static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
                          bool untag_vlan)
@@ -532,6 +532,7 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
  *       L3 header
  * @key: output flow key
  *
+ * Return: %0 if successful, otherwise a negative errno value.
  */
 static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
 {
@@ -748,8 +749,6 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
  *
  * The caller must ensure that skb->len >= ETH_HLEN.
  *
- * Returns 0 if successful, otherwise a negative errno value.
- *
  * Initializes @skb header fields as follows:
  *
  *    - skb->mac_header: the L2 header.
@@ -764,6 +763,8 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
  *
  *    - skb->protocol: the type of the data starting at skb->network_header.
  *      Equals to key->eth.type.
+ *
+ * Return: %0 if successful, otherwise a negative errno value.
  */
 static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 {
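
The comment churn above is kernel-doc normalization: blocks must open with
"name - summary" and report results under a "Return:" section, or the docs
build warns. A sketch of the target shape, with parse_thing() as a
hypothetical example:

    #include <linux/skbuff.h>

    struct thing;

    /**
     * parse_thing - parse one thing out of an skb
     * @skb: skb containing the frame to parse
     * @out: parsed result
     *
     * Return: %0 if successful, otherwise a negative errno value.
     */
    static int parse_thing(struct sk_buff *skb, struct thing *out);
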
index 88deb5b41429f5852a7613d50aaf4003861172f5..cf2ce581248968293efd5cd5d3462d8a4ca94e4e 100644 (file)
@@ -507,6 +507,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
        }
 
        skb->dev = vport->dev;
+       skb->tstamp = 0;
        vport->ops->send(skb);
        return;
 
index 1dc955ca57d3be7281cadd6e4553080aa611fe7b..fa611678af05260b313d12644a0ea12c49c43f0c 100644 (file)
@@ -15,7 +15,6 @@ struct qrtr_mhi_dev {
        struct qrtr_endpoint ep;
        struct mhi_device *mhi_dev;
        struct device *dev;
-       struct completion ready;
 };
 
 /* From MHI to QRTR */
@@ -51,10 +50,6 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
        struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
        int rc;
 
-       rc = wait_for_completion_interruptible(&qdev->ready);
-       if (rc)
-               goto free_skb;
-
        if (skb->sk)
                sock_hold(skb->sk);
 
@@ -84,7 +79,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        int rc;
 
        /* start channels */
-       rc = mhi_prepare_for_transfer(mhi_dev, 0);
+       rc = mhi_prepare_for_transfer(mhi_dev);
        if (rc)
                return rc;
 
@@ -101,15 +96,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        if (rc)
                return rc;
 
-       /* start channels */
-       rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
-       if (rc) {
-               qrtr_endpoint_unregister(&qdev->ep);
-               dev_set_drvdata(&mhi_dev->dev, NULL);
-               return rc;
-       }
-
-       complete_all(&qdev->ready);
        dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
 
        return 0;
index 171b7f3be6ef35265a9031b75f1161816bec55d4..0c30908628bae040de1f4d0d16c0d195282c61a8 100644 (file)
@@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                goto err;
        }
 
-       if (len != ALIGN(size, 4) + hdrlen)
+       if (!size || len != ALIGN(size, 4) + hdrlen)
                goto err;
 
        if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
index 9b6ffff72f2d117d78a885f0499a448dfb7337e5..28c1b00221780f7971cff55e4f04fd90196ce2e9 100644 (file)
@@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
                cpu_relax();
        }
 
-       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
+       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
                                &off, PAGE_SIZE);
-       if (unlikely(ret != ibmr->sg_len))
+       if (unlikely(ret != ibmr->sg_dma_len))
                return ret < 0 ? ret : -EINVAL;
 
        if (cmpxchg(&frmr->fr_state,
index 7153c67f641e1778b2661582e2f58cdb4c6d5a40..2ef4cd2c848b2d975ef0f625b51dd461ebec3a52 100644 (file)
@@ -273,6 +273,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
                        goto out;
        }
 
+       /* All mirred/redirected skbs should clear previous ct info */
+       nf_reset_ct(skb2);
+
        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
 
        expects_nh = want_ingress || !m_mac_header_xmit;
index 951542843cab282f4841e2bb7a5bc6afc5f33ee8..28af8b1e1bb1f750df8c355d58beedadf3a9e8e5 100644 (file)
@@ -720,7 +720,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 skip_hash:
        if (flow_override)
                flow_hash = flow_override - 1;
-       else if (use_skbhash)
+       else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
                flow_hash = skb->hash;
        if (host_override) {
                dsthost_hash = host_override - 1;
index c1e84d1eeaba8fc2181c9dba3bfad78b6f981097..c76701ac35abf58e8aa1aa0aa10f2406801e4d00 100644 (file)
@@ -660,6 +660,13 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
        sch_tree_lock(sch);
 
        q->nbands = nbands;
+       for (i = nstrict; i < q->nstrict; i++) {
+               INIT_LIST_HEAD(&q->classes[i].alist);
+               if (q->classes[i].qdisc->q.qlen) {
+                       list_add_tail(&q->classes[i].alist, &q->active);
+                       q->classes[i].deficit = quanta[i];
+               }
+       }
        q->nstrict = nstrict;
        memcpy(q->prio2band, priomap, sizeof(priomap));
 
index 898389611ae8179bdb53395b2bf738c88f83bd71..c038efc23ce386fa96b7cc7ba177bc63034f1023 100644 (file)
@@ -795,7 +795,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
                        reason_code = SMC_CLC_DECL_NOSRVLINK;
                        goto connect_abort;
                }
-               smc->conn.lnk = link;
+               smc_switch_link_and_count(&smc->conn, link);
        }
 
        /* create send buffer and rmb */
index cd0d7c908b2ab81d315734d40c5fc5129d0279ee..c160ff50c053ad0b55a18df1d9959a5ee7abb6d5 100644 (file)
@@ -917,8 +917,8 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
        return rc;
 }
 
-static void smc_switch_link_and_count(struct smc_connection *conn,
-                                     struct smc_link *to_lnk)
+void smc_switch_link_and_count(struct smc_connection *conn,
+                              struct smc_link *to_lnk)
 {
        atomic_dec(&conn->lnk->conn_cnt);
        conn->lnk = to_lnk;
index 6d6fd1397c87de5ff1427b1fc3bf3d3cc7db94fa..c043ecdca5c445efe565acabc1ecd69adf90eaf2 100644 (file)
@@ -97,6 +97,7 @@ struct smc_link {
        unsigned long           *wr_tx_mask;    /* bit mask of used indexes */
        u32                     wr_tx_cnt;      /* number of WR send buffers */
        wait_queue_head_t       wr_tx_wait;     /* wait for free WR send buf */
+       atomic_t                wr_tx_refcnt;   /* tx refs to link */
 
        struct smc_wr_buf       *wr_rx_bufs;    /* WR recv payload buffers */
        struct ib_recv_wr       *wr_rx_ibs;     /* WR recv meta data */
@@ -109,6 +110,7 @@ struct smc_link {
 
        struct ib_reg_wr        wr_reg;         /* WR register memory region */
        wait_queue_head_t       wr_reg_wait;    /* wait for wr_reg result */
+       atomic_t                wr_reg_refcnt;  /* reg refs to link */
        enum smc_wr_reg_state   wr_reg_state;   /* state of wr_reg request */
 
        u8                      gid[SMC_GID_SIZE];/* gid matching used vlan id*/
@@ -444,6 +446,8 @@ void smc_core_exit(void);
 int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
                   u8 link_idx, struct smc_init_info *ini);
 void smcr_link_clear(struct smc_link *lnk, bool log);
+void smc_switch_link_and_count(struct smc_connection *conn,
+                              struct smc_link *to_lnk);
 int smcr_buf_map_lgr(struct smc_link *lnk);
 int smcr_buf_reg_lgr(struct smc_link *lnk);
 void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
index 273eaf1bfe49a6af2094e46b4c5a4029583f4c66..2e7560eba9812635c22f18f41aafc38b063dd02a 100644 (file)
@@ -888,6 +888,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
        if (!rc)
                goto out;
 out_clear_lnk:
+       lnk_new->state = SMC_LNK_INACTIVE;
        smcr_link_clear(lnk_new, false);
 out_reject:
        smc_llc_cli_add_link_reject(qentry);
@@ -1184,6 +1185,7 @@ int smc_llc_srv_add_link(struct smc_link *link)
                goto out_err;
        return 0;
 out_err:
+       link_new->state = SMC_LNK_INACTIVE;
        smcr_link_clear(link_new, false);
        return rc;
 }
@@ -1286,10 +1288,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
        del_llc->reason = 0;
        smc_llc_send_message(lnk, &qentry->msg); /* response */
 
-       if (smc_link_downing(&lnk_del->state)) {
-               if (smc_switch_conns(lgr, lnk_del, false))
-                       smc_wr_tx_wait_no_pending_sends(lnk_del);
-       }
+       if (smc_link_downing(&lnk_del->state))
+               smc_switch_conns(lgr, lnk_del, false);
        smcr_link_clear(lnk_del, true);
 
        active_links = smc_llc_active_link_count(lgr);
@@ -1805,8 +1805,6 @@ void smc_llc_link_clear(struct smc_link *link, bool log)
                                    link->smcibdev->ibdev->name, link->ibport);
        complete(&link->llc_testlink_resp);
        cancel_delayed_work_sync(&link->llc_testlink_wrk);
-       smc_wr_wakeup_reg_wait(link);
-       smc_wr_wakeup_tx_wait(link);
 }
 
 /* register a new rtoken at the remote peer (for all links) */
index 289025cd545ac8e07721d1eb537f76eabe079103..c79361dfcdfb9f21d9541a7249b84227ca603b38 100644 (file)
@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
 /* Wakeup sndbuf consumers from any context (IRQ or process)
  * since there is more data to transmit; usable snd_wnd as max transmit
  */
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
        struct smc_link *link = conn->lnk;
@@ -550,6 +550,22 @@ out_unlock:
        return rc;
 }
 
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+       struct smc_link *link = conn->lnk;
+       int rc = -ENOLINK;
+
+       if (!link)
+               return rc;
+
+       atomic_inc(&link->wr_tx_refcnt);
+       if (smc_link_usable(link))
+               rc = _smcr_tx_sndbuf_nonempty(conn);
+       if (atomic_dec_and_test(&link->wr_tx_refcnt))
+               wake_up_all(&link->wr_tx_wait);
+       return rc;
+}
+
 static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
index cbc73a7e4d590ed1e4cb989b7898d8163187351c..a419e9af36b982d0bbe6b4ce3a29478f42e360d2 100644 (file)
@@ -322,9 +322,12 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
        if (rc)
                return rc;
 
+       atomic_inc(&link->wr_reg_refcnt);
        rc = wait_event_interruptible_timeout(link->wr_reg_wait,
                                              (link->wr_reg_state != POSTED),
                                              SMC_WR_REG_MR_WAIT_TIME);
+       if (atomic_dec_and_test(&link->wr_reg_refcnt))
+               wake_up_all(&link->wr_reg_wait);
        if (!rc) {
                /* timeout - terminate link */
                smcr_link_down_cond_sched(link);
@@ -566,10 +569,15 @@ void smc_wr_free_link(struct smc_link *lnk)
                return;
        ibdev = lnk->smcibdev->ibdev;
 
+       smc_wr_wakeup_reg_wait(lnk);
+       smc_wr_wakeup_tx_wait(lnk);
+
        if (smc_wr_tx_wait_no_pending_sends(lnk))
                memset(lnk->wr_tx_mask, 0,
                       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
                                                sizeof(*lnk->wr_tx_mask));
+       wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
+       wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
 
        if (lnk->wr_rx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@@ -728,7 +736,9 @@ int smc_wr_create_link(struct smc_link *lnk)
        memset(lnk->wr_tx_mask, 0,
               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
        init_waitqueue_head(&lnk->wr_tx_wait);
+       atomic_set(&lnk->wr_tx_refcnt, 0);
        init_waitqueue_head(&lnk->wr_reg_wait);
+       atomic_set(&lnk->wr_reg_refcnt, 0);
        return rc;
 
 dma_unmap:
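
The smc hunks above add a pin/drain discipline: senders and reg requests
take a refcount on the link and wake its waitqueue on the last put, while
smc_wr_free_link() wakes any sleepers and then waits for both counts to
reach zero before tearing down. A sketch of the pattern, with use_link()
hypothetical and the struct smc_link fields as in the hunks:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/wait.h>

    static int send_pinned(struct smc_link *link)
    {
            int rc = -ENOLINK;

            atomic_inc(&link->wr_tx_refcnt);        /* pin the link */
            if (smc_link_usable(link))
                    rc = use_link(link);            /* hypothetical send */
            if (atomic_dec_and_test(&link->wr_tx_refcnt))
                    wake_up_all(&link->wr_tx_wait); /* last user: wake */
            return rc;
    }

    static void drain_before_free(struct smc_link *link)
    {
            wait_event(link->wr_tx_wait,
                       !atomic_read(&link->wr_tx_refcnt));
    }
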
index d66a8e44a1aeb56ad3018d04d0195fb55f49729b..dbb41821b1b8598599490cc269457a83b7b9781a 100644 (file)
@@ -835,7 +835,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
                rqstp->rq_stime = ktime_get();
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
-       }
+       } else
+               svc_xprt_received(xprt);
 out:
        trace_svc_handle_xprt(xprt, len);
        return len;
index cf586840caeb7511422bcca6744139df75c0742a..1b7a487c88419779536908b9a3e85cc7233a3778 100644 (file)
@@ -913,7 +913,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
-               return -ENOMEM;
+               return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
@@ -1031,7 +1031,7 @@ void tipc_link_reset(struct tipc_link *l)
  *
  * Consumes the buffer chain.
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS or -ENOMEM
+ * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  */
 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
@@ -1089,7 +1089,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                        if (!_skb) {
                                kfree_skb(skb);
                                __skb_queue_purge(list);
-                               return -ENOMEM;
+                               return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
                        tipc_link_set_skb_retransmit_time(skb, l);
index 75b99b7eda2227e62246e533724afdaf08eb9a44..8754bd885169dc351de34da41128bb7dc0f092d8 100644 (file)
@@ -1518,7 +1518,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 
        if (unlikely(syn && !rc)) {
                tipc_set_sk_state(sk, TIPC_CONNECTING);
-               if (timeout) {
+               if (dlen && timeout) {
                        timeout = msecs_to_jiffies(timeout);
                        tipc_wait_for_connect(sock, &timeout);
                }
index e0c2c992ad9c5f648aff86a80c7231539a837544..4f7c99dfd16cfeeaca42355366540ea99c9e83ea 100644 (file)
@@ -357,11 +357,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
 
 static void virtio_vsock_reset_sock(struct sock *sk)
 {
-       lock_sock(sk);
+       /* vmci_transport.c doesn't take sk_lock here either.  At least we're
+        * under vsock_table_lock so the sock cannot disappear while we're
+        * executing.
+        */
+
        sk->sk_state = TCP_CLOSE;
        sk->sk_err = ECONNRESET;
        sk_error_report(sk);
-       release_sock(sk);
 }
 
 static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
index 09533cbb7221db776753b62d70699b1b1b22a606..9ffa9e9c5c554a6939cf175560a617414ce38a00 100644 (file)
@@ -58,10 +58,11 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
        [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
        [LOCKDOWN_DEBUGFS] = "debugfs access",
        [LOCKDOWN_XMON_WR] = "xmon write access",
+       [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
        [LOCKDOWN_INTEGRITY_MAX] = "integrity",
        [LOCKDOWN_KCORE] = "/proc/kcore access",
        [LOCKDOWN_KPROBES] = "use of kprobes",
-       [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+       [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
        [LOCKDOWN_PERF] = "unsafe use of perf",
        [LOCKDOWN_TRACEFS] = "use of tracefs",
        [LOCKDOWN_XMON_RW] = "xmon read and write access",
index 09c0e2a6489c4b4700050cf0ed710a456445ccfb..71323d807dbf45ca3eb95d61397afe7f2c93e4f5 100644 (file)
@@ -251,7 +251,10 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
 
        switch (substream->dma_buffer.dev.type) {
        case SNDRV_DMA_TYPE_UNKNOWN:
-               return false;
+               /* we can't know the device, so just assume that the driver does
+                * everything right
+                */
+               return true;
        case SNDRV_DMA_TYPE_CONTINUOUS:
        case SNDRV_DMA_TYPE_VMALLOC:
                return true;
index 0ef242fdd3bc38dd7341fe81246814020f6cbf9b..fff18b5d4e05249ebdaa6bd024366be3eae313f7 100644 (file)
@@ -153,7 +153,7 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
        struct cmp_connection *conn;
        enum cmp_direction c_dir;
        enum amdtp_stream_direction s_dir;
-       unsigned int flags = CIP_UNAWARE_SYT;
+       unsigned int flags = 0;
        int err;
 
        if (!(oxfw->quirks & SND_OXFW_QUIRK_BLOCKING_TRANSMISSION))
@@ -161,6 +161,13 @@ static int init_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
        else
                flags |= CIP_BLOCKING;
 
+       // OXFW 970/971 has no function to generate playback timing according to the sequence
+       // of values in the syt field, so the packet should carry the NO_INFO value there.
+       // However, some models simply ignore data blocks in packets with NO_INFO for audio
+       // data processing.
+       if (!(oxfw->quirks & SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET))
+               flags |= CIP_UNAWARE_SYT;
+
        if (stream == &oxfw->tx_stream) {
                conn = &oxfw->out_conn;
                c_dir = CMP_OUTPUT;
index 84971d78d152451abd6dcfa5bda4a948b8ebe85f..cb5b5e3a481b9ba3149fdbee7fb896c4bf7f7d46 100644 (file)
@@ -159,8 +159,10 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
                return snd_oxfw_scs1x_add(oxfw);
        }
 
-       if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW)
-               oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+       if (entry->vendor_id == OUI_APOGEE && entry->model_id == MODEL_DUET_FW) {
+               oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION |
+                               SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET;
+       }
 
        /*
         * TASCAM FireOne has physical control and requires a pair of additional
index ee47abcb0c909896bfd99eed85c73f661231f58c..c13034f6c2ca53c99b3496af0dae78fb0a94c11f 100644 (file)
@@ -42,6 +42,11 @@ enum snd_oxfw_quirk {
        SND_OXFW_QUIRK_BLOCKING_TRANSMISSION = 0x04,
        // Stanton SCS1.d and SCS1.m support unique transaction.
        SND_OXFW_QUIRK_SCS_TRANSACTION = 0x08,
+       // Apogee Duet FireWire ignores data blocks in packets with NO_INFO for audio data
+       // processing, although its output level meter still moves. Any value in the syt
+       // field of a packet makes the device process audio data, even if the value is
+       // invalid from the viewpoint of IEC 61883-1/6.
+       SND_OXFW_QUIRK_IGNORE_NO_INFO_PACKET = 0x10,
 };
 
 /* This is an arbitrary number for convenience. */
index e97d00585e8ec219a63e4e8e086b9861a51cd3df..481d8f8d33962ecd376d828343a84fa57eda05c9 100644 (file)
@@ -3460,7 +3460,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
        struct hda_gen_spec *spec = codec->spec;
        const struct hda_input_mux *imux;
        struct nid_path *path;
-       int i, adc_idx, err = 0;
+       int i, adc_idx, ret, err = 0;
 
        imux = &spec->input_mux;
        adc_idx = kcontrol->id.index;
@@ -3470,9 +3470,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
                if (!path || !path->ctls[type])
                        continue;
                kcontrol->private_value = path->ctls[type];
-               err = func(kcontrol, ucontrol);
-               if (err < 0)
+               ret = func(kcontrol, ucontrol);
+               if (ret < 0) {
+                       err = ret;
                        break;
+               }
+               if (ret > 0)
+                       err = 1;
        }
        mutex_unlock(&codec->control_mutex);
        if (err >= 0 && spec->cap_sync_hook)
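
The cap_put_caller() fix above tracks two things at once: the first negative return aborts the loop, while any positive return ("value changed") must survive later calls that return 0. A standalone sketch of that aggregation rule, assuming the ALSA put-callback convention of <0 error / 0 unchanged / 1 changed (the helper is hypothetical):

    #include <stddef.h>

    /* Aggregate put()-style results: <0 error, 0 unchanged, 1 changed. */
    static int aggregate_put_results(int (*ops[])(void), size_t n)
    {
            int ret, err = 0;
            size_t i;

            for (i = 0; i < n; i++) {
                    ret = ops[i]();
                    if (ret < 0) {
                            err = ret;      /* first error wins; stop */
                            break;
                    }
                    if (ret > 0)
                            err = 1;        /* a later 0 must not reset "changed" */
            }
            return err;
    }
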
index 0322b289505e7f89196932a2d064a7ed32bdb4b7..0062c18b646afe5220593862b437091e2a773ba7 100644 (file)
@@ -883,10 +883,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
        return azx_get_pos_posbuf(chip, azx_dev);
 }
 
-static void azx_shutdown_chip(struct azx *chip)
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
 {
        azx_stop_chip(chip);
-       azx_enter_link_reset(chip);
+       if (!skip_link_reset)
+               azx_enter_link_reset(chip);
        azx_clear_irq_pending(chip);
        display_power(chip, false);
 }
@@ -895,6 +896,11 @@ static void azx_shutdown_chip(struct azx *chip)
 static DEFINE_MUTEX(card_list_lock);
 static LIST_HEAD(card_list);
 
+static void azx_shutdown_chip(struct azx *chip)
+{
+       __azx_shutdown_chip(chip, false);
+}
+
 static void azx_add_card_list(struct azx *chip)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -2385,7 +2391,7 @@ static void azx_shutdown(struct pci_dev *pci)
                return;
        chip = card->private_data;
        if (chip && chip->running)
-               azx_shutdown_chip(chip);
+               __azx_shutdown_chip(chip, true);
 }
 
 /* PCI IDs */
index 21c521596c9d02c8f5c91429e59d2d4a1f06300b..7ad689f991e7edf93037886ac5190977792339c3 100644 (file)
@@ -6658,6 +6658,7 @@ enum {
        ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
        ALC623_FIXUP_LENOVO_THINKSTATION_P340,
        ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
+       ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8242,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
        },
+       [ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8332,6 +8339,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8431,13 +8439,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+       SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
-       SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
@@ -8465,6 +8474,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+       SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
index a5c1a2c4eae4e6f2d53968d514696e371e759584..773a136161f11a03b68eda435ca3d398ddd91577 100644 (file)
@@ -1041,6 +1041,7 @@ static const struct hda_fixup via_fixups[] = {
 };
 
 static const struct snd_pci_quirk vt2002p_fixups[] = {
+       SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE),
        SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
index 8a13462e1a63ab5069b0d27d47acd0757ae2c470..5dcf77af07afcbfcede12ec0b3c24de5fbfb3e38 100644 (file)
@@ -36,6 +36,7 @@ config SND_SOC_COMPRESS
 
 config SND_SOC_TOPOLOGY
        bool
+       select SND_DYNAMIC_MINORS
 
 config SND_SOC_TOPOLOGY_KUNIT_TEST
        tristate "KUnit tests for SoC topology"
index 9449fb40a956bd064462b4f4386a8878e0b1be7e..3c60c5f96dcba43a0e577df41617950db70b4e13 100644 (file)
@@ -525,6 +525,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                                | SND_SOC_DAIFMT_CBM_CFM,
                .init = cz_da7219_init,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_da7219_play_ops,
                SND_SOC_DAILINK_REG(designware1, dlgs, platform),
        },
@@ -534,6 +535,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_da7219_cap_ops,
                SND_SOC_DAILINK_REG(designware2, dlgs, platform),
        },
@@ -543,6 +545,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_max_play_ops,
                SND_SOC_DAILINK_REG(designware3, mx, platform),
        },
@@ -553,6 +556,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_dmic0_cap_ops,
                SND_SOC_DAILINK_REG(designware3, adau, platform),
        },
@@ -563,6 +567,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_dmic1_cap_ops,
                SND_SOC_DAILINK_REG(designware2, adau, platform),
        },
index 143155a840aca797b2a54b28829e026c85adae14..cc1ce6f22caaddf6ac06b5716bc2292c77c74548 100644 (file)
@@ -969,7 +969,7 @@ static int acp_dma_hw_params(struct snd_soc_component *component,
 
        acp_set_sram_bank_state(rtd->acp_mmio, 0, true);
        /* Save for runtime private data */
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = runtime->dma_addr;
        rtd->order = get_order(size);
 
        /* Fill the page table entries in ACP SRAM */
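
This hunk, and several later ones (acp3x, acp-pdm, uniphier, xlnx), replace substream->dma_buffer.addr with the runtime's dma_addr: the buffer actually bound to the stream once hw_params has run is the runtime's, which may differ from the preallocated template, so only the runtime address is safe to program into hardware. A tiny sketch of the rule with simplified stand-in types (not the real ALSA structures):

    #include <stdint.h>

    typedef uint64_t dma_addr_t;

    struct runtime   { dma_addr_t dma_addr; };
    struct substream { struct runtime *runtime; };

    static dma_addr_t stream_dma_addr(const struct substream *s)
    {
            /* valid only after hw_params has bound the buffer */
            return s->runtime->dma_addr;
    }
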
index 8148b0d22e880398fbaed3f9e0fbd5a1bffbb661..597d7c4b2a6b08f8f30ec35a5d1eaa396179137a 100644 (file)
@@ -286,7 +286,7 @@ static int acp3x_dma_hw_params(struct snd_soc_component *component,
                pr_err("pinfo failed\n");
        }
        size = params_buffer_bytes(params);
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = substream->runtime->dma_addr;
        rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
        config_acp3x_dma(rtd, substream->stream);
        return 0;
index bd20622b09334f99e4fa1267499edba3e59c6ec9..0391c28dd0788f56f71aa0291f96da0f381baa5e 100644 (file)
@@ -242,7 +242,7 @@ static int acp_pdm_dma_hw_params(struct snd_soc_component *component,
                return -EINVAL;
        size = params_buffer_bytes(params);
        period_bytes = params_period_bytes(params);
-       rtd->dma_addr = substream->dma_buffer.addr;
+       rtd->dma_addr = substream->runtime->dma_addr;
        rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
        config_acp_dma(rtd, substream->stream);
        init_pdm_ring_buffer(MEM_WINDOW_START, size, period_bytes,
index 19438da5dfa5d68c04513b9bac95d38609380aa1..7b8040e812a188c7b37c3b21bccdc12e00832f4b 100644 (file)
@@ -382,6 +382,8 @@ static const struct dev_pm_ops rn_acp_pm = {
        .runtime_resume =  snd_rn_acp_resume,
        .suspend = snd_rn_acp_suspend,
        .resume =       snd_rn_acp_resume,
+       .restore =      snd_rn_acp_resume,
+       .poweroff =     snd_rn_acp_suspend,
 };
 
 static void snd_rn_acp_remove(struct pci_dev *pci)
index a3b784ed4f70a28c6c158942da927c5ed5674be6..db16071205ba936bb2c8eca6ff722b55c8fdb475 100644 (file)
@@ -1559,6 +1559,7 @@ config SND_SOC_WCD934X
 config SND_SOC_WCD938X
        depends on SND_SOC_WCD938X_SDW
        tristate
+       depends on SOUNDWIRE || !SOUNDWIRE
 
 config SND_SOC_WCD938X_SDW
        tristate "WCD9380/WCD9385 Codec - SDW"
index de8b83dd2c7643de8126e710763d6a49cfeb46db..7bb38c3708420945011ae3da049c312975ffec4c 100644 (file)
@@ -583,7 +583,10 @@ obj-$(CONFIG_SND_SOC_WCD_MBHC)     += snd-soc-wcd-mbhc.o
 obj-$(CONFIG_SND_SOC_WCD9335)  += snd-soc-wcd9335.o
 obj-$(CONFIG_SND_SOC_WCD934X)  += snd-soc-wcd934x.o
 obj-$(CONFIG_SND_SOC_WCD938X)  += snd-soc-wcd938x.o
-obj-$(CONFIG_SND_SOC_WCD938X_SDW) += snd-soc-wcd938x-sdw.o
+ifdef CONFIG_SND_SOC_WCD938X_SDW
+# avoid link failure by forcing sdw code built-in when needed
+obj-$(CONFIG_SND_SOC_WCD938X) += snd-soc-wcd938x-sdw.o
+endif
 obj-$(CONFIG_SND_SOC_WL1273)   += snd-soc-wl1273.o
 obj-$(CONFIG_SND_SOC_WM0010)   += snd-soc-wm0010.o
 obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
index eff013f295be493e5227fbdd6242c3a4b18c1090..99c022be94a6882295a5590e80a6f581f142822b 100644 (file)
@@ -405,7 +405,7 @@ static const struct regmap_config cs42l42_regmap = {
        .use_single_write = true,
 };
 
-static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true);
 static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
 
 static const char * const cs42l42_hpf_freq_text[] = {
@@ -425,34 +425,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
                            CS42L42_ADC_WNF_CF_SHIFT,
                            cs42l42_wnf3_freq_text);
 
-static const char * const cs42l42_wnf05_freq_text[] = {
-       "280Hz", "315Hz", "350Hz", "385Hz",
-       "420Hz", "455Hz", "490Hz", "525Hz"
-};
-
-static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
-                           CS42L42_ADC_WNF_CF_SHIFT,
-                           cs42l42_wnf05_freq_text);
-
 static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
        /* ADC Volume and Filter Controls */
        SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
-                               CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
+                               CS42L42_ADC_NOTCH_DIS_SHIFT, true, true),
        SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
        SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_INV_SHIFT, true, false),
        SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
                                CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
-       SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
-                               CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
+       SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv),
        SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
                                CS42L42_ADC_WNF_EN_SHIFT, true, false),
        SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
                                CS42L42_ADC_HPF_EN_SHIFT, true, false),
        SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
        SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
-       SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),
 
        /* DAC Volume and Filter Controls */
        SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
@@ -471,8 +460,8 @@ static const struct snd_soc_dapm_widget cs42l42_dapm_widgets[] = {
        SND_SOC_DAPM_OUTPUT("HP"),
        SND_SOC_DAPM_DAC("DAC", NULL, CS42L42_PWR_CTL1, CS42L42_HP_PDN_SHIFT, 1),
        SND_SOC_DAPM_MIXER("MIXER", CS42L42_PWR_CTL1, CS42L42_MIXER_PDN_SHIFT, 1, NULL, 0),
-       SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH1_SHIFT, 0),
-       SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, CS42L42_ASP_RX_DAI0_EN, CS42L42_ASP_RX0_CH2_SHIFT, 0),
+       SND_SOC_DAPM_AIF_IN("SDIN1", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SDIN2", NULL, 1, SND_SOC_NOPM, 0, 0),
 
        /* Playback Requirements */
        SND_SOC_DAPM_SUPPLY("ASP DAI0", CS42L42_PWR_CTL1, CS42L42_ASP_DAI_PDN_SHIFT, 1, NULL, 0),
@@ -630,6 +619,8 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
 
        for (i = 0; i < ARRAY_SIZE(pll_ratio_table); i++) {
                if (pll_ratio_table[i].sclk == clk) {
+                       cs42l42->pll_config = i;
+
                        /* Configure the internal sample rate */
                        snd_soc_component_update_bits(component, CS42L42_MCLK_CTL,
                                        CS42L42_INTERNAL_FS_MASK,
@@ -638,14 +629,9 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
                                        (pll_ratio_table[i].mclk_int !=
                                        24000000)) <<
                                        CS42L42_INTERNAL_FS_SHIFT);
-                       /* Set the MCLK src (PLL or SCLK) and the divide
-                        * ratio
-                        */
+
                        snd_soc_component_update_bits(component, CS42L42_MCLK_SRC_SEL,
-                                       CS42L42_MCLK_SRC_SEL_MASK |
                                        CS42L42_MCLKDIV_MASK,
-                                       (pll_ratio_table[i].mclk_src_sel
-                                       << CS42L42_MCLK_SRC_SEL_SHIFT) |
                                        (pll_ratio_table[i].mclk_div <<
                                        CS42L42_MCLKDIV_SHIFT));
                        /* Set up the LRCLK */
@@ -681,15 +667,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
                                        CS42L42_FSYNC_PULSE_WIDTH_MASK,
                                        CS42L42_FRAC1_VAL(fsync - 1) <<
                                        CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
-                       snd_soc_component_update_bits(component,
-                                       CS42L42_ASP_FRM_CFG,
-                                       CS42L42_ASP_5050_MASK,
-                                       CS42L42_ASP_5050_MASK);
-                       /* Set the frame delay to 1.0 SCLK clocks */
-                       snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG,
-                                       CS42L42_ASP_FSD_MASK,
-                                       CS42L42_ASP_FSD_1_0 <<
-                                       CS42L42_ASP_FSD_SHIFT);
                        /* Set the sample rates (96k or lower) */
                        snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN,
                                        CS42L42_FS_EN_MASK,
@@ -789,7 +766,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        /* interface format */
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
-       case SND_SOC_DAIFMT_LEFT_J:
+               /*
+                * 5050 mode, frame starts on falling edge of LRCLK,
+                * frame delayed by 1.0 SCLKs
+                */
+               snd_soc_component_update_bits(component,
+                                             CS42L42_ASP_FRM_CFG,
+                                             CS42L42_ASP_STP_MASK |
+                                             CS42L42_ASP_5050_MASK |
+                                             CS42L42_ASP_FSD_MASK,
+                                             CS42L42_ASP_5050_MASK |
+                                             (CS42L42_ASP_FSD_1_0 <<
+                                               CS42L42_ASP_FSD_SHIFT));
                break;
        default:
                return -EINVAL;
@@ -819,6 +807,25 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        return 0;
 }
 
+static int cs42l42_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
+
+       /*
+        * Sample rates < 44.1 kHz would produce an out-of-range SCLK with
+        * a standard I2S frame. If the machine driver sets SCLK it must be
+        * legal.
+        */
+       if (cs42l42->sclk)
+               return 0;
+
+       /* Machine driver has not set a SCLK, limit bottom end to 44.1 kHz */
+       return snd_pcm_hw_constraint_minmax(substream->runtime,
+                                           SNDRV_PCM_HW_PARAM_RATE,
+                                           44100, 192000);
+}
+
 static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params,
                                struct snd_soc_dai *dai)
@@ -832,6 +839,10 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
        cs42l42->srate = params_rate(params);
        cs42l42->bclk = snd_soc_params_to_bclk(params);
 
+       /* I2S frame always has 2 channels even for mono audio */
+       if (channels == 1)
+               cs42l42->bclk *= 2;
+
        switch(substream->stream) {
        case SNDRV_PCM_STREAM_CAPTURE:
                if (channels == 2) {
@@ -855,6 +866,17 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
                snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
                                                         CS42L42_ASP_RX_CH_AP_MASK |
                                                         CS42L42_ASP_RX_CH_RES_MASK, val);
+
+               /* Channel B comes from the last active channel */
+               snd_soc_component_update_bits(component, CS42L42_SP_RX_CH_SEL,
+                                             CS42L42_SP_RX_CHB_SEL_MASK,
+                                             (channels - 1) << CS42L42_SP_RX_CHB_SEL_SHIFT);
+
+               /* Both LRCLK slots must be enabled */
+               snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_EN,
+                                             CS42L42_ASP_RX0_CH_EN_MASK,
+                                             BIT(CS42L42_ASP_RX0_CH1_SHIFT) |
+                                             BIT(CS42L42_ASP_RX0_CH2_SHIFT));
                break;
        default:
                break;
@@ -900,13 +922,21 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                         */
                        regmap_multi_reg_write(cs42l42->regmap, cs42l42_to_osc_seq,
                                               ARRAY_SIZE(cs42l42_to_osc_seq));
+
+                       /* Must disconnect PLL before stopping it */
+                       snd_soc_component_update_bits(component,
+                                                     CS42L42_MCLK_SRC_SEL,
+                                                     CS42L42_MCLK_SRC_SEL_MASK,
+                                                     0);
+                       usleep_range(100, 200);
+
                        snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
                                                      CS42L42_PLL_START_MASK, 0);
                }
        } else {
                if (!cs42l42->stream_use) {
                        /* SCLK must be running before codec unmute */
-                       if ((cs42l42->bclk < 11289600) && (cs42l42->sclk < 11289600)) {
+                       if (pll_ratio_table[cs42l42->pll_config].mclk_src_sel) {
                                snd_soc_component_update_bits(component, CS42L42_PLL_CTL1,
                                                              CS42L42_PLL_START_MASK, 1);
 
@@ -927,6 +957,12 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                                                               CS42L42_PLL_LOCK_TIMEOUT_US);
                                if (ret < 0)
                                        dev_warn(component->dev, "PLL failed to lock: %d\n", ret);
+
+                               /* PLL must be running to drive glitchless switch logic */
+                               snd_soc_component_update_bits(component,
+                                                             CS42L42_MCLK_SRC_SEL,
+                                                             CS42L42_MCLK_SRC_SEL_MASK,
+                                                             CS42L42_MCLK_SRC_SEL_MASK);
                        }
 
                        /* Mark SCLK as present, turn off internal oscillator */
@@ -960,8 +996,8 @@ static int cs42l42_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
                         SNDRV_PCM_FMTBIT_S24_LE |\
                         SNDRV_PCM_FMTBIT_S32_LE )
 
-
 static const struct snd_soc_dai_ops cs42l42_ops = {
+       .startup        = cs42l42_dai_startup,
        .hw_params      = cs42l42_pcm_hw_params,
        .set_fmt        = cs42l42_set_dai_fmt,
        .set_sysclk     = cs42l42_set_sysclk,
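
A note on the mono bclk doubling in cs42l42_pcm_hw_params() above: an I2S frame always clocks two channel slots, but snd_soc_params_to_bclk() scales with the channel count. Assuming it computes rate * channels * physical width, a mono 48 kHz stream with 32-bit slots yields 1,536,000 Hz while the wire actually carries twice that; a worked sketch of the correction:

    /* Worked example of the mono-bclk correction (assumed S32_LE at 48 kHz). */
    static unsigned int effective_bclk(unsigned int rate, unsigned int channels,
                                       unsigned int phys_width)
    {
            unsigned int bclk = rate * channels * phys_width;  /* 1,536,000 mono */

            if (channels == 1)
                    bclk *= 2;      /* I2S clocks 2 slots: 3,072,000 */
            return bclk;
    }
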
index 206b3c81d3e02d02cfca8aca91490138d4731712..8734f6828f3edebef3e06ca81cdf93009db59a8a 100644 (file)
 
 /* Page 0x25 Audio Port Registers */
 #define CS42L42_SP_RX_CH_SEL           (CS42L42_PAGE_25 + 0x01)
+#define CS42L42_SP_RX_CHB_SEL_SHIFT    2
+#define CS42L42_SP_RX_CHB_SEL_MASK     (3 << CS42L42_SP_RX_CHB_SEL_SHIFT)
 
 #define CS42L42_SP_RX_ISOC_CTL         (CS42L42_PAGE_25 + 0x02)
 #define CS42L42_SP_RX_RSYNC_SHIFT      6
@@ -775,6 +777,7 @@ struct  cs42l42_private {
        struct gpio_desc *reset_gpio;
        struct completion pdn_done;
        struct snd_soc_jack *jack;
+       int pll_config;
        int bclk;
        u32 sclk;
        u32 srate;
index 15bd8335f6678203527ee08c46c855f61e4cb44e..db88be48c9980196550bb654785d175aca62e70e 100644 (file)
@@ -828,36 +828,6 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
        }
 }
 
-static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
-{
-       struct snd_soc_dapm_context *dapm = nau8824->dapm;
-       const char *prefix = dapm->component->name_prefix;
-       char prefixed_pin[80];
-
-       if (prefix) {
-               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
-                        prefix, pin);
-               snd_soc_dapm_disable_pin(dapm, prefixed_pin);
-       } else {
-               snd_soc_dapm_disable_pin(dapm, pin);
-       }
-}
-
-static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
-{
-       struct snd_soc_dapm_context *dapm = nau8824->dapm;
-       const char *prefix = dapm->component->name_prefix;
-       char prefixed_pin[80];
-
-       if (prefix) {
-               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
-                        prefix, pin);
-               snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
-       } else {
-               snd_soc_dapm_force_enable_pin(dapm, pin);
-       }
-}
-
 static void nau8824_eject_jack(struct nau8824 *nau8824)
 {
        struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -866,8 +836,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
        /* Clear all interruption status */
        nau8824_int_status_clear_all(regmap);
 
-       nau8824_dapm_disable_pin(nau8824, "SAR");
-       nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+       snd_soc_dapm_disable_pin(dapm, "SAR");
+       snd_soc_dapm_disable_pin(dapm, "MICBIAS");
        snd_soc_dapm_sync(dapm);
 
        /* Enable the insertion interruption, disable the ejection
@@ -897,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       nau8824_dapm_enable_pin(nau8824, "MICBIAS");
-       nau8824_dapm_enable_pin(nau8824, "SAR");
+       snd_soc_dapm_enable_pin(dapm, "MICBIAS");
+       snd_soc_dapm_enable_pin(dapm, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
@@ -909,8 +879,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        if (adc_value < HEADSET_SARADC_THD) {
                event |= SND_JACK_HEADPHONE;
 
-               nau8824_dapm_disable_pin(nau8824, "SAR");
-               nau8824_dapm_disable_pin(nau8824, "MICBIAS");
+               snd_soc_dapm_disable_pin(dapm, "SAR");
+               snd_soc_dapm_disable_pin(dapm, "MICBIAS");
                snd_soc_dapm_sync(dapm);
        } else {
                event |= SND_JACK_HEADSET;
index abcd6f48378880651f7e4e761ede1a934589277d..51ecaa2abcd1447078c0bfcfb63d3a7a2ef0b068 100644 (file)
@@ -44,6 +44,7 @@ static const struct reg_sequence patch_list[] = {
        {RT5682_I2C_CTRL, 0x000f},
        {RT5682_PLL2_INTERNAL, 0x8266},
        {RT5682_SAR_IL_CMD_3, 0x8365},
+       {RT5682_SAR_IL_CMD_6, 0x0180},
 };
 
 void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
index b504d63385b38b3e3c074b87dc11d3d48e07bc18..52d2c968b5c0e9dd070d93280756bc40dfc7ce70 100644 (file)
@@ -35,6 +35,9 @@
 
 #include "tlv320aic31xx.h"
 
+static int aic31xx_set_jack(struct snd_soc_component *component,
+                            struct snd_soc_jack *jack, void *data);
+
 static const struct reg_default aic31xx_reg_defaults[] = {
        { AIC31XX_CLKMUX, 0x00 },
        { AIC31XX_PLLPR, 0x11 },
@@ -1256,6 +1259,13 @@ static int aic31xx_power_on(struct snd_soc_component *component)
                return ret;
        }
 
+       /*
+        * The jack detection configuration is in the same register that
+        * is used to report jack detect status, so it is volatile and not
+        * covered by the cache sync; restore it separately.
+        */
+       aic31xx_set_jack(component, aic31xx->jack, NULL);
+
        return 0;
 }
 
index dcd8aeb45cb317af4199a2f9084870df43897b5e..2e9175b37dc9ccf0a57439049209a38bb90d249f 100644 (file)
@@ -682,11 +682,20 @@ static int aic32x4_set_dosr(struct snd_soc_component *component, u16 dosr)
 static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
                                                u8 r_block, u8 p_block)
 {
-       if (r_block > 18 || p_block > 25)
-               return -EINVAL;
+       struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
+
+       if (aic32x4->type == AIC32X4_TYPE_TAS2505) {
+               if (r_block || p_block > 3)
+                       return -EINVAL;
 
-       snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
-       snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+               snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+       } else { /* AIC32x4 */
+               if (r_block > 18 || p_block > 25)
+                       return -EINVAL;
+
+               snd_soc_component_write(component, AIC32X4_ADCSPB, r_block);
+               snd_soc_component_write(component, AIC32X4_DACSPB, p_block);
+       }
 
        return 0;
 }
@@ -695,6 +704,7 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
                                unsigned int sample_rate, unsigned int channels,
                                unsigned int bit_depth)
 {
+       struct aic32x4_priv *aic32x4 = snd_soc_component_get_drvdata(component);
        u8 aosr;
        u16 dosr;
        u8 adc_resource_class, dac_resource_class;
@@ -721,19 +731,28 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
                adc_resource_class = 6;
                dac_resource_class = 8;
                dosr_increment = 8;
-               aic32x4_set_processing_blocks(component, 1, 1);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 1, 1);
        } else if (sample_rate <= 96000) {
                aosr = 64;
                adc_resource_class = 6;
                dac_resource_class = 8;
                dosr_increment = 4;
-               aic32x4_set_processing_blocks(component, 1, 9);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 1, 9);
        } else if (sample_rate == 192000) {
                aosr = 32;
                adc_resource_class = 3;
                dac_resource_class = 4;
                dosr_increment = 2;
-               aic32x4_set_processing_blocks(component, 13, 19);
+               if (aic32x4->type == AIC32X4_TYPE_TAS2505)
+                       aic32x4_set_processing_blocks(component, 0, 1);
+               else
+                       aic32x4_set_processing_blocks(component, 13, 19);
        } else {
                dev_err(component->dev, "Sampling rate not supported\n");
                return -EINVAL;
index 549d98241daec1ce6e6c22e19772ac6209b57613..fe15cbc7bcafdcd384859c2168fa57e3653ee5ec 100644 (file)
@@ -747,7 +747,6 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
 static void wm_adsp2_cleanup_debugfs(struct wm_adsp *dsp)
 {
        wm_adsp_debugfs_clear(dsp);
-       debugfs_remove_recursive(dsp->debugfs_root);
 }
 #else
 static inline void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
index 4124aa2fc2479a7737507788ccf456ac739e4684..905c7965f6539ab121efd535630520c6f460f650 100644 (file)
@@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
        snd_pcm_uframes_t period_size;
        ssize_t periodbytes;
        ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
-       u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+       u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
 
        channels = substream->runtime->channels;
        period_size = substream->runtime->period_size;
@@ -233,7 +233,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
        /* set codec params and inform SST driver the same */
        sst_fill_pcm_params(substream, &param);
        sst_fill_alloc_params(substream, &alloc_params);
-       substream->runtime->dma_area = substream->dma_buffer.area;
        str_params.sparams = param;
        str_params.aparams = alloc_params;
        str_params.codec = SST_CODEC_TYPE_PCM;
index 896251d742fef2ea1458ebe05d91c0c2fd4cd102..b7b3b0bf994a73c326b9892f4194c5b5b1dfe669 100644 (file)
@@ -404,7 +404,7 @@ static int audio_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* By default dais[0] is configured for max98373 */
-       if (!strcmp(pdev->name, "sof_da7219_max98360a")) {
+       if (!strcmp(pdev->name, "sof_da7219_mx98360a")) {
                dais[0] = (struct snd_soc_dai_link) {
                        .name = "SSP1-Codec",
                        .id = 0,
index c2a5933bfcfc1ad0d2795a6b00d041b272d414f0..700a18561a9405bc314770545e6694f8611feb18 100644 (file)
@@ -104,8 +104,6 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
        int err;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct kirkwood_dma_data *priv = kirkwood_priv(substream);
-       const struct mbus_dram_target_info *dram;
-       unsigned long addr;
 
        snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);
 
@@ -142,20 +140,14 @@ static int kirkwood_dma_open(struct snd_soc_component *component,
                writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK);
        }
 
-       dram = mv_mbus_dram_info();
-       addr = substream->dma_buffer.addr;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                if (priv->substream_play)
                        return -EBUSY;
                priv->substream_play = substream;
-               kirkwood_dma_conf_mbus_windows(priv->io,
-                       KIRKWOOD_PLAYBACK_WIN, addr, dram);
        } else {
                if (priv->substream_rec)
                        return -EBUSY;
                priv->substream_rec = substream;
-               kirkwood_dma_conf_mbus_windows(priv->io,
-                       KIRKWOOD_RECORD_WIN, addr, dram);
        }
 
        return 0;
@@ -182,6 +174,23 @@ static int kirkwood_dma_close(struct snd_soc_component *component,
        return 0;
 }
 
+static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+                                 struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params)
+{
+       struct kirkwood_dma_data *priv = kirkwood_priv(substream);
+       const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+       unsigned long addr = substream->runtime->dma_addr;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               kirkwood_dma_conf_mbus_windows(priv->io,
+                       KIRKWOOD_PLAYBACK_WIN, addr, dram);
+       else
+               kirkwood_dma_conf_mbus_windows(priv->io,
+                       KIRKWOOD_RECORD_WIN, addr, dram);
+       return 0;
+}
+
 static int kirkwood_dma_prepare(struct snd_soc_component *component,
                                struct snd_pcm_substream *substream)
 {
@@ -246,6 +255,7 @@ const struct snd_soc_component_driver kirkwood_soc_component = {
        .name           = DRV_NAME,
        .open           = kirkwood_dma_open,
        .close          = kirkwood_dma_close,
+       .hw_params      = kirkwood_dma_hw_params,
        .prepare        = kirkwood_dma_prepare,
        .pointer        = kirkwood_dma_pointer,
        .pcm_construct  = kirkwood_dma_new,
index 3a5e84e16a87bc8f8dea8ed33a3c910f86b670a6..c8dfd0de30e4f35aad75fc0297c693345acd12c0 100644 (file)
@@ -148,86 +148,75 @@ int snd_soc_component_set_bias_level(struct snd_soc_component *component,
        return soc_component_ret(component, ret);
 }
 
-static int soc_component_pin(struct snd_soc_component *component,
-                            const char *pin,
-                            int (*pin_func)(struct snd_soc_dapm_context *dapm,
-                                            const char *pin))
-{
-       struct snd_soc_dapm_context *dapm =
-               snd_soc_component_get_dapm(component);
-       char *full_name;
-       int ret;
-
-       if (!component->name_prefix) {
-               ret = pin_func(dapm, pin);
-               goto end;
-       }
-
-       full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
-       if (!full_name) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       ret = pin_func(dapm, full_name);
-       kfree(full_name);
-end:
-       return soc_component_ret(component, ret);
-}
-
 int snd_soc_component_enable_pin(struct snd_soc_component *component,
                                 const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_enable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_enable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin);
 
 int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
                                          const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_enable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_enable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked);
 
 int snd_soc_component_disable_pin(struct snd_soc_component *component,
                                  const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_disable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_disable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin);
 
 int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
                                           const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_disable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_disable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked);
 
 int snd_soc_component_nc_pin(struct snd_soc_component *component,
                             const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_nc_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_nc_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin);
 
 int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
                                      const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_nc_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_nc_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked);
 
 int snd_soc_component_get_pin_status(struct snd_soc_component *component,
                                     const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_get_pin_status);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_get_pin_status(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status);
 
 int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
                                       const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_force_enable_pin(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin);
 
@@ -235,7 +224,9 @@ int snd_soc_component_force_enable_pin_unlocked(
        struct snd_soc_component *component,
        const char *pin)
 {
-       return soc_component_pin(component, pin, snd_soc_dapm_force_enable_pin_unlocked);
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
+       return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked);
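
These hunks drop the kasprintf()-based soc_component_pin() wrapper and call the snd_soc_dapm_* pin functions directly, presumably because the DAPM core now resolves the component name prefix itself; the nau8824 hunks above remove the same open-coded prefixing on the codec side. A hypothetical sketch of the prefix-aware lookup that makes caller-side prefixing redundant (illustrative names only):

    #include <stdio.h>

    /* Build "<prefix> <pin>" when a name prefix is set, else the bare pin. */
    static const char *dapm_full_pin_name(const char *prefix, const char *pin,
                                          char *buf, size_t len)
    {
            if (prefix)
                    snprintf(buf, len, "%s %s", prefix, pin);
            else
                    snprintf(buf, len, "%s", pin);
            return buf;
    }
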
 
index 4bce89b5ea407d87dbb228a931e6530d050992dd..4447f515e8b191f396ef485e266535b678837a80 100644 (file)
@@ -278,6 +278,8 @@ config SND_SOC_SOF_HDA
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
        tristate
+       select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE
+       select SND_INTEL_SOUNDWIRE_ACPI if SND_SOC_SOF_INTEL_SOUNDWIRE
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE
        tristate "SOF support for SoundWire"
@@ -285,8 +287,6 @@ config SND_SOC_SOF_INTEL_SOUNDWIRE
        depends on SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
        depends on ACPI && SOUNDWIRE
        depends on !(SOUNDWIRE=m && SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=y)
-       select SOUNDWIRE_INTEL
-       select SND_INTEL_SOUNDWIRE_ACPI
        help
          This adds support for SoundWire with Sound Open Firmware
          for Intel(R) platforms.
index c91aa951df22618954c9b42eac8e892fff780665..acfeca42604cdef019c665509e5e37a24c67c198 100644 (file)
@@ -107,8 +107,8 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
        } else {
                /* reply correct size ? */
                if (reply.hdr.size != msg->reply_size &&
-                       /* getter payload is never known upfront */
-                       !(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) {
+                   /* getter payload is never known upfront */
+                   ((reply.hdr.cmd & SOF_GLB_TYPE_MASK) != SOF_IPC_GLB_PROBE)) {
                        dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
                                msg->reply_size, reply.hdr.size);
                        ret = -EINVAL;
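
The one-line logic change above fixes a common bug class: SOF_IPC_GLB_PROBE is a value in a multi-bit command-type field, not a single flag, so testing it with a bare bitwise AND can match unrelated commands that happen to share bits. A sketch with made-up constants (not the real SOF values):

    /* Command type lives in a 4-bit field; compare the field, don't test bits. */
    #define GLB_TYPE_MASK   0xf0000000u
    #define GLB_PROBE       0x30000000u
    #define GLB_TRACE       0x70000000u     /* note: GLB_TRACE & GLB_PROBE != 0 */

    static int is_probe_cmd(unsigned int cmd)
    {
            return (cmd & GLB_TYPE_MASK) == GLB_PROBE;
    }
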
index e1e368ff2b120327121bd69b453e8cea4fd45969..891e6e1b912103ef78827dad782a96a7601b9799 100644 (file)
@@ -187,12 +187,16 @@ static int hda_sdw_probe(struct snd_sof_dev *sdev)
 int hda_sdw_startup(struct snd_sof_dev *sdev)
 {
        struct sof_intel_hda_dev *hdev;
+       struct snd_sof_pdata *pdata = sdev->pdata;
 
        hdev = sdev->pdata->hw_pdata;
 
        if (!hdev->sdw)
                return 0;
 
+       if (pdata->machine && !pdata->machine->mach_params.link_mask)
+               return 0;
+
        return sdw_intel_startup(hdev->sdw);
 }
 
@@ -1002,6 +1006,14 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
                        hda_mach->mach_params.dmic_num = dmic_num;
                        pdata->machine = hda_mach;
                        pdata->tplg_filename = tplg_filename;
+
+                       if (codec_num == 2) {
+                               /*
+                                * Prevent SoundWire links from starting when an external
+                                * HDaudio codec is used
+                                */
+                               hda_mach->mach_params.link_mask = 0;
+                       }
                }
        }
 
index 3c1628a3a1acd86c1231889e00ebba4af17da870..3d9736e7381f84baff6feb92ef1492eee431f228 100644 (file)
@@ -198,7 +198,7 @@ static int uniphier_aiodma_mmap(struct snd_soc_component *component,
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
        return remap_pfn_range(vma, vma->vm_start,
-                              substream->dma_buffer.addr >> PAGE_SHIFT,
+                              substream->runtime->dma_addr >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
 }
 
index 1d59fb668c77a929b0a622470e83ba3a0e328009..91afea9d5de6787906ac5e817ba3406488fa3f58 100644 (file)
@@ -452,8 +452,8 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
 
        stream_data->buffer_size = size;
 
-       low = lower_32_bits(substream->dma_buffer.addr);
-       high = upper_32_bits(substream->dma_buffer.addr);
+       low = lower_32_bits(runtime->dma_addr);
+       high = upper_32_bits(runtime->dma_addr);
        writel(low, stream_data->mmio + XLNX_AUD_BUFF_ADDR_LSB);
        writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
 
index 8b7a9830dd221f195e8c50828834d84af023fdb5..3430667b0d241dd02250ecf7819441ac81794086 100644 (file)
@@ -1031,7 +1031,7 @@ struct sys_stat_struct {
  *     scall32-o32.S in the kernel sources.
  *   - the system call is performed by calling "syscall"
  *   - syscall return comes in v0, and register a3 needs to be checked to know
- *     if an error occured, in which case errno is in v0.
+ *     if an error occurred, in which case errno is in v0.
  *   - the arguments are cast to long and assigned into the target registers
  *     which are then simply passed as registers to the asm code, so that we
  *     don't have to experience issues with register constraints.
@@ -2243,6 +2243,19 @@ unsigned int sleep(unsigned int seconds)
                return 0;
 }
 
+static __attribute__((unused))
+int msleep(unsigned int msecs)
+{
+       struct timeval my_timeval = { msecs / 1000, (msecs % 1000) * 1000 };
+
+       if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+               return (my_timeval.tv_sec * 1000) +
+                       (my_timeval.tv_usec / 1000) +
+                       !!(my_timeval.tv_usec % 1000);
+       else
+               return 0;
+}
+
 static __attribute__((unused))
 int stat(const char *path, struct stat *buf)
 {
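
A usage note on the msleep() added above: Linux select() rewrites the timeval with the time not slept, so the function returns the remaining milliseconds (rounded up) when interrupted by a signal, and 0 after a full sleep. A caller wanting an uninterruptible delay can loop on the remainder; a small sketch building on the msleep() from the hunk above:

    /* Retry msleep() until the full delay has elapsed. */
    static void msleep_uninterruptible(unsigned int msecs)
    {
            int left = msleep(msecs);

            while (left > 0)
                    left = msleep(left);
    }
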
index 81461813ec620c1ba2c21719e15d0a10b5157d7e..d9bd6f5f8f46fe229996dbfb12c50794658ffb01 100644 (file)
@@ -131,8 +131,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
        writes = reads = offset = 0;
 
        while (insize || write_left) {
-               unsigned long had_reads;
-               int got_comp;
+               int had_reads, got_comp;
 
                /*
                 * Queue up as many reads as we can
@@ -174,8 +173,13 @@ static int copy_file(struct io_uring *ring, off_t insize)
                        if (!got_comp) {
                                ret = io_uring_wait_cqe(ring, &cqe);
                                got_comp = 1;
-                       } else
+                       } else {
                                ret = io_uring_peek_cqe(ring, &cqe);
+                               if (ret == -EAGAIN) {
+                                       cqe = NULL;
+                                       ret = 0;
+                               }
+                       }
                        if (ret < 0) {
                                fprintf(stderr, "io_uring_peek_cqe: %s\n",
                                                        strerror(-ret));
@@ -194,7 +198,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
                                fprintf(stderr, "cqe failed: %s\n",
                                                strerror(-cqe->res));
                                return 1;
-                       } else if ((size_t) cqe->res != data->iov.iov_len) {
+                       } else if (cqe->res != data->iov.iov_len) {
                                /* Short read/write, adjust and requeue */
                                data->iov.iov_base += cqe->res;
                                data->iov.iov_len -= cqe->res;
@@ -221,6 +225,25 @@ static int copy_file(struct io_uring *ring, off_t insize)
                }
        }
 
+       /* wait out pending writes */
+       while (writes) {
+               struct io_data *data;
+
+               ret = io_uring_wait_cqe(ring, &cqe);
+               if (ret) {
+                       fprintf(stderr, "wait_cqe=%d\n", ret);
+                       return 1;
+               }
+               if (cqe->res < 0) {
+                       fprintf(stderr, "write res=%d\n", cqe->res);
+                       return 1;
+               }
+               data = io_uring_cqe_get_data(cqe);
+               free(data);
+               writes--;
+               io_uring_cqe_seen(ring, cqe);
+       }
+
        return 0;
 }
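
The added drain loop fixes copy_file() returning while writes are still in flight once the read side is exhausted. The general liburing pattern is: after the submit loop, reap one CQE per outstanding request before tearing the ring down. A minimal sketch, assuming each SQE's user_data points at a malloc'ed per-request struct:

    #include <liburing.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int drain_completions(struct io_uring *ring, int pending)
    {
            struct io_uring_cqe *cqe;
            int ret;

            while (pending) {
                    ret = io_uring_wait_cqe(ring, &cqe);
                    if (ret) {
                            fprintf(stderr, "wait_cqe: %d\n", ret);
                            return ret;
                    }
                    if (cqe->res < 0)
                            fprintf(stderr, "request failed: %d\n", cqe->res);
                    free(io_uring_cqe_get_data(cqe));   /* per-request state */
                    io_uring_cqe_seen(ring, cqe);
                    pending--;
            }
            return 0;
    }
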
 
index b46760b93bb40d1bad13fbf2d9b8040a847013b9..7ff3d5ce44f999722bcb9eb65510534257ef1d44 100644 (file)
@@ -804,6 +804,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
        btf->nr_types = 0;
        btf->start_id = 1;
        btf->start_str_off = 0;
+       btf->fd = -1;
 
        if (base_btf) {
                btf->base_btf = base_btf;
@@ -832,8 +833,6 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
        if (err)
                goto done;
 
-       btf->fd = -1;
-
 done:
        if (err) {
                btf__free(btf);
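
Moving btf->fd = -1 ahead of the first failure point matters because btf__free() runs on the error path and, with a zero-initialized struct, would otherwise close file descriptor 0. The general init-before-first-goto shape, as a self-contained sketch (names are illustrative):

    #include <stdlib.h>
    #include <unistd.h>

    struct obj {
            int   fd;
            void *buf;
    };

    static void obj_free(struct obj *o)
    {
            if (!o)
                    return;
            if (o->fd >= 0)         /* sentinel check: never close fd 0 by accident */
                    close(o->fd);
            free(o->buf);
            free(o);
    }

    static struct obj *obj_new(size_t n)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (!o)
                    return NULL;
            o->fd = -1;             /* set before any path that reaches obj_free() */
            o->buf = malloc(n);
            if (!o->buf)
                    goto err;
            return o;
    err:
            obj_free(o);            /* safe: fd is -1, not a stale 0 */
            return NULL;
    }
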
index ecaae2927ab8189b0a9b4c68494c2442fc3a22a3..cd8c703dde71870665303822ccc2f0b76c05b861 100644 (file)
@@ -75,6 +75,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
        case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
                xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
                break;
+       case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+               xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
+               break;
        case BPF_PROG_TYPE_SK_LOOKUP:
                xattr.expected_attach_type = BPF_SK_LOOKUP;
                break;
@@ -104,7 +107,6 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
        case BPF_PROG_TYPE_SK_REUSEPORT:
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
        case BPF_PROG_TYPE_CGROUP_SYSCTL:
-       case BPF_PROG_TYPE_CGROUP_SOCKOPT:
        case BPF_PROG_TYPE_TRACING:
        case BPF_PROG_TYPE_STRUCT_OPS:
        case BPF_PROG_TYPE_EXT:
index 54f367cbadaee9093270da036fc63a696ee68414..b1bff5fb0f65db91a9f638553758444fdcd2a100 100644 (file)
@@ -434,7 +434,7 @@ static int nd_intel_test_finish_query(struct nfit_test *t,
                dev_dbg(dev, "%s: transition out verify\n", __func__);
                fw->state = FW_STATE_UPDATED;
                fw->missed_activate = false;
-               /* fall through */
+               fallthrough;
        case FW_STATE_UPDATED:
                nd_cmd->status = 0;
                /* bogus test version */
index 2c8935b3e65dad9df32b1fde9a8c4bae6e24faa6..ee454327e5c6047c9ce40de4da4ad51f1eb38037 100644 (file)
        .result = ACCEPT,
        .retval = 2,
 },
+{
+       "dead code: zero extension",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 0,
+},
index 15d937ba96cad3eba09970975b608fb12ad71e69..fd1ffaa5a1358e66fe2ccf61f0654f272dbd3f24 100755 (executable)
@@ -68,16 +68,12 @@ do
        cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
                srand(n + me + systime());
                ncpus = split(cpus, ca);
-               curcpu = ca[int(rand() * ncpus + 1)];
-               z = "";
-               for (i = 1; 4 * i <= curcpu; i++)
-                       z = z "0";
-               print "0x" 2 ^ (curcpu % 4) z;
+               print ca[int(rand() * ncpus + 1)];
        }' < /dev/null`
        n=$(($n+1))
-       if ! taskset -p $cpumask $$ > /dev/null 2>&1
+       if ! taskset -c -p $cpumask $$ > /dev/null 2>&1
        then
-               echo taskset failure: '"taskset -p ' $cpumask $$ '"'
+               echo taskset failure: '"taskset -c -p ' $cpumask $$ '"'
                exit 1
        fi
 
index e5cc6b2f195eb9fb916ef66c011b427569fb655e..1af5d6b86b39247243259f739b35df286126b117 100755 (executable)
@@ -14,7 +14,7 @@ if test -z "$TORTURE_KCONFIG_KCSAN_ARG"
 then
        exit 0
 fi
-cat $1/*/console.log |
+find $1 -name console.log -exec cat {} \; |
        grep "BUG: KCSAN: " |
        sed -e 's/^\[[^]]*] //' |
        sort |
index d8c8483c46f1340e5888c7bf6dd1fc1c4896078b..5a0023d183daca7134d8194aad44bb0425b99ff0 100755 (executable)
@@ -142,7 +142,7 @@ then
        echo "Cannot copy from $oldrun to $rundir."
        usage
 fi
-rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
+rm -f "$rundir"/*/{console.log,console.log.diags,qemu_pid,qemu-pid,qemu-retval,Warnings,kvm-test-1-run.sh.out,kvm-test-1-run-qemu.sh.out,vmlinux} "$rundir"/log
 touch "$rundir/log"
 echo $scriptname $args | tee -a "$rundir/log"
 echo $oldrun > "$rundir/re-run"
@@ -179,6 +179,6 @@ if test -n "$dryrun"
 then
        echo ---- Dryrun complete, directory: $rundir | tee -a "$rundir/log"
 else
-       ( cd "$rundir"; sh $T/runbatches.sh )
+       ( cd "$rundir"; sh $T/runbatches.sh ) | tee -a "$rundir/log"
        kvm-end-run-stats.sh "$rundir" "$starttime"
 fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh b/tools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh
new file mode 100755 (executable)
index 0000000..f99b2c1
--- /dev/null
@@ -0,0 +1,106 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Produce awk statements roughly depicting the system's CPU and cache
+# layout.  If the required information is not available, produce
+# error messages as awk comments, but exit successfully regardless.
+#
+# Usage: kvm-assign-cpus.sh /path/to/sysfs
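+#
+# On a hypothetical two-node system with two CPUs per node, the output
+# might look like this (actual values depend on sysfs):
+#
+#    cpu[0][0] = 0;
+#    cpu[0][1] = 1;
+#    nodecpus[0] = 2;
+#    cpu[1][0] = 2;
+#    cpu[1][1] = 3;
+#    nodecpus[1] = 2;
+#    numnodes = 2;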
+
+T=/tmp/kvm-assign-cpus.sh.$$
+trap 'rm -rf $T' 0 2
+mkdir $T
+
+sysfsdir=${1-/sys/devices/system/node}
+if ! cd "$sysfsdir" > $T/msg 2>&1
+then
+       sed -e 's/^/# /' < $T/msg
+       exit 0
+fi
+nodelist="`ls -d node*`"
+for i in node*
+do
+       if ! test -d $i/
+       then
+               echo "# Not a directory: $sysfsdir/node*"
+               exit 0
+       fi
+       for j in $i/cpu*/cache/index*
+       do
+               if ! test -d $j/
+               then
+                       echo "# Not a directory: $sysfsdir/$j"
+                       exit 0
+               else
+                       break
+               fi
+       done
+       indexlist="`ls -d $i/cpu* | grep 'cpu[0-9][0-9]*' | head -1 | sed -e 's,^.*$,ls -d &/cache/index*,' | sh | sed -e 's,^.*/,,'`"
+       break
+done
+for i in node*/cpu*/cache/index*/shared_cpu_list
+do
+       if ! test -f $i
+       then
+               echo "# Not a file: $sysfsdir/$i"
+               exit 0
+       else
+               break
+       fi
+done
+firstshared=
+for i in $indexlist
+do
+       rm -f $T/cpulist
+       for n in node*
+       do
+               f="$n/cpu*/cache/$i/shared_cpu_list"
+               if ! cat $f > $T/msg 2>&1
+               then
+                       sed -e 's/^/# /' < $T/msg
+                       exit 0
+               fi
+               cat $f >> $T/cpulist
+       done
+       if grep -q '[-,]' $T/cpulist
+       then
+               if test -z "$firstshared"
+               then
+                       firstshared="$i"
+               fi
+       fi
+done
+if test -z "$firstshared"
+then
+       splitindex="`echo $indexlist | sed -e 's/ .*$//'`"
+else
+       splitindex="$firstshared"
+fi
+nodenum=0
+for n in node*
+do
+       cat $n/cpu*/cache/$splitindex/shared_cpu_list | sort -u -k1n |
+       awk -v nodenum="$nodenum" '
+       BEGIN {
+               idx = 0;
+       }
+
+       {
+               nlists = split($0, cpulists, ",");
+               for (i = 1; i <= nlists; i++) {
+                       listsize = split(cpulists[i], cpus, "-");
+                       if (listsize == 1)
+                               cpus[2] = cpus[1];
+                       for (j = cpus[1]; j <= cpus[2]; j++) {
+                               print "cpu[" nodenum "][" idx "] = " j ";";
+                               idx++;
+                       }
+               }
+       }
+
+       END {
+               print "nodecpus[" nodenum "] = " idx ";";
+       }'
+       nodenum=`expr $nodenum + 1`
+done
+echo "numnodes = $nodenum;"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh b/tools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh
new file mode 100755 (executable)
index 0000000..20c7c53
--- /dev/null
@@ -0,0 +1,88 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Create an awk script that takes as input numbers of CPUs and outputs
+# lists of CPUs, one per line in both cases.
+#
+# Usage: kvm-get-cpus-script.sh /path/to/cpu/arrays /path/to/put/script [ /path/to/state ]
+#
+# The CPU arrays are output by kvm-assign-cpus.sh, and are valid awk
+# statements initializing the variables describing the system's topology.
+#
+# The optional state is input by this script (if the file exists and is
+# non-empty), and can also be output by this script.
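+#
+# A typical invocation (paths are hypothetical) first captures the CPU
+# layout and then generates the script:
+#
+#    kvm-assign-cpus.sh /sys/devices/system/node > /tmp/cpuarray.awk
+#    kvm-get-cpus-script.sh /tmp/cpuarray.awk /tmp/cpubatches.awk /tmp/cpustate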
+
+cpuarrays="${1-/sys/devices/system/node}"
+scriptfile="${2}"
+statefile="${3}"
+
+if ! test -f "$cpuarrays"
+then
+       echo "File not found: $cpuarrays" 1>&2
+       exit 1
+fi
+scriptdir="`dirname "$scriptfile"`"
+if ! test -d "$scriptdir" || ! test -x "$scriptdir" || ! test -w "$scriptdir"
+then
+       echo "Directory not usable for script output: $scriptdir"
+       exit 1
+fi
+
+cat << '___EOF___' > "$scriptfile"
+BEGIN {
+___EOF___
+cat "$cpuarrays" >> "$scriptfile"
+if test -r "$statefile"
+then
+       cat "$statefile" >> "$scriptfile"
+fi
+cat << '___EOF___' >> "$scriptfile"
+}
+
+# Do we have the system architecture to guide CPU affinity?
+function gotcpus()
+{
+       return numnodes != "";
+}
+
+# Return a comma-separated list of the next n CPUs.
+function nextcpus(n,  i, s)
+{
+       for (i = 0; i < n; i++) {
+               if (nodecpus[curnode] == "")
+                       curnode = 0;
+               if (cpu[curnode][curcpu[curnode]] == "")
+                       curcpu[curnode] = 0;
+               if (s != "")
+                       s = s ",";
+               s = s cpu[curnode][curcpu[curnode]];
+               curcpu[curnode]++;
+               curnode++
+       }
+       return s;
+}
+
+# Dump out the current node/CPU state so that a later invocation of this
+# script can continue where this one left off.  Of course, this only works
+# when a state file was specified and there was valid sysfs state.
+# Returns 1 if the state was dumped, 0 otherwise.
+#
+# Dumping the state for one system configuration and loading it into
+# another isn't likely to do what you want, whatever that might be.
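+#
+# The dumped state file might read as follows (hypothetical values):
+#
+#    curnode = 1;
+#    curcpu[0] = 4;
+#    curcpu[1] = 4;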
+function dumpcpustate(  i, fn)
+{
+___EOF___
+echo ' fn = "'"$statefile"'";' >> $scriptfile
+cat << '___EOF___' >> "$scriptfile"
+       if (fn != "" && gotcpus()) {
+               print "curnode = " curnode ";" > fn;
+               for (i = 0; i < numnodes; i++)
+                       if (curcpu[i] != "")
+                               print "curcpu[" i "] = " curcpu[i] ";" >> fn;
+               return 1;
+       }
+       if (fn != "")
+               print "# No CPU state to dump." > fn;
+       return 0;
+}
+___EOF___
index f3a7a5e2b89d49a0752a0714f64714f7eaf50b6d..db2c0e2c8e1def6dc864df3c4a15bb62de3de957 100755 (executable)
@@ -25,7 +25,7 @@ then
        echo "$configfile -------"
 else
        title="$configfile ------- $ncs acquisitions/releases"
-       dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+       dur=`grep -v '^#' $i/qemu-cmd | sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' 2> /dev/null`
        if test -z "$dur"
        then
                :
index 671bfee4fcef4cd87340477ee884f0aa07d191a6..3afa5c6eda4f745bec7826e5a416ac44ca0bb3c0 100755 (executable)
@@ -25,7 +25,7 @@ if test -z "$nscfs"
 then
        echo "$configfile ------- "
 else
-       dur="`sed -e 's/^.* scftorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`"
+       dur="`grep -v '^#' $i/qemu-cmd | sed -e 's/^.* scftorture.shutdown_secs=//' -e 's/ .*$//' 2> /dev/null`"
        if test -z "$dur"
        then
                rate=""
index e01b31b87044100f979c2ca18752a92a0ef48bcc..0a5419982ab3ead8b0986d787b356ddbd07ac169 100755 (executable)
@@ -74,7 +74,10 @@ do
        done
        if test -f "$rd/kcsan.sum"
        then
-               if grep -q CONFIG_KCSAN=y $T
+               if ! test -f $T
+               then
+                       :
+               elif grep -q CONFIG_KCSAN=y $T
                then
                        echo "Compiler or architecture does not support KCSAN!"
                        echo Did you forget to switch your compiler with '--kmake-arg CC=<cc-that-supports-kcsan>'?
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh
new file mode 100755 (executable)
index 0000000..014ce68
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Periodically scan a directory tree to prevent files from being reaped
+# by systemd and friends on long runs.
+#
+# Usage: kvm-remote-noreap.sh pathname
+#
+# Copyright (C) 2021 Facebook, Inc.
+#
+# Authors: Paul E. McKenney <paulmck@kernel.org>
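+#
+# For example, kvm-remote.sh starts this script in the background for the
+# duration of a remote batch (the run directory below is hypothetical):
+#
+#    kvm-remote-noreap.sh /tmp/kvm-remote.12345 &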
+
+pathname="$1"
+if test "$pathname" = ""
+then
+       echo Usage: kvm-remote-noreap.sh pathname
+       exit 1
+fi
+if ! test -d "$pathname"
+then
+       echo Usage: kvm-remote-noreap.sh pathname
+       echo "       pathname must be a directory."
+       exit 2
+fi
+
+while test -d "$pathname"
+do
+       find "$pathname" -type f -exec touch -c {} \; > /dev/null 2>&1
+       sleep 30
+done
index 79e680e0e7bf3febde4676be43abb3ebfa8519d6..03126eb6ec5ae5d1df29a2a3d2691f390dcf225b 100755 (executable)
@@ -124,10 +124,12 @@ awk < "$rundir"/scenarios -v dest="$T/bin" -v rundir="$rundir" '
        n = $1;
        sub(/\./, "", n);
        fn = dest "/kvm-remote-" n ".sh"
+       print "kvm-remote-noreap.sh " rundir " &" > fn;
        scenarios = "";
        for (i = 2; i <= NF; i++)
                scenarios = scenarios " " $i;
-       print "kvm-test-1-run-batch.sh" scenarios > fn;
+       print "kvm-test-1-run-batch.sh" scenarios >> fn;
+       print "sync" >> fn;
        print "rm " rundir "/remote.run" >> fn;
 }'
 chmod +x $T/bin/kvm-remote-*.sh
@@ -172,11 +174,20 @@ checkremotefile () {
        do
                ssh $1 "test -f \"$2\""
                ret=$?
-               if test "$ret" -ne 255
+               if test "$ret" -eq 255
                then
+                       echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date`
+               elif test "$ret" -eq 0
+               then
+                       return 0
+               elif test "$ret" -eq 1
+               then
+                       echo " ---" File \"$2\" not found: ssh $1 test -f \"$2\"
+                       return 1
+               else
+                       echo " ---" Exit code $ret: ssh $1 test -f \"$2\", retry after $sleeptime seconds. `date`
                        return $ret
                fi
-               echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date`
                sleep $sleeptime
        done
 }
@@ -242,7 +253,8 @@ do
        do
                sleep 30
        done
-       ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu_pid */qemu-retval; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
+       echo " ---" Collecting results from $i `date`
+       ( cd "$oldrun"; ssh $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
 done
 
 ( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log"
index 7ea0809e229e9309a02469fbb4dd3d304a2605b1..1e29d656501bca56d49f8397199ca1733fc00121 100755 (executable)
@@ -50,10 +50,34 @@ grep '^#' $1/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
 echo ---- System running test: `uname -a`
 echo ---- Starting kernels. `date` | tee -a log
 $TORTURE_JITTER_START
+kvm-assign-cpus.sh /sys/devices/system/node > $T/cpuarray.awk
 for i in "$@"
 do
        echo ---- System running test: `uname -a` > $i/kvm-test-1-run-qemu.sh.out
        echo > $i/kvm-test-1-run-qemu.sh.out
+       export TORTURE_AFFINITY=
+       kvm-get-cpus-script.sh $T/cpuarray.awk $T/cpubatches.awk $T/cpustate
+       cat << '        ___EOF___' >> $T/cpubatches.awk
+       END {
+               affinitylist = "";
+               if (!gotcpus()) {
+                       print "echo No CPU-affinity information, so no taskset command.";
+               } else if (cpu_count !~ /^[0-9][0-9]*$/) {
+                       print "echo " scenario ": Bogus number of CPUs (old qemu-cmd?), so no taskset command.";
+               } else {
+                       affinitylist = nextcpus(cpu_count);
+                       if (!(affinitylist ~ /^[0-9,-][0-9,-]*$/))
+                               print "echo " scenario ": Bogus CPU-affinity information, so no taskset command.";
+                       else if (!dumpcpustate())
+                               print "echo " scenario ": Could not dump state, so no taskset command.";
+                       else
+                               print "export TORTURE_AFFINITY=" affinitylist;
+               }
+       }
+       ___EOF___
+       cpu_count="`grep '# TORTURE_CPU_COUNT=' $i/qemu-cmd | sed -e 's/^.*=//'`"
+       affinity_export="`awk -f $T/cpubatches.awk -v cpu_count="$cpu_count" -v scenario=$i < /dev/null`"
+       $affinity_export
        kvm-test-1-run-qemu.sh $i >> $i/kvm-test-1-run-qemu.sh.out 2>&1 &
 done
 for i in $runfiles
index 5b1aa2a4f3f695b7d387f2cd56909bc5cd57222f..44280582c594e6b541ade49c82e2a4a2524fe7c8 100755 (executable)
@@ -39,27 +39,34 @@ echo ' ---' `date`: Starting kernel, PID $$
 grep '^#' $resdir/qemu-cmd | sed -e 's/^# //' > $T/qemu-cmd-settings
 . $T/qemu-cmd-settings
 
-# Decorate qemu-cmd with redirection, backgrounding, and PID capture
-sed -e 's/$/ 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
-echo 'echo $! > $resdir/qemu_pid' >> $T/qemu-cmd
+# Decorate qemu-cmd with affinity, redirection, backgrounding, and PID capture
+taskset_command=
+if test -n "$TORTURE_AFFINITY"
+then
+       taskset_command="taskset -c $TORTURE_AFFINITY "
+fi
+sed -e 's/^[^#].*$/'"$taskset_command"'& 2>\&1 \&/' < $resdir/qemu-cmd > $T/qemu-cmd
+echo 'qemu_pid=$!' >> $T/qemu-cmd
+echo 'echo $qemu_pid > $resdir/qemu-pid' >> $T/qemu-cmd
+echo 'taskset -c -p $qemu_pid > $resdir/qemu-affinity' >> $T/qemu-cmd
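+# A decorated qemu-cmd might then read as follows (the CPU list and qemu
+# invocation are hypothetical):
+#
+#    taskset -c 0-3 qemu-system-x86_64 ... 2>&1 &
+#    qemu_pid=$!
+#    echo $qemu_pid > $resdir/qemu-pid
+#    taskset -c -p $qemu_pid > $resdir/qemu-affinity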
 
 # In case qemu refuses to run...
 echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
 
 # Attempt to run qemu
 kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null`
-( . $T/qemu-cmd; wait `cat  $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) &
+( . $T/qemu-cmd; wait `cat  $resdir/qemu-pid`; echo $? > $resdir/qemu-retval ) &
 commandcompleted=0
 if test -z "$TORTURE_KCONFIG_GDB_ARG"
 then
        sleep 10 # Give qemu's pid a chance to reach the file
-       if test -s "$resdir/qemu_pid"
+       if test -s "$resdir/qemu-pid"
        then
-               qemu_pid=`cat "$resdir/qemu_pid"`
-               echo Monitoring qemu job at pid $qemu_pid
+               qemu_pid=`cat "$resdir/qemu-pid"`
+               echo Monitoring qemu job at pid $qemu_pid `date`
        else
                qemu_pid=""
-               echo Monitoring qemu job at yet-as-unknown pid
+               echo Monitoring qemu job at as-yet-unknown pid `date`
        fi
 fi
 if test -n "$TORTURE_KCONFIG_GDB_ARG"
@@ -82,9 +89,9 @@ then
 fi
 while :
 do
-       if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+       if test -z "$qemu_pid" && test -s "$resdir/qemu-pid"
        then
-               qemu_pid=`cat "$resdir/qemu_pid"`
+               qemu_pid=`cat "$resdir/qemu-pid"`
        fi
        kruntime=`gawk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
        if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
@@ -115,22 +122,22 @@ do
                break
        fi
 done
-if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+if test -z "$qemu_pid" && test -s "$resdir/qemu-pid"
 then
-       qemu_pid=`cat "$resdir/qemu_pid"`
+       qemu_pid=`cat "$resdir/qemu-pid"`
 fi
-if test $commandcompleted -eq 0 -a -n "$qemu_pid"
+if test $commandcompleted -eq 0 && test -n "$qemu_pid"
 then
        if ! test -f "$resdir/../STOP.1"
        then
-               echo Grace period for qemu job at pid $qemu_pid
+               echo Grace period for qemu job at pid $qemu_pid `date`
        fi
        oldline="`tail $resdir/console.log`"
        while :
        do
                if test -f "$resdir/../STOP.1"
                then
-                       echo "PID $qemu_pid killed due to run STOP.1 request" >> $resdir/Warnings 2>&1
+                       echo "PID $qemu_pid killed due to run STOP.1 request `date`" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
@@ -152,13 +159,17 @@ then
                then
                        last_ts=0
                fi
-               if test "$newline" != "$oldline" -a "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               if test "$newline" != "$oldline" && test "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE)) && test "$last_ts" -gt "$TORTURE_SHUTDOWN_GRACE"
                then
                        must_continue=yes
+                       if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+                       then
+                               echo Continuing at console.log time $last_ts \"`tail -n 1 $resdir/console.log`\" `date`
+                       fi
                fi
-               if test $must_continue = no -a $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               if test $must_continue = no && test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
                then
-                       echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
+                       echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds `date`" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
@@ -172,5 +183,3 @@ fi
 
 # Tell the script that this run is done.
 rm -f $resdir/build.run
-
-parse-console.sh $resdir/console.log $title
index 420ed5ce9d32f5ff55aeff98c390a0a9175e097e..f4c8055dbf7add00472132014614f2d88051a567 100755 (executable)
@@ -205,6 +205,7 @@ echo "# TORTURE_KCONFIG_GDB_ARG=\"$TORTURE_KCONFIG_GDB_ARG\"" >> $resdir/qemu-cm
 echo "# TORTURE_JITTER_START=\"$TORTURE_JITTER_START\"" >> $resdir/qemu-cmd
 echo "# TORTURE_JITTER_STOP=\"$TORTURE_JITTER_STOP\"" >> $resdir/qemu-cmd
 echo "# TORTURE_TRUST_MAKE=\"$TORTURE_TRUST_MAKE\"; export TORTURE_TRUST_MAKE" >> $resdir/qemu-cmd
+echo "# TORTURE_CPU_COUNT=$cpu_count" >> $resdir/qemu-cmd
 
 if test -n "$TORTURE_BUILDONLY"
 then
@@ -214,3 +215,4 @@ then
 fi
 
 kvm-test-1-run-qemu.sh $resdir
+parse-console.sh $resdir/console.log $title
index b4ac4ee332226bee3c8c36ab118db03d2ca57b12..f442d84fb2a3e01c459bdf9296d164bbc2cf2f04 100755 (executable)
@@ -430,17 +430,10 @@ then
        git diff HEAD >> $resdir/$ds/testid.txt
 fi
 ___EOF___
-awk < $T/cfgcpu.pack \
-       -v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
-       -v CONFIGDIR="$CONFIGFRAG/" \
-       -v KVM="$KVM" \
-       -v ncpus=$cpus \
-       -v jitter="$jitter" \
-       -v rd=$resdir/$ds/ \
-       -v dur=$dur \
-       -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
-       -v TORTURE_BOOTARGS="$TORTURE_BOOTARGS" \
-'BEGIN {
+kvm-assign-cpus.sh /sys/devices/system/node > $T/cpuarray.awk
+kvm-get-cpus-script.sh $T/cpuarray.awk $T/dumpbatches.awk
+cat << '___EOF___' >> $T/dumpbatches.awk
+BEGIN {
        i = 0;
 }
 
@@ -451,7 +444,7 @@ awk < $T/cfgcpu.pack \
 }
 
 # Dump out the scripting required to run one test batch.
-function dump(first, pastlast, batchnum)
+function dump(first, pastlast, batchnum,  affinitylist)
 {
        print "echo ----Start batch " batchnum ": `date` | tee -a " rd "log";
        print "needqemurun="
@@ -483,6 +476,14 @@ function dump(first, pastlast, batchnum)
                print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` | tee -a " rd "log";
                print "mkdir " rd cfr[jn] " || :";
                print "touch " builddir ".wait";
+               affinitylist = "";
+               if (gotcpus()) {
+                       affinitylist = nextcpus(cpusr[jn]);
+               }
+               if (affinitylist ~ /^[0-9,-][0-9,-]*$/)
+                       print "export TORTURE_AFFINITY=" affinitylist;
+               else
+                       print "export TORTURE_AFFINITY=";
                print "kvm-test-1-run.sh " CONFIGDIR cf[j], rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn]  "/kvm-test-1-run.sh.out 2>&1 &"
                print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` | tee -a " rd "log";
                print "while test -f " builddir ".wait"
@@ -560,7 +561,19 @@ END {
        # Dump the last batch.
        if (ncpus != 0)
                dump(first, i, batchnum);
-}' >> $T/script
+}
+___EOF___
+awk < $T/cfgcpu.pack \
+       -v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
+       -v CONFIGDIR="$CONFIGFRAG/" \
+       -v KVM="$KVM" \
+       -v ncpus=$cpus \
+       -v jitter="$jitter" \
+       -v rd=$resdir/$ds/ \
+       -v dur=$dur \
+       -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
+       -v TORTURE_BOOTARGS="$TORTURE_BOOTARGS" \
+       -f $T/dumpbatches.awk >> $T/script
 echo kvm-end-run-stats.sh "$resdir/$ds" "$starttime" >> $T/script
 
 # Extract the tests and their batches from the script.
index 53ec7c046262d5b79737b2c2267977aaf7821132..363f56081eff3dc3d3d088902c5fa1e9635534af 100755 (executable)
@@ -53,6 +53,7 @@ do_refscale=yes
 do_kvfree=yes
 do_kasan=yes
 do_kcsan=no
+do_clocksourcewd=yes
 
 # doyesno - Helper function for yes/no arguments
 function doyesno () {
@@ -72,6 +73,7 @@ usage () {
        echo "       --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\""
        echo "       --doall"
        echo "       --doallmodconfig / --do-no-allmodconfig"
+       echo "       --do-clocksourcewd / --do-no-clocksourcewd"
        echo "       --do-kasan / --do-no-kasan"
        echo "       --do-kcsan / --do-no-kcsan"
        echo "       --do-kvfree / --do-no-kvfree"
@@ -109,7 +111,7 @@ do
                configs_scftorture="$configs_scftorture $2"
                shift
                ;;
-       --doall)
+       --do-all|--doall)
                do_allmodconfig=yes
                do_rcutorture=yes
                do_locktorture=yes
@@ -119,10 +121,14 @@ do
                do_kvfree=yes
                do_kasan=yes
                do_kcsan=yes
+               do_clocksourcewd=yes
                ;;
        --do-allmodconfig|--do-no-allmodconfig)
                do_allmodconfig=`doyesno "$1" --do-allmodconfig`
                ;;
+       --do-clocksourcewd|--do-no-clocksourcewd)
+               do_clocksourcewd=`doyesno "$1" --do-clocksourcewd`
+               ;;
        --do-kasan|--do-no-kasan)
                do_kasan=`doyesno "$1" --do-kasan`
                ;;
@@ -135,7 +141,7 @@ do
        --do-locktorture|--do-no-locktorture)
                do_locktorture=`doyesno "$1" --do-locktorture`
                ;;
-       --do-none)
+       --do-none|--donone)
                do_allmodconfig=no
                do_rcutorture=no
                do_locktorture=no
@@ -145,6 +151,7 @@ do
                do_kvfree=no
                do_kasan=no
                do_kcsan=no
+               do_clocksourcewd=no
                ;;
        --do-rcuscale|--do-no-rcuscale)
                do_rcuscale=`doyesno "$1" --do-rcuscale`
@@ -279,9 +286,9 @@ function torture_one {
 #      torture_bootargs="[ kernel boot arguments ]"
 #      torture_set flavor [ kvm.sh arguments ]
 #
-# Note that "flavor" is an arbitrary string.  Supply --torture if needed.
-# Note that quoting is problematic.  So on the command line, pass multiple
-# values with multiple kvm.sh argument instances.
+# Note that "flavor" is an arbitrary string that does not affect kvm.sh
+# in any way.  So also supply --torture if you need something other than
+# the default.
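+#
+# For example, a hypothetical short rcutorture run:
+#
+#	torture_bootargs="rcutorture.onoff_interval=200"
+#	torture_set "my-rcutorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 10 --trust-make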
 function torture_set {
        local cur_kcsan_kmake_args=
        local kcsan_kmake_tag=
@@ -377,6 +384,22 @@ then
        torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 1G --trust-make
 fi
 
+if test "$do_clocksourcewd" = "yes"
+then
+       torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
+       torture_set "clocksourcewd-1" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+       torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 clocksource.max_cswd_read_retries=1"
+       torture_set "clocksourcewd-2" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --kconfig "CONFIG_TEST_CLOCKSOURCE_WATCHDOG=y" --trust-make
+
+       # In case our work is already done...
+       if test "$do_rcutorture" != "yes"
+       then
+               torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000"
+               torture_set "clocksourcewd-3" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 45s --configs TREE03 --trust-make
+       fi
+fi
+
 echo " --- " $scriptname $args
 echo " --- " Done `date` | tee -a $T/log
 ret=0
@@ -395,6 +418,10 @@ then
        nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`"
        ret=2
 fi
+if test "$do_kcsan" = "yes"
+then
+       TORTURE_KCONFIG_KCSAN_ARG=1 tools/testing/selftests/rcutorture/bin/kcsan-collapse.sh tools/testing/selftests/rcutorture/res/$ds > tools/testing/selftests/rcutorture/res/$ds/kcsan.sum
+fi
 echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
 echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log
 tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
index bafe94cbd7390b626d31655ba7b7515fe6d0ac0e..3ca112444ce7791caf82c50dd592b896e428583c 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
index bafe94cbd7390b626d31655ba7b7515fe6d0ac0e..3ca112444ce7791caf82c50dd592b896e428583c 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
index ea4399020c6c1c94093a058c808197450f553ef8..dc02083803ce574a769e93d230e87b078e583f87 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=4
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
index dee7a3d6c5a5f8d8505b174dfdcdbf4e4fe2c4df..92bbc5a15c39ff44f7be2ce5aac8eab2b9d28517 100644 (file)
@@ -55,10 +55,27 @@ static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
        return true;
 }
 
+static void reverse_bytes(void *data, int length)
+{
+       int i = 0;
+       int j = length - 1;
+       uint8_t temp;
+       uint8_t *ptr = data;
+
+       while (i < j) {
+               temp = ptr[i];
+               ptr[i] = ptr[j];
+               ptr[j] = temp;
+               i++;
+               j--;
+       }
+}
+
 static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
                      uint8_t *q2)
 {
        struct q1q2_ctx ctx;
+       int len;
 
        if (!alloc_q1q2_ctx(s, m, &ctx)) {
                fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
@@ -89,8 +106,10 @@ static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
                goto out;
        }
 
-       BN_bn2bin(ctx.q1, q1);
-       BN_bn2bin(ctx.q2, q2);
+       len = BN_bn2bin(ctx.q1, q1);
+       reverse_bytes(q1, len);
+       len = BN_bn2bin(ctx.q2, q2);
+       reverse_bytes(q2, len);
 
        free_q1q2_ctx(&ctx);
        return true;
@@ -152,22 +171,6 @@ static RSA *gen_sign_key(void)
        return key;
 }
 
-static void reverse_bytes(void *data, int length)
-{
-       int i = 0;
-       int j = length - 1;
-       uint8_t temp;
-       uint8_t *ptr = data;
-
-       while (i < j) {
-               temp = ptr[i];
-               ptr[i] = ptr[j];
-               ptr[j] = temp;
-               i++;
-               j--;
-       }
-}
-
 enum mrtags {
        MRECREATE = 0x0045544145524345,
        MREADD = 0x0000000044444145,
@@ -367,8 +370,6 @@ bool encl_measure(struct encl *encl)
        /* BE -> LE */
        reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
        reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
-       reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE);
-       reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE);
 
        EVP_MD_CTX_destroy(ctx);
        RSA_free(key);
index b587b9a7a124bb2fcda464c6efcc284dd4c37742..0d7bbe49359d834cd9eb211128c39fe7bb06b47a 100644 (file)
@@ -4,7 +4,8 @@ test: virtio_test vringh_test
 virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
 
-CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
+LDFLAGS += -lpthread
 vpath %.c ../../drivers/virtio ../../drivers/vhost
 mod:
        ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
diff --git a/tools/virtio/linux/spinlock.h b/tools/virtio/linux/spinlock.h
new file mode 100644 (file)
index 0000000..028e3cd
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef SPINLOCK_H_STUB
+#define SPINLOCK_H_STUB
+
+#include <assert.h>
+#include <pthread.h>
+
+typedef pthread_spinlock_t  spinlock_t;
+
+static inline void spin_lock_init(spinlock_t *lock)
+{
+       int r = pthread_spin_init(lock, 0);
+       assert(!r);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+       int ret = pthread_spin_lock(lock);
+       assert(!ret);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       int ret = pthread_spin_unlock(lock);
+       assert(!ret);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+       spin_unlock(lock);
+}
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+       spin_unlock(lock);
+}
+
+static inline void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
+{
+       spin_lock(lock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
+{
+       spin_unlock(lock);
+}
+
+#endif
index 5d90254ddae472fb99274b5d7a418420c54b3d97..363b982283011542ef05cd495987a6305bd77599 100644 (file)
@@ -3,6 +3,7 @@
 #define LINUX_VIRTIO_H
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 
 struct device {
        void *parent;
@@ -12,6 +13,7 @@ struct virtio_device {
        struct device dev;
        u64 features;
        struct list_head vqs;
+       spinlock_t vqs_list_lock;
 };
 
 struct virtqueue {