Merge tag 'drm-misc-next-2021-09-23' of git://anongit.freedesktop.org/drm/drm-misc...
author Dave Airlie <airlied@redhat.com>
Fri, 24 Sep 2021 03:48:48 +0000 (13:48 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 24 Sep 2021 03:49:08 +0000 (13:49 +1000)
drm-misc-next for 5.15:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:

Driver Changes:
  - Conversions to dev_err_probe() helper (sketched below)
  - rockchip: Various build improvements, use
    DRM_BRIDGE_ATTACH_NO_CONNECTOR for LVDS and RGB
  - panel: New panel-edp driver

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210923074522.zaja7mzxeimxf6g3@gilmour
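
The dev_err_probe() conversions called out above follow one recurring
pattern. As a minimal sketch (demo_probe() and its "core" clock lookup are
hypothetical, not taken from any driver in this pull), a probe path
collapses its get-resource-or-defer error handling into a single call:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            /* Hypothetical resource lookup; any devm_*_get() fits here. */
            struct clk *clk = devm_clk_get(&pdev->dev, "core");

            /* dev_err_probe() returns the error it was given; it logs the
             * message at error level, except for -EPROBE_DEFER, which is
             * logged at debug level and recorded as the deferral reason
             * (visible in debugfs under devices_deferred). This replaces
             * the usual open-coded if (err != -EPROBE_DEFER) dev_err(...)
             * dance. */
            if (IS_ERR(clk))
                    return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                         "failed to get core clock\n");

            return 0;
    }

For the rockchip item, DRM_BRIDGE_ATTACH_NO_CONNECTOR is the
drm_bridge_attach() flag that tells the attached bridge not to create its
own drm_connector, leaving connector creation to the display driver.
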
271 files changed:
Documentation/admin-guide/README.rst
Documentation/devicetree/bindings/arm/tegra.yaml
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml [new file with mode: 0644]
Documentation/networking/dsa/sja1105.rst
Documentation/process/changes.rst
Documentation/translations/zh_CN/admin-guide/README.rst
Documentation/translations/zh_TW/admin-guide/README.rst
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/asm/asm-prototypes.h
arch/alpha/include/asm/io.h
arch/alpha/include/asm/jensen.h
arch/alpha/include/asm/setup.h [new file with mode: 0644]
arch/alpha/include/uapi/asm/setup.h
arch/alpha/kernel/sys_jensen.c
arch/alpha/lib/Makefile
arch/alpha/lib/udiv-qrnnd.S [moved from arch/alpha/math-emu/qrnnd.S with 98% similarity]
arch/alpha/math-emu/Makefile
arch/alpha/math-emu/math.c
arch/arm64/Kconfig
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/process.c
arch/m68k/include/asm/raw_io.h
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/parisc/include/asm/page.h
arch/parisc/lib/iomap.c
arch/powerpc/boot/Makefile
arch/powerpc/include/asm/asm-const.h
arch/powerpc/kernel/interrupt.c
arch/powerpc/kernel/interrupt_64.S
arch/powerpc/kernel/mce.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/sysdev/xics/xics-common.c
arch/riscv/Kconfig
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/pci/pci_mmio.c
arch/sh/boot/Makefile
arch/sparc/kernel/ioport.c
arch/sparc/kernel/mdesc.c
arch/x86/Kconfig
arch/x86/Makefile_32.cpu
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/uaccess.h
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/setup_percpu.c
arch/x86/mm/init_64.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/numa.c
arch/x86/mm/numa_emulation.c
arch/x86/mm/pat/memtype.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
block/blk-cgroup.c
block/blk-integrity.c
block/blk-mq-tag.c
drivers/base/arch_numa.c
drivers/base/power/trace.c
drivers/cpufreq/cpufreq_governor_attr_set.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/vexpress-spc-cpufreq.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/hv/ring_buffer.c
drivers/macintosh/smu.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/qca8k.c
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/amd/ni65.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/cadence/macb_pci.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_idc.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
drivers/net/ethernet/microsoft/mana/hw_channel.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/dmascc.c
drivers/net/ipa/ipa_table.c
drivers/net/phy/dp83640_reg.h
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/wan/Makefile
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/configfs.c
drivers/of/device.c
drivers/of/property.c
drivers/pci/pci-acpi.c
drivers/pci/quirks.c
drivers/pci/vpd.c
drivers/rtc/rtc-cmos.c
drivers/s390/char/sclp_early.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_queue.c
drivers/spi/spi-tegra20-slink.c
drivers/vhost/net.c
drivers/video/fbdev/Kconfig
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/swiotlb-xen.c
fs/io-wq.c
fs/io_uring.c
fs/qnx4/dir.c
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/mshyperv.h
include/asm-generic/pci_iomap.h
include/asm-generic/vmlinux.lds.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/compiler_attributes.h
include/linux/memblock.h
include/linux/mmap_lock.h
include/linux/overflow.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/uio.h
include/net/dsa.h
include/uapi/linux/io_uring.h
init/main.c
ipc/sem.c
kernel/bpf/disasm.c
kernel/bpf/disasm.h
kernel/bpf/stackmap.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/dma/debug.c
kernel/dma/mapping.c
kernel/events/core.c
kernel/locking/rwbase_rt.c
kernel/printk/printk.c
lib/Kconfig.debug
lib/bootconfig.c
lib/iov_iter.c
lib/pci_iomap.c
mm/ksm.c
mm/memblock.c
net/caif/chnl_net.c
net/core/netclassid_cgroup.c
net/core/netprio_cgroup.c
net/dccp/minisocks.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/ipv4/tcp_input.c
net/ipv4/udp_tunnel_nic.c
net/ipv6/ip6_fib.c
net/l2tp/l2tp_core.c
net/mctp/route.c
net/packet/af_packet.c
net/tipc/socket.c
net/unix/af_unix.c
scripts/Makefile.clang
scripts/Makefile.modpost
scripts/checkkconfigsymbols.py
scripts/clang-tools/gen_compile_commands.py
scripts/min-tool-version.sh
tools/bootconfig/include/linux/memblock.h
tools/include/linux/compiler-gcc.h
tools/include/linux/overflow.h
tools/lib/perf/evsel.c
tools/perf/builtin-script.c
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/util/bpf-event.c
tools/perf/util/machine.c
tools/testing/selftests/bpf/cgroup_helpers.c
tools/testing/selftests/bpf/cgroup_helpers.h
tools/testing/selftests/bpf/network_helpers.c
tools/testing/selftests/bpf/network_helpers.h
tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
tools/testing/selftests/bpf/progs/connect4_dropper.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_task_pt_regs.c
tools/testing/selftests/nci/nci_dev.c
tools/testing/selftests/net/altnames.sh
tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
tools/testing/selftests/powerpc/tm/tm-syscall.c

diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 35314b63008c258209d4454a86692fed462e3c46..caa3c09a5c3f91f73fd386370eb415c5e71ee9eb 100644 (file)
@@ -259,7 +259,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
 
- - Make sure you have at least gcc 4.9 available.
+ - Make sure you have at least gcc 5.1 available.
    For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
 
    Please note that you can still run a.out user programs with this kernel.
diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
index b962fa6d649c35ab4da1c20ddc399d202a06833e..d79d36ac0c44af58d316483a1a0fedd91ed720f4 100644 (file)
@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index fbb59c9ddda65d9cbc8cccb073bd75e7b0a694d7..78044c340e2089144fdc2dabe6765e488c36dd2e 100644 (file)
@@ -9,7 +9,7 @@ function block.
 
 All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
 For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
 
 DISP function blocks
 ====================
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index 7f2578d48e3f0bb769ca936ec7299c7103e19c36..9eb4bb529ad5c99ca3d124e5b184a339b35c989b 100644 (file)
@@ -19,7 +19,9 @@ properties:
       - const: allwinner,sun8i-v3s-emac
       - const: allwinner,sun50i-a64-emac
       - items:
-          - const: allwinner,sun50i-h6-emac
+          - enum:
+              - allwinner,sun20i-d1-emac
+              - allwinner,sun50i-h6-emac
           - const: allwinner,sun50i-a64-emac
 
   reg:
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
new file mode 100644 (file)
index 0000000..b9ca8ef
--- /dev/null
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+  - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+  Each Samsung UFS host controller instance should have its own node.
+  This binding defines Samsung-specific bindings other than what is used
+  in the common ufshcd bindings
+  [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+
+  compatible:
+    enum:
+      - samsung,exynos7-ufs
+
+  reg:
+    items:
+      - description: HCI register
+      - description: vendor specific register
+      - description: unipro register
+      - description: UFS protector register
+
+  reg-names:
+    items:
+      - const: hci
+      - const: vs_hci
+      - const: unipro
+      - const: ufsp
+
+  clocks:
+    items:
+      - description: ufs link core clock
+      - description: unipro main clock
+
+  clock-names:
+    items:
+      - const: core_clk
+      - const: sclk_unipro_main
+
+  interrupts:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: ufs-phy
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - phys
+  - phy-names
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/exynos7-clk.h>
+
+    ufs: ufs@15570000 {
+       compatible = "samsung,exynos7-ufs";
+       reg = <0x15570000 0x100>,
+             <0x15570100 0x100>,
+             <0x15571000 0x200>,
+             <0x15572000 0x300>;
+       reg-names = "hci", "vs_hci", "unipro", "ufsp";
+       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+                <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+       clock-names = "core_clk", "sclk_unipro_main";
+       pinctrl-names = "default";
+       pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+       phys = <&ufs_phy>;
+       phy-names = "ufs-phy";
+    };
+...
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
index 564caeebe2b26940edca7984ba83fa733bdafe63..29b1bae0cf000674f327d1552f47e77c0cb46581 100644 (file)
@@ -296,7 +296,7 @@ not available.
 Device Tree bindings and board design
 =====================================
 
-This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
 and aims to showcase some potential switch caveats.
 
 RMII PHY role and out-of-band signaling
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index d3a8557b66a1abb9ebcda48b0872e8727ce94850..e35ab74a0f804b0453e323b91ebacc6feeda0851 100644 (file)
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.9              gcc --version
+GNU C                  5.1              gcc --version
 Clang/LLVM (optional)  10.0.1           clang --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v
diff --git a/Documentation/translations/zh_CN/admin-guide/README.rst b/Documentation/translations/zh_CN/admin-guide/README.rst
index 669a022f68175e93a462ccb256e12bbae7d182a5..980eb20521cfe3417ab349f5452f53b9c68cade8 100644 (file)
@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
 编译内核
 ---------
 
- - 确保您至少有gcc 4.9可用。
+ - 确保您至少有gcc 5.1可用。
    有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。
 
    请注意,您仍然可以使用此内核运行a.out用户程序。
diff --git a/Documentation/translations/zh_TW/admin-guide/README.rst b/Documentation/translations/zh_TW/admin-guide/README.rst
index b752e50359e699e93850aa16cab485c00dd78e74..6ce97edbab37f58cbcb7aefa736555358bc2c79d 100644 (file)
@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
 編譯內核
 ---------
 
- - 確保您至少有gcc 4.9可用。
+ - 確保您至少有gcc 5.1可用。
    有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>` 。
 
    請注意,您仍然可以使用此內核運行a.out用戶程序。
diff --git a/MAINTAINERS b/MAINTAINERS
index a1bcf58c7a233c859cd699d59618ba79d566af8b..f9d5d3ffd18a5b14e4726fb3b07568a790d35eca 100644 (file)
@@ -14356,7 +14356,8 @@ F:      Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
 F:     drivers/pci/controller/pci-ixp4xx.c
 
 PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M:     Jonathan Derrick <jonathan.derrick@intel.com>
+M:     Nirmal Patel <nirmal.patel@linux.intel.com>
+R:     Jonathan Derrick <jonathan.derrick@linux.dev>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     drivers/pci/controller/vmd.c
diff --git a/Makefile b/Makefile
index 7cfe4ff36f44aa66938fea538109b6495a4de2d4..5e7c1d85444123523fef31d4172643760b3aa0a5 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -849,12 +849,6 @@ endif
 
 DEBUG_CFLAGS   :=
 
-# Workaround for GCC versions < 5.0
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
-ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS   += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
-endif
-
 ifdef CONFIG_DEBUG_INFO
 
 ifdef CONFIG_DEBUG_INFO_SPLIT
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 02e5b673bdade31dfd1bdd9c38f497d1e1d4afc2..4e87783c90ad708339a3a869adfa81d75615b27b 100644 (file)
@@ -20,7 +20,7 @@ config ALPHA
        select NEED_SG_DMA_LENGTH
        select VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
-       select GENERIC_PCI_IOMAP if PCI
+       select GENERIC_PCI_IOMAP
        select AUTO_IRQ_AFFINITY if SMP
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
 
 config ALPHA_JENSEN
        bool "Jensen"
-       depends on BROKEN
        select HAVE_EISA
        help
          DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h
index b34cc1f06720ff4c3b6fb843e25ea9879a2ad0d4..c8ae46fc2e745fec17dd88f2d877553695283f5f 100644 (file)
@@ -16,3 +16,4 @@ extern void __divlu(void);
 extern void __remlu(void);
 extern void __divqu(void);
 extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long, unsigned long);
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 0fab5ac90775827345e7c0522a27bf6fd440b184..c9cb554fbe54ced5d8dd27e6b3430f37f71c9b34 100644 (file)
@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
  * Change virtual addresses to physical addresses and vv.
  */
 #ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
        return (unsigned long)address - IDENT_ADDR;
 }
@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
        return (void *) (address + IDENT_ADDR);
 }
 #else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
         unsigned long phys = (unsigned long)address;
 
@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
 extern unsigned long __direct_map_base;
 extern unsigned long __direct_map_size;
 
-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated virt_to_bus(volatile void *address)
 {
        unsigned long phys = virt_to_phys(address);
        unsigned long bus = phys + __direct_map_base;
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h
index 916895155a889a79b09c5d9912b8c208e58a3892..1c4131453db2ca100b2a7522289d4f8368f45756 100644 (file)
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
  * convinced that I need one of the newer machines.
  */
 
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
 {
        return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
 }
 
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
 {
        *(vuip)((addr << 9) + EISA_VL82C106) = b;
        mb();
 }
 
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
 {
        long result;
 
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
        return __kernel_extbl(result, addr & 3);
 }
 
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
 {
        jensen_set_hae(0);
        *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h
new file mode 100644 (file)
index 0000000..262aab9
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB       0x20000000
+#define BOOT_ADDR      0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE      (16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS      0x300000 /* Old bootloaders hardcoded this.  */
+#else
+#define KERNEL_START_PHYS      0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START   (PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD    KERNEL_START
+#define INIT_STACK     (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT      (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE      (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR     (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is setup by the secondary bootstrap loader.  Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM                  ZERO_PGE
+#define COMMAND_LINE           ((char *)(absolute_pointer(PARAM + 0x0000)))
+#define INITRD_START           (*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE            (*(unsigned long *) (PARAM+0x108))
+
+#endif
diff --git a/arch/alpha/include/uapi/asm/setup.h b/arch/alpha/include/uapi/asm/setup.h
index 13b7ee465b0e521c7ffd6ac621935d09018fee8d..f881ea5947cbc1d42241f9d9cb5e7178cdc50f17 100644 (file)
@@ -1,43 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ALPHA_SETUP_H
-#define __ALPHA_SETUP_H
+#ifndef _UAPI__ALPHA_SETUP_H
+#define _UAPI__ALPHA_SETUP_H
 
 #define COMMAND_LINE_SIZE      256
 
-/*
- * We leave one page for the initial stack page, and one page for
- * the initial process structure. Also, the console eats 3 MB for
- * the initial bootloader (one of which we can reclaim later).
- */
-#define BOOT_PCB       0x20000000
-#define BOOT_ADDR      0x20000000
-/* Remove when official MILO sources have ELF support: */
-#define BOOT_SIZE      (16*1024)
-
-#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
-#define KERNEL_START_PHYS      0x300000 /* Old bootloaders hardcoded this.  */
-#else
-#define KERNEL_START_PHYS      0x1000000 /* required: Wildfire/Titan/Marvel */
-#endif
-
-#define KERNEL_START   (PAGE_OFFSET+KERNEL_START_PHYS)
-#define SWAPPER_PGD    KERNEL_START
-#define INIT_STACK     (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
-#define EMPTY_PGT      (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
-#define EMPTY_PGE      (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
-#define ZERO_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
-
-#define START_ADDR     (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
-
-/*
- * This is setup by the secondary bootstrap loader.  Because
- * the zero page is zeroed out as soon as the vm system is
- * initialized, we need to copy things out into a more permanent
- * place.
- */
-#define PARAM                  ZERO_PGE
-#define COMMAND_LINE           ((char*)(PARAM + 0x0000))
-#define INITRD_START           (*(unsigned long *) (PARAM+0x100))
-#define INITRD_SIZE            (*(unsigned long *) (PARAM+0x108))
-
-#endif
+#endif /* _UAPI__ALPHA_SETUP_H */
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index e5d870ff225f4b327277ea382dd5c57aecf10d40..5c9c88428124447c51b035418c2d6e537449b5f8 100644 (file)
@@ -7,6 +7,11 @@
  *
  * Code supporting the Jensen.
  */
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef  __EXTERN_INLINE
+
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 
 #include <asm/ptrace.h>
 
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef  __EXTERN_INLINE
-
 #include <asm/dma.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 854d5e79979e4ce929d7998bf1135a12880238f8..1cc74f7b50efbebe3c873db5a3cc55207f005248 100644 (file)
@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
 ev67-$(CONFIG_ALPHA_EV67) := ev67-
 
 lib-y =        __divqu.o __remqu.o __divlu.o __remlu.o \
+       udiv-qrnnd.o \
        udelay.o \
        $(ev6-y)memset.o \
        $(ev6-y)memcpy.o \
diff --git a/arch/alpha/math-emu/qrnnd.S b/arch/alpha/lib/udiv-qrnnd.S
similarity index 98%
rename from arch/alpha/math-emu/qrnnd.S
rename to arch/alpha/lib/udiv-qrnnd.S
index d6373ec1bff9e99a9f65368d713a2f42ecd0bf15..b887aa5428e55778dd224367f8f6f0df8113c683 100644 (file)
@@ -25,6 +25,7 @@
  # along with GCC; see the file COPYING.  If not, write to the 
  # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  # MA 02111-1307, USA.
+#include <asm/export.h>
 
         .set noreorder
         .set noat
@@ -161,3 +162,4 @@ $Odd:
        ret     $31,($26),1
 
        .end    __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)
diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile
index 6eda0973e1832ffe7c490b539b79709ef6f0cfe2..3206402a879291d92fa717cd4f8da6dd7627c35b 100644 (file)
@@ -7,4 +7,4 @@ ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
index f7cef66af88de6ebbda3057c47db09234ba99d9a..4212258f3cfdc8ef83f12e679003be42ee215cd5 100644 (file)
@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
        return si_code;
 }
-
-EXPORT_SYMBOL(__udiv_qrnnd);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 077f2ec4eeb23ff82e12d563ab696198cd977b32..5c7ae4c3954be956d9ca39b2f7795a52b6cfbb7f 100644 (file)
@@ -86,7 +86,7 @@ config ARM64
        select ARCH_SUPPORTS_LTO_CLANG_THIN
        select ARCH_SUPPORTS_CFI_CLANG
        select ARCH_SUPPORTS_ATOMIC_RMW
-       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
+       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
        select ARCH_SUPPORTS_NUMA_BALANCING
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
        select ARCH_WANT_DEFAULT_BPF_JIT
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5a294f20e9decd9d0c4aa6c7ab91f63bb6b2b4e6..ff4962750b3d0f66327167eaf0686a8efdfc9a29 100644 (file)
@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
        if (task->thread.sve_state) {
-               memset(task->thread.sve_state, 0, sve_state_size(current));
+               memset(task->thread.sve_state, 0, sve_state_size(task));
                return;
        }
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 19100fe8f7e499851251e2d1e9d201fa21aa9d7a..40adb8cdbf5af667d595abe5908cb81723d89722 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
-#include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -58,7 +57,7 @@
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
index 911826ea83ce1a3f9524b6a882051f85e7abbf6e..80eb2396d01ebf61895f8471c33b0e9728f25bcd 100644 (file)
  * two accesses to memory, which may be undesirable for some devices.
  */
 #define in_8(addr) \
-    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
+    ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
 #define in_be16(addr) \
-    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
+    ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
 #define in_be32(addr) \
-    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
+    ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
 #define in_le16(addr) \
-    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
+    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
 #define in_le32(addr) \
-    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
+    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
 
-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
 
 #define raw_inb in_8
 #define raw_inw in_be16
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index e1e90c49a49624ccf2cc81f427723de0c89dd28f..dfd6202fd403e92b208f5c8ba15c37be2912a298 100644 (file)
@@ -171,7 +171,6 @@ static int bcd2int (unsigned char b)
 
 int mvme147_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
        if (!op) {
                m147_rtc->ctrl = RTC_READ;
                t->tm_year = bcd2int (m147_rtc->bcd_year);
@@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t)
                m147_rtc->ctrl = 0;
                if (t->tm_year < 70)
                        t->tm_year += 100;
+       } else {
+               /* FIXME Setting the time is not yet supported */
+               return -EOPNOTSUPP;
        }
        return 0;
 }
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index b59593c7cfb9dfbfd9073b18166bca1cdd5c0515..b4422c2dfbbf4f7c0a1131dd0dab5413e7cc50f0 100644 (file)
@@ -436,7 +436,6 @@ int bcd2int (unsigned char b)
 
 int mvme16x_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
        if (!op) {
                rtc->ctrl = RTC_READ;
                t->tm_year = bcd2int (rtc->bcd_year);
@@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
                rtc->ctrl = 0;
                if (t->tm_year < 70)
                        t->tm_year += 100;
+       } else {
+               /* FIXME Setting the time is not yet supported */
+               return -EOPNOTSUPP;
        }
        return 0;
 }
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index d00313d1274e899e18eb3ba7c9ea0185fbb73512..0561568f7b4898d4959b97576f25ca08cde37ebe 100644 (file)
@@ -184,7 +184,7 @@ extern int npmem_ranges;
 #include <asm-generic/getorder.h>
 #include <asm/pdc.h>
 
-#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+#define PAGE0   ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
 
 /* DEFINITION OF THE ZERO-PAGE (PAG0) */
 /* based on work by Jason Eckhardt (jason@equator.com) */
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index f03adb1999e77ed05f1282be1b24aa5c601403ce..367f6397bda7a4c05f0d577047e4a0d15df01859 100644 (file)
@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
        }
 }
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        if (!INDIRECT_ADDR(addr)) {
                iounmap(addr);
        }
 }
+EXPORT_SYMBOL(pci_iounmap);
+#endif
 
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6900d0ac242122de7b7f1e7a8c9ea164508f5bda..089ee3ea55c8ae64144e79f940b87a5abdd91abc 100644 (file)
@@ -35,7 +35,6 @@ endif
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
                 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-                -include $(srctree)/include/linux/compiler_attributes.h \
                 $(LINUXINCLUDE)
 
 ifdef CONFIG_PPC64_BOOT_WRAPPER
@@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
 BOOTCFLAGS     += -fno-stack-protector
 endif
 
+BOOTCFLAGS     += -include $(srctree)/include/linux/compiler_attributes.h
 BOOTCFLAGS     += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
 
 DTC_FLAGS      ?= -p 1024
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
index 0ce2368bd20f975dde774f05dc19123fc53d77b1..dbfa5e1e3198af0faf0cf90e51cb144f27e678c4 100644 (file)
 #  define ASM_CONST(x)         __ASM_CONST(x)
 #endif
 
-/*
- * Inline assembly memory constraint
- *
- * GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
- *
- */
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define UPD_CONSTR ""
-#else
 #define UPD_CONSTR "<>"
-#endif
 
 #endif /* _ASM_POWERPC_ASM_CONST_H */
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index a73f3f70a6576ff9ab4a1450a123e8d011837a93..de10a26972581d94129d962e73614f744c598706 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/switch_to.h>
 #include <asm/syscall.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include <asm/unistd.h>
 
 #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
         */
        irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
 
+       /*
+        * If system call is called with TM active, set _TIF_RESTOREALL to
+        * prevent RFSCV being used to return to userspace, because POWER9
+        * TM implementation has problems with this instruction returning to
+        * transactional state. Final register values are not relevant because
+        * the transaction will be aborted upon return anyway. Or in the case
+        * of unsupported_scv SIGILL fault, the return state does not much
+        * matter because it's an edge case.
+        */
+       if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+                       unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+               current_thread_info()->flags |= _TIF_RESTOREALL;
+
+       /*
+        * If the system call was made with a transaction active, doom it and
+        * return without performing the system call. Unless it was an
+        * unsupported scv vector, in which case it's treated like an illegal
+        * instruction.
+        */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+           !trap_is_unsupported_scv(regs)) {
+               /* Enable TM in the kernel, and disable EE (for scv) */
+               hard_irq_disable();
+               mtmsr(mfmsr() | MSR_TM);
+
+               /* tabort, this dooms the transaction, nothing else */
+               asm volatile(".long 0x7c00071d | ((%0) << 16)"
+                               :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+               /*
+                * Userspace will never see the return value. Execution will
+                * resume after the tbegin. of the aborted transaction with the
+                * checkpointed register state. A context switch could occur
+                * or signal delivered to the process before resuming the
+                * doomed transaction context, but that should all be handled
+                * as expected.
+                */
+               return -ENOSYS;
+       }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
        local_irq_enable();
 
        if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index d4212d2ff0b54bc64b3167c8e2b34120d08eb70e..ec950b08a8dcc06571ba8f65076dd357bb16f118 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/ptrace.h>
-#include <asm/tm.h>
 
        .section        ".toc","aw"
 SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
        .globl system_call_vectored_\name
 system_call_vectored_\name:
 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
        .globl system_call_common
 system_call_common:
 _ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
-       /* Firstly we need to enable TM in the kernel */
-       mfmsr   r10
-       li      r9, 1
-       rldimi  r10, r9, MSR_TM_LG, 63-MSR_TM_LG
-       mtmsrd  r10, 0
-
-       /* tabort, this dooms the transaction, nothing else */
-       li      r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-       TABORT(R9)
-
-       /*
-        * Return directly to userspace. We have corrupted user register state,
-        * but userspace will never see that register state. Execution will
-        * resume after the tbegin of the aborted transaction with the
-        * checkpointed register state.
-        */
-       li      r9, MSR_RI
-       andc    r10, r10, r9
-       mtmsrd  r10, 1
-       mtspr   SPRN_SRR0, r11
-       mtspr   SPRN_SRR1, r12
-       RFI_TO_USER
-       b       .       /* prevent speculative execution */
-#endif
-
        /*
         * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
         * touched, no exit work created, then this can be used.
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 47a683cd00d246e6b8a5fe2f648802acd21cd8e4..fd829f7f25a47ee207cb7a7226b1ba384d27c36c 100644 (file)
@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
 {
        int index;
        struct machine_check_event evt;
+       unsigned long msr;
 
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
        memcpy(&local_paca->mce_info->mce_event_queue[index],
               &evt, sizeof(evt));
 
-       /* Queue irq work to process this event later. */
-       irq_work_queue(&mce_event_process_work);
+       /*
+        * Queue irq work to process this event later. Before
+        * queuing the work, enable translation for non-radix LPAR,
+        * as irq_work_queue may try to access memory outside RMO
+        * region.
+        */
+       if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+               msr = mfmsr();
+               mtmsr(msr | MSR_IR | MSR_DR);
+               irq_work_queue(&mce_event_process_work);
+               mtmsr(msr);
+       } else {
+               irq_work_queue(&mce_event_process_work);
+       }
 }
 
 void mce_common_process_ue(struct pt_regs *regs,
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 75079397c2a563c97072faa75b7715d7b719a6a5..90484425a1e6b283ddaa4a58383c65f37ffaf595 100644 (file)
@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
        /* The following code handles the fake_suspend = 1 case */
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
-       stdu    r1, -PPC_MIN_STKFRM(r1)
+       stdu    r1, -TM_FRAME_SIZE(r1)
 
        /* Turn on TM. */
        mfmsr   r8
@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        nop
 
+       /*
+        * It's possible that treclaim. may modify registers, if we have lost
+        * track of fake-suspend state in the guest due to it using rfscv.
+        * Save and restore registers in case this occurs.
+        */
+       mfspr   r3, SPRN_DSCR
+       mfspr   r4, SPRN_XER
+       mfspr   r5, SPRN_AMR
+       /* SPRN_TAR would need to be saved here if the kernel ever used it */
+       mfcr    r12
+       SAVE_NVGPRS(r1)
+       SAVE_GPR(2, r1)
+       SAVE_GPR(3, r1)
+       SAVE_GPR(4, r1)
+       SAVE_GPR(5, r1)
+       stw     r12, 8(r1)
+       std     r1, HSTATE_HOST_R1(r13)
+
        /* We have to treclaim here because that's the only way to do S->N */
        li      r3, TM_CAUSE_KVM_RESCHED
        TRECLAIM(R3)
 
+       GET_PACA(r13)
+       ld      r1, HSTATE_HOST_R1(r13)
+       REST_GPR(2, r1)
+       REST_GPR(3, r1)
+       REST_GPR(4, r1)
+       REST_GPR(5, r1)
+       lwz     r12, 8(r1)
+       REST_NVGPRS(r1)
+       mtspr   SPRN_DSCR, r3
+       mtspr   SPRN_XER, r4
+       mtspr   SPRN_AMR, r5
+       mtcr    r12
+       HMT_MEDIUM
+
        /*
         * We were in fake suspend, so we are not going to save the
         * register state as the guest checkpointed state (since
@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        std     r5, VCPU_TFHAR(r9)
        std     r6, VCPU_TFIAR(r9)
 
-       addi    r1, r1, PPC_MIN_STKFRM
+       addi    r1, r1, TM_FRAME_SIZE
        ld      r0, PPC_LR_STKOFF(r1)
        mtlr    r0
        blr
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 5c1a157a83b8989928dcd816d404852d1ff10a8e..244a727c6ba49c8eb59ba9de65e51694a27aa644 100644 (file)
@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
        if (xics_ics->check(xics_ics, hwirq))
                return -EINVAL;
 
-       /* No chip data for the XICS domain */
+       /* Let the ICS be the chip data for the XICS domain (for ICS native) */
        irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
-                           NULL, handle_fasteoi_irq, NULL, NULL);
+                           xics_ics, handle_fasteoi_irq, NULL, NULL);
 
        return 0;
 }
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c3f3fd583e04b63bae46331821ca1580fbebcb9c..301a54233c7e26a598eb63f4a75aad9301c12141 100644 (file)
@@ -236,7 +236,7 @@ config ARCH_RV32I
 config ARCH_RV64I
        bool "RV64I"
        select 64BIT
-       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
        select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2bd90c51efd34981a1f5376f279d1bf9a189cb85..b86de61b8caa2f2909b4c569c3f574942c07652a 100644 (file)
@@ -685,16 +685,6 @@ config STACK_GUARD
          The minimum size for the stack guard should be 256 for 31 bit and
          512 for 64 bit.
 
-config WARN_DYNAMIC_STACK
-       def_bool n
-       prompt "Emit compiler warnings for function with dynamic stack usage"
-       help
-         This option enables the compiler option -mwarn-dynamicstack. If the
-         compiler supports this options generates warnings for functions
-         that dynamically allocate stack space using alloca.
-
-         Say N if you are unsure.
-
 endmenu
 
 menu "I/O subsystem"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index a3cf33ad009fc53e52464e68c30899e43ba4499b..450b351dfa8ef37c238443cbf5c4b84876aaeaba 100644 (file)
@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
 endif
 
-ifdef CONFIG_WARN_DYNAMIC_STACK
-  ifneq ($(call cc-option,-mwarn-dynamicstack),)
-    KBUILD_CFLAGS += -mwarn-dynamicstack
-    KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
-  endif
-endif
-
 ifdef CONFIG_EXPOLINE
   ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
     CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 37b6115ed80e8a0698defe66318ac0caf355d1d5..6aad18ee131d6634efd7188b9660d723f2ea36a6 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_LIVEPATCH=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 56a1cc85c5d72dedd1c93e5d851cc6dfcb02e79d..f08b161c94463cd2b2a02d7d13c730d3dffb0ebf 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index ae683aa623ace069c7cd8e663632cd9608fb5734..c5b35ea129cfa5addd89efb09530dac88d39e088 100644 (file)
@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 58592dfa5cb602ed58eb34dcdead6f1cc66755c6..c081e7e2d6e7fed5ae9e1426efe686af457263ce 100644 (file)
@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
        $(call if_changed,lzo)
 
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
        $(call if_changed,uimage,bzip2)
 
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
        $(call if_changed,uimage,lzma)
 
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
        $(call if_changed,uimage,xz)
 
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
        $(call if_changed,uimage,lzo)
 
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
        $(call if_changed,uimage,none)
 
 OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
 
 OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
        $(call if_changed,objcopy)
 
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 8e1d72a16759494086c2e8fe85f3044e673f12ef..7ceae24b0ca991992b346140c49e92bb3009f762 100644 (file)
@@ -356,7 +356,9 @@ err_nomem:
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
-       if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+       size = PAGE_ALIGN(size);
+
+       if (!sparc_dma_free_resource(cpu_addr, size))
                return;
 
        dma_make_coherent(dma_addr, size);
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 8e645ddac58e2dea89d88efd156c4c3128d4e215..30f171b7b00c23aeb887a3c7fa13020e47420f45 100644 (file)
@@ -39,6 +39,7 @@ struct mdesc_hdr {
        u32     node_sz; /* node block size */
        u32     name_sz; /* name block size */
        u32     data_sz; /* data block size */
+       char    data[];
 } __attribute__((aligned(16)));
 
 struct mdesc_elem {
@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
 
 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
 {
-       return (struct mdesc_elem *) (mdesc + 1);
+       return (struct mdesc_elem *) mdesc->data;
 }
 
 static void *name_block(struct mdesc_hdr *mdesc)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4e001bbbb4256f412236fd6229e5073ec2b36ddc..dad7f85dcdea06f3742a968b04afb8e06c583c9b 100644 (file)
@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
        def_bool y
 
+config ARCH_NR_GPIO
+       int
+       default 1024 if X86_64
+       default 512
+
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index e7355f8b51c2b8e1f51c885d275771247afa3c36..94834c4b5e5ef279f9f0da9521a76f9a82348556 100644 (file)
@@ -4,6 +4,12 @@
 
 tune           = $(call cc-option,-mtune=$(1),$(2))
 
+ifdef CONFIG_CC_IS_CLANG
+align          := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align          := -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
 cflags-$(CONFIG_M486SX)                += -march=i486
 cflags-$(CONFIG_M486)          += -march=i486
 cflags-$(CONFIG_M586)          += -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6)                += -march=k6
 # They make zero difference whatsoever to performance at this time.
 cflags-$(CONFIG_MK7)           += -march=athlon
 cflags-$(CONFIG_MK8)           += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE)       += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE)       += -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) $(align)
 cflags-$(CONFIG_MWINCHIPC6)    += $(call cc-option,-march=winchip-c6,-march=i586)
 cflags-$(CONFIG_MWINCHIP3D)    += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) $(align)
 cflags-$(CONFIG_MVIAC3_2)      += $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)                += -march=i686
 cflags-$(CONFIG_MCORE2)                += -march=i686 $(call tune,core2)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 90e682a92820dea099bc5cbb75a703581d828c0e..32a1ad356c183f35873cf37926edda5c79c63734 100644 (file)
@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
 /*
  * IPI implementation on Hyper-V.
  */
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+               bool exclude_self)
 {
        struct hv_send_ipi_ex **arg;
        struct hv_send_ipi_ex *ipi_arg;
@@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 
        if (!cpumask_equal(mask, cpu_present_mask)) {
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-               nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+               if (exclude_self)
+                       nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+               else
+                       nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
        }
        if (nr_bank < 0)
                goto ipi_mask_ex_done;
@@ -138,15 +142,25 @@ ipi_mask_ex_done:
        return hv_result_success(status);
 }
 
-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+               bool exclude_self)
 {
-       int cur_cpu, vcpu;
+       int cur_cpu, vcpu, this_cpu = smp_processor_id();
        struct hv_send_ipi ipi_arg;
        u64 status;
+       unsigned int weight;
 
        trace_hyperv_send_ipi_mask(mask, vector);
 
-       if (cpumask_empty(mask))
+       weight = cpumask_weight(mask);
+
+       /*
+        * Do nothing if
+        *   1. the mask is empty
+        *   2. the mask only contains self when exclude_self is true
+        */
+       if (weight == 0 ||
+           (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
                return true;
 
        if (!hv_hypercall_pg)
@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
        ipi_arg.cpu_mask = 0;
 
        for_each_cpu(cur_cpu, mask) {
+               if (exclude_self && cur_cpu == this_cpu)
+                       continue;
                vcpu = hv_cpu_number_to_vp_number(cur_cpu);
                if (vcpu == VP_INVAL)
                        return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
        return hv_result_success(status);
 
 do_ex_hypercall:
-       return __send_ipi_mask_ex(mask, vector);
+       return __send_ipi_mask_ex(mask, vector, exclude_self);
 }
 
 static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
                return false;
 
        if (vp >= 64)
-               return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+               return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
 
        status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
        return hv_result_success(status);
@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)
 
 static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
 {
-       if (!__send_ipi_mask(mask, vector))
+       if (!__send_ipi_mask(mask, vector, false))
                orig_apic.send_IPI_mask(mask, vector);
 }
 
 static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
-       unsigned int this_cpu = smp_processor_id();
-       struct cpumask new_mask;
-       const struct cpumask *local_mask;
-
-       cpumask_copy(&new_mask, mask);
-       cpumask_clear_cpu(this_cpu, &new_mask);
-       local_mask = &new_mask;
-       if (!__send_ipi_mask(local_mask, vector))
+       if (!__send_ipi_mask(mask, vector, true))
                orig_apic.send_IPI_mask_allbutself(mask, vector);
 }
 
@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
 
 static void hv_send_ipi_all(int vector)
 {
-       if (!__send_ipi_mask(cpu_online_mask, vector))
+       if (!__send_ipi_mask(cpu_online_mask, vector, false))
                orig_apic.send_IPI_all(vector);
 }
 
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c9fa7be3df82ddb9495961b3e2f22b1ac07edafa..5c95d242f38d708e313f39cc781fa8455fdd16ed 100644 (file)
@@ -301,8 +301,8 @@ do {                                                                        \
        unsigned int __gu_low, __gu_high;                               \
        const unsigned int __user *__gu_ptr;                            \
        __gu_ptr = (const void __user *)(ptr);                          \
-       __get_user_asm(__gu_low, ptr, "l", "=r", label);                \
-       __get_user_asm(__gu_high, ptr+1, "l", "=r", label);             \
+       __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);           \
+       __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);        \
        (x) = ((unsigned long long)__gu_high << 32) | __gu_low;         \
 } while (0)
 #else
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 8cb7816d03b4cd27dd9a2143533e4d684e6a0823..193204aee880178f6d30412b4c068a033473e330 100644 (file)
@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 
 static void kill_me_now(struct callback_head *ch)
 {
+       struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+       p->mce_count = 0;
        force_sig(SIGBUS);
 }
 
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
        int flags = MF_ACTION_REQUIRED;
        int ret;
 
+       p->mce_count = 0;
        pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
        if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
        }
 }
 
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-       current->mce_addr = m->addr;
-       current->mce_kflags = m->kflags;
-       current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
-       current->mce_whole_page = whole_page(m);
+       int count = ++current->mce_count;
 
-       if (kill_current_task)
-               current->mce_kill_me.func = kill_me_now;
-       else
-               current->mce_kill_me.func = kill_me_maybe;
+       /* First call, save all the details */
+       if (count == 1) {
+               current->mce_addr = m->addr;
+               current->mce_kflags = m->kflags;
+               current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+               current->mce_whole_page = whole_page(m);
+
+               if (kill_current_task)
+                       current->mce_kill_me.func = kill_me_now;
+               else
+                       current->mce_kill_me.func = kill_me_maybe;
+       }
+
+       /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+       if (count > 10)
+               mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+       /* Second or later call, make sure page address matches the one from first call */
+       if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+               mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+       /* Do not call task_work_add() more than once */
+       if (count > 1)
+               return;
 
        task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                /* If this triggers there is no way to recover. Die hard. */
                BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-               queue_task_work(&m, kill_current_task);
+               queue_task_work(&m, msg, kill_current_task);
 
        } else {
                /*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                }
 
                if (m.kflags & MCE_IN_KERNEL_COPYIN)
-                       queue_task_work(&m, kill_current_task);
+                       queue_task_work(&m, msg, kill_current_task);
        }
 out:
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
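The machine-check hunks above make repeated #MC hits in one task idempotent: queue_task_work() now counts calls per task, records the details and queues the task_work only on the first call, and panics if the faults keep repeating or move to a different user page; the task_work callbacks reset mce_count first thing. A condensed sketch of the dedup pattern, with a struct task_state, printf and exit() standing in for the task_struct fields, task_work_add() and mce_panic().

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define PAGE_SHIFT 12

/* Per-task state, standing in for the task_struct mce_* fields. */
struct task_state {
	int mce_count;
	unsigned long mce_addr;
	bool work_queued;
};

/*
 * Save details on the first hit, bail out hard if faults repeat too
 * often or move to a new page, and queue the callback exactly once.
 */
static void queue_task_work(struct task_state *t, unsigned long addr)
{
	int count = ++t->mce_count;

	if (count == 1) {		/* first call: record everything */
		t->mce_addr = addr;
		t->work_queued = true;	/* stands in for task_work_add() */
		printf("queued work for addr %#lx\n", addr);
		return;
	}

	if (count > 10) {
		fprintf(stderr, "too many consecutive machine checks\n");
		exit(1);
	}

	if ((t->mce_addr >> PAGE_SHIFT) != (addr >> PAGE_SHIFT)) {
		fprintf(stderr, "consecutive machine checks to different pages\n");
		exit(1);
	}

	/* Second or later call on the same page: nothing more to do. */
}

int main(void)
{
	struct task_state t = { 0 };

	queue_task_work(&t, 0x7f0000001234);	/* queues once */
	queue_task_work(&t, 0x7f0000001238);	/* same page: deduped */
	return 0;
}
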
index 78a32b956e8104237c15f72ebf58460578878dea..5afd985591939cc285f0b035cc2e2fc810cef4e2 100644 (file)
@@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-       memblock_free(__pa(ptr), size);
+       memblock_free_ptr(ptr, size);
 }
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
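This and several later hunks (arch_numa, numa, numa_emulation, kasan_init_64) convert memblock_free(__pa(ptr), size) calls to memblock_free_ptr(ptr, size), which takes the virtual address directly and is a no-op for NULL, letting callers drop both the __pa() conversion and their "if (ptr)" guards. A sketch of the wrapper under those assumptions; fake_pa() and the printf body stand in for __pa() and the real allocator.

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the memblock internals; __pa() maps virt -> phys. */
static unsigned long fake_pa(const void *virt)
{
	return (unsigned long)virt;
}

static void memblock_free(unsigned long phys, size_t size)
{
	printf("freeing %zu bytes at phys %#lx\n", size, phys);
}

/*
 * Convenience wrapper: does the __pa() conversion itself and silently
 * ignores NULL, so callers lose both the cast and the NULL check.
 */
static void memblock_free_ptr(void *ptr, size_t size)
{
	if (ptr)
		memblock_free(fake_pa(ptr), size);
}

int main(void)
{
	char buf[64];

	memblock_free_ptr(buf, sizeof(buf));
	memblock_free_ptr(NULL, 128);	/* safe: no-op */
	return 0;
}
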
index a6e11763763fb3de079e0971c4d6e518492ea997..36098226a95731b5f91a2f2ab156bb9e25716aca 100644 (file)
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
                return 0;
 
        p4d = p4d_offset(pgd, addr);
-       if (p4d_none(*p4d))
+       if (!p4d_present(*p4d))
                return 0;
 
        pud = pud_offset(p4d, addr);
-       if (pud_none(*pud))
+       if (!pud_present(*pud))
                return 0;
 
        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));
 
        pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       if (!pmd_present(*pmd))
                return 0;
 
        if (pmd_large(*pmd))
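kern_addr_valid() above switches from the *_none() checks to !*_present(): a page-table entry can be non-empty and still not present (for example a swap or protnone encoding), and only present entries may be walked further. A toy illustration of the distinction, where a single ENTRY_PRESENT bit mimics _PAGE_PRESENT.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_PRESENT 0x1ULL	/* like _PAGE_PRESENT */

/* An entry can be non-zero ("not none") and still not be present. */
static bool entry_none(uint64_t e)    { return e == 0; }
static bool entry_present(uint64_t e) { return e & ENTRY_PRESENT; }

static bool addr_valid(uint64_t entry)
{
	/* The fix: reject anything not present, not just empty entries. */
	return entry_present(entry);
}

int main(void)
{
	uint64_t swapped_out = 0x80;	/* non-zero, PRESENT bit clear */

	printf("none: %d present: %d valid: %d\n",
	       entry_none(swapped_out), entry_present(swapped_out),
	       addr_valid(swapped_out));
	return 0;
}
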
index 1a50434c8a4dab44d6d23742d3bcd2dbbc1e2d99..ef885370719a605876b3375e22db72f8d025cb64 100644 (file)
@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
                        p = early_alloc(PMD_SIZE, nid, false);
                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
                                return;
-                       else if (p)
-                               memblock_free(__pa(p), PMD_SIZE);
+                       memblock_free_ptr(p, PMD_SIZE);
                }
 
                p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
                        p = early_alloc(PUD_SIZE, nid, false);
                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
                                return;
-                       else if (p)
-                               memblock_free(__pa(p), PUD_SIZE);
+                       memblock_free_ptr(p, PUD_SIZE);
                }
 
                p = early_alloc(PAGE_SIZE, nid, true);
index a1b5c71099e61d50095666e18e16e6b921c5d664..1e9b93b088dbf0dc454d2ebb30dcab055d1f7f59 100644 (file)
@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)
 
        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
-               memblock_free(__pa(numa_distance), size);
+               memblock_free_ptr(numa_distance, size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
 }
index 737491b13728c3ba88146d8ba728abbc2cdd92db..e801e30089c436087681b8f09ad88e76d3046d56 100644 (file)
@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
        }
 
        /* free the copied physical distance table */
-       if (phys_dist)
-               memblock_free(__pa(phys_dist), phys_size);
+       memblock_free_ptr(phys_dist, phys_size);
        return;
 
 no_emu:
index 3112ca7786ed1462686fb8fbae96b621d3334195..4ba2a3ee4bce1258f0b86a47a47c823e3ff2eb49 100644 (file)
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
        int err = 0;
 
        start = sanitize_phys(start);
-       end = sanitize_phys(end);
+
+       /*
+        * The end address passed into this function is exclusive, but
+        * sanitize_phys() expects an inclusive address.
+        */
+       end = sanitize_phys(end - 1) + 1;
        if (start >= end) {
                WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
                                start, end - 1, cattr_name(req_type));
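The memtype_reserve() fix above resolves an interval-convention mismatch: callers pass an exclusive end address while sanitize_phys() clamps an inclusive one, so a range ending exactly at the top of the supported address space used to be mangled by the clamp. Sanitizing end - 1 and adding the 1 back keeps both conventions intact. A tiny numeric sketch, with MAX_PHYS as a toy address-space limit.

#include <stdint.h>
#include <stdio.h>

#define MAX_PHYS 0xFFFFULL	/* toy top of the physical address space */

/* Clamps an *inclusive* address to the supported range. */
static uint64_t sanitize_phys(uint64_t addr)
{
	return addr > MAX_PHYS ? MAX_PHYS : addr;
}

int main(void)
{
	uint64_t end = MAX_PHYS + 1;	/* exclusive end of a top-of-space range */

	/* Buggy: clamping the exclusive end shrinks the range by one. */
	uint64_t buggy = sanitize_phys(end);
	/* Fixed: convert to inclusive, clamp, convert back. */
	uint64_t fixed = sanitize_phys(end - 1) + 1;

	printf("buggy=%#llx fixed=%#llx\n",
	       (unsigned long long)buggy, (unsigned long long)fixed);
	return 0;
}
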
index 753f63734c1345bdeb64b78e16d7363fad5e647a..349f780a15674d80b2b8757780a8cb1ce5d2481b 100644 (file)
@@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void)
        x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+       x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1359,6 +1364,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
                add_preferred_console("xenboot", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
+               x86_platform.set_legacy_features =
+                               xen_domu_set_legacy_features;
        } else {
                const struct dom0_vga_console_info *info =
                        (void *)((char *)xen_start_info +
index 1df5f01529e59856fcde44d615650a07d1c90c83..8d751939c6f302690856db16cd39dad62762024f 100644 (file)
@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
        if (pinned) {
                struct page *page = pfn_to_page(pfn);
 
-               if (static_branch_likely(&xen_struct_pages_ready))
+               pinned = false;
+               if (static_branch_likely(&xen_struct_pages_ready)) {
+                       pinned = PagePinned(page);
                        SetPagePinned(page);
+               }
 
                xen_mc_batch();
 
                __set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
                        __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
                xen_mc_issue(PARAVIRT_LAZY_MMU);
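The Xen hunk above avoids pinning an L1 page-table page twice: it records whether PagePinned was already set before setting it, and skips the pin hypercall for pages that were pinned earlier. A sketch of the remember-the-old-flag pattern; struct page and hypercall_pin() are toy stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct page { bool pinned; };

static void hypercall_pin(struct page *pg)
{
	printf("pin hypercall for %p\n", (void *)pg);
}

/*
 * Remember the *previous* flag value when setting it, and only issue
 * the expensive pin operation if the page was not already pinned.
 */
static void alloc_ptpage(struct page *pg, bool pages_ready)
{
	bool was_pinned = false;

	if (pages_ready) {
		was_pinned = pg->pinned;	/* test... */
		pg->pinned = true;		/* ...and set */
	}
	if (!was_pinned)
		hypercall_pin(pg);
}

int main(void)
{
	struct page already = { .pinned = true };	/* pinned earlier */
	struct page fresh = { .pinned = false };

	alloc_ptpage(&already, true);	/* old value true: no second pin */
	alloc_ptpage(&fresh, true);	/* old value false: pinned once */
	return 0;
}
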
index 3c88a79a319bbb0bad48fc147bf5e2ac7ed2a9ec..38b9f7684952a71607a0c7869fcd885995ed80e9 100644 (file)
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
        if (preloaded)
                radix_tree_preload_end();
 
-       ret = blk_iolatency_init(q);
-       if (ret)
-               goto err_destroy_all;
-
        ret = blk_ioprio_init(q);
        if (ret)
                goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
        if (ret)
                goto err_destroy_all;
 
+       ret = blk_iolatency_init(q);
+       if (ret) {
+               blk_throtl_exit(q);
+               goto err_destroy_all;
+       }
+
        return 0;
 
 err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
        /* alloc failed, nothing's initialized yet, free everything */
        spin_lock_irq(&q->queue_lock);
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
        spin_unlock_irq(&q->queue_lock);
        ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
        __clear_bit(pol->plid, q->blkcg_pols);
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
 
        spin_unlock_irq(&q->queue_lock);
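Both blk-cgroup hunks above take blkcg->lock around freeing and clearing blkg->pd[]: the outer queue_lock serializes the list walk, but per-cgroup code touches the same pd slots under the blkcg lock, so freeing with only the queue lock held is racy. A sketch of the nested-lock teardown; pthread spinlocks stand in for the kernel spinlocks and the outer queue lock is elided.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy blkg with per-policy data guarded by its group's lock. */
struct blkcg { pthread_spinlock_t lock; };
struct blkg  { struct blkcg *blkcg; void *pd; };

/*
 * Each pd slot is also touched by per-cgroup code under blkcg->lock,
 * so take that lock too before freeing and clearing the slot.
 */
static void deactivate_one(struct blkg *blkg)
{
	pthread_spin_lock(&blkg->blkcg->lock);
	if (blkg->pd) {
		free(blkg->pd);
		blkg->pd = NULL;
	}
	pthread_spin_unlock(&blkg->blkcg->lock);
}

int main(void)
{
	struct blkcg cg;
	struct blkg g = { .blkcg = &cg, .pd = malloc(32) };

	pthread_spin_init(&cg.lock, PTHREAD_PROCESS_PRIVATE);
	deactivate_one(&g);
	printf("pd freed under blkcg lock: %p\n", g.pd);
	return 0;
}
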
index 69a12177dfb62f6d1f1fc3f1f1166ca52cceae5c..16d5d5338392a22ac6fd41bc93025943b22a7138 100644 (file)
@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
+       struct blk_integrity *bi = &disk->queue->integrity;
+
+       if (!bi->profile)
+               return;
+
+       /* ensure all bios are off the integrity workqueue */
+       blk_flush_integrity();
        blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-       memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+       memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
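blk_integrity_unregister() above gains two safeguards: it returns early if no profile was ever registered, and it flushes the integrity workqueue before zeroing the profile so no in-flight bio can still dereference it. A sketch of the quiesce-then-clear teardown pattern; flush_integrity_workqueue() stands in for blk_flush_integrity().

#include <stdio.h>
#include <string.h>

struct integrity { const char *profile; };

static void flush_integrity_workqueue(void)
{
	/* Stand-in for blk_flush_integrity(): wait for in-flight work. */
	printf("flushing in-flight integrity work\n");
}

/*
 * Teardown: do nothing if never registered, quiesce users, then clear
 * the state.
 */
static void integrity_unregister(struct integrity *bi)
{
	if (!bi->profile)
		return;			/* nothing was registered */
	flush_integrity_workqueue();	/* no bio may still see bi */
	memset(bi, 0, sizeof(*bi));
}

int main(void)
{
	struct integrity bi = { .profile = "crc32" };

	integrity_unregister(&bi);	/* flushes, then clears */
	integrity_unregister(&bi);	/* second call: early return */
	return 0;
}
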
index 86f87346232a6b9a7f704dc4e0d41378ee7bd056..ff5caeb825429d91c3f6bf58af68d91fc50b222d 100644 (file)
@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
-       if (!rq || !refcount_inc_not_zero(&rq->ref))
+       if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
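The blk-mq hunk above hardens the tag lookup: tags->rqs[] can briefly hold a stale pointer to a request that has since been recycled under a different tag, so the lookup now also requires rq->tag == bitnr before taking a reference. A sketch of the validate-before-pin pattern; a plain int stands in for refcount_t and the tags->lock is elided.

#include <stdio.h>

struct request { int tag; int refcount; };

/*
 * The slot may point at a request that has been recycled under a
 * different tag, so check that the request still owns this tag before
 * pinning it (refcount_inc_not_zero() in the real code).
 */
static struct request *find_and_get_req(struct request **slots, int bitnr)
{
	struct request *rq = slots[bitnr];	/* under tags->lock in real code */

	if (!rq || rq->tag != bitnr || rq->refcount == 0)
		return NULL;
	rq->refcount++;
	return rq;
}

int main(void)
{
	struct request ok = { .tag = 7, .refcount = 1 };
	struct request stale = { .tag = 7, .refcount = 1 };
	struct request *slots[8] = { [3] = &stale, [7] = &ok };

	/* Slot 3 holds a request that migrated to tag 7: rejected. */
	printf("lookup(3) -> %p\n", (void *)find_and_get_req(slots, 3));
	/* Slot 7 still owns its tag: pinned and returned. */
	printf("lookup(7) -> %p\n", (void *)find_and_get_req(slots, 7));
	return 0;
}
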
index 46c503486e9629a1786c63f14f80405da8f468a7..00fb4120a5b3a8166882216b91316d86ab0c9950 100644 (file)
@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
        size = numa_distance_cnt * numa_distance_cnt *
                sizeof(numa_distance[0]);
 
-       memblock_free(__pa(numa_distance), size);
+       memblock_free_ptr(numa_distance, size);
        numa_distance_cnt = 0;
        numa_distance = NULL;
 }
index a97f33d0c59f93846c46521823903d5aeb62426a..94665037f4a35aff03273c61c3703a44244c7841 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
        const char *file = *(const char **)(tracedata + 2);
        unsigned int user_hash_value, file_hash_value;
 
+       if (!x86_platform.legacy.rtc)
+               return;
+
        user_hash_value = user % USERHASH;
        file_hash_value = hash_string(lineno, file, FILEHASH);
        set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        hash_value_early_read = read_magic_time();
        register_pm_notifier(&pm_trace_nb);
        return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
        unsigned int val = hash_value_early_read;
        unsigned int user, file, dev;
 
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        user = val % USERHASH;
        val = val / USERHASH;
        file = val % FILEHASH;
index 66b05a326910eaa01c3e1c50e551f5e9e4cf530b..a6f365b9cc1ad8b026d906dcff015885cebdef40 100644 (file)
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
        if (count)
                return count;
 
-       kobject_put(&attr_set->kobj);
        mutex_destroy(&attr_set->update_lock);
+       kobject_put(&attr_set->kobj);
        return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);
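The cpufreq governor fix above is an ordering bug: kobject_put() can drop the last reference and free the structure that embeds both the kobject and update_lock, so the mutex must be destroyed before the put, not after. A sketch of the destroy-members-before-final-put rule; attr_set_release() and the manual refcount stand in for the kobject release machinery.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct attr_set {
	pthread_mutex_t update_lock;
	int refcount;
};

static void attr_set_release(struct attr_set *s)
{
	free(s);	/* stands in for the kobject release callback */
}

/*
 * Tear down members first: the final put may free the whole object,
 * after which touching update_lock would be a use-after-free.
 */
static void attr_set_put(struct attr_set *s)
{
	pthread_mutex_destroy(&s->update_lock);	/* before the final put */
	if (--s->refcount == 0)
		attr_set_release(s);		/* stands in for kobject_put() */
}

int main(void)
{
	struct attr_set *s = malloc(sizeof(*s));

	pthread_mutex_init(&s->update_lock, NULL);
	s->refcount = 1;
	attr_set_put(s);
	printf("released safely\n");
	return 0;
}
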
index 1097f826ad70eab9197310f3258c13c69d687a62..8c176b7dae415bbb112b5721deb67faffa7c1de5 100644 (file)
@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return -ENODEV;
 
-       if (no_load)
-               return -ENODEV;
-
        id = x86_match_cpu(hwp_support_ids);
        if (id) {
+               bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+               if (hwp_forced)
+                       pr_info("HWP enabled by BIOS\n");
+               else if (no_load)
+                       return -ENODEV;
+
                copy_cpu_funcs(&core_funcs);
                /*
                 * Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
                 * If HWP is enabled already, though, there is no choice but to
                 * deal with it.
                 */
-               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
-                   intel_pstate_hwp_is_enabled()) {
+               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)
 
                        goto hwp_cpu_matched;
                }
+               pr_info("HWP not enabled\n");
        } else {
+               if (no_load)
+                       return -ENODEV;
+
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
                        pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
        else if (!strcmp(str, "passive"))
                default_driver = &intel_cpufreq;
 
-       if (!strcmp(str, "no_hwp")) {
-               pr_info("HWP disabled\n");
+       if (!strcmp(str, "no_hwp"))
                no_hwp = 1;
-       }
+
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
index 284b6bd040b1af3bb214aba57bf1ee7fdac4c1b8..d295f405c4bb08cb371741fb98c44a61e8f0b814 100644 (file)
@@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
 static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
 {
        struct device *cpu_dev;
-       int cur_cluster = cpu_to_cluster(policy->cpu);
 
        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
index dc3c6b3a00e531f2e325b59de35db3804bf6b6f3..d356e329e6f89e19f7af3c03f4268f0fa934dfc1 100644 (file)
@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
        MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE      8
+#define HWIP_MAX_INSTANCE      10
 
 struct amd_powerplay {
        void *pp_handle;
index 3003ee1c94873a195ac2daf2a56b2899b74c4940..1d41c2c00623ba209b32f4a034e960ecdadb5c2e 100644 (file)
@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
                kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (adev->kfd.dev)
+               r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+       return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
        int r = 0;
index ec028cf963f5a70404df5e26bc5371fa94ab43b8..3bc52b2c604fbe50e79cc8c124fb74920451e843 100644 (file)
@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                        const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+       return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
        return 0;
index 277128846dd128cd78a890c7b772cd99c023caeb..463b9c0283f7ea3862441f5923139e632d11554a 100644 (file)
@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
        struct dentry *ent;
        int r, i;
 
-
-
        ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
                                  &fops_ib_preempt);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
                                  &fops_sclk_set);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        /* Register debugfs entries for amdgpu_ttm */
index 41c6b3aacd37921ab5c392ee84240561fb2624bb..ab3794c42d363d8dde5dd4ea0e60b5d1135ffe56 100644 (file)
@@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                goto init_failed;
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               goto init_failed;
+
        r = amdgpu_device_ip_hw_init_phase1(adev);
        if (r)
                goto init_failed;
@@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 {
        int r;
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               return r;
+
        r = amdgpu_device_ip_resume_phase1(adev);
        if (r)
                return r;
@@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                                dev_warn(tmp_adev->dev, "asic atom init failed!");
                        } else {
                                dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+                               r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+                               if (r)
+                                       goto out;
+
                                r = amdgpu_device_ip_resume_phase1(tmp_adev);
                                if (r)
                                        goto out;
index c7797eac83c3a09cccbca9a9df03692aa318c6e4..9ff600a38559d8f37b761797b012f1b39c75219e 100644 (file)
@@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
                break;
        default:
                adev->gmc.tmz_enabled = false;
-               dev_warn(adev->dev,
+               dev_info(adev->dev,
                         "Trusted Memory Zone (TMZ) feature not supported\n");
                break;
        }
index dc44c946a2442a430559a5625e28f7011abbf4ff..98732518543e539e601866a4beda3666732408db 100644 (file)
@@ -757,7 +757,7 @@ Out:
        return res;
 }
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
 {
        return RAS_MAX_RECORD_COUNT;
 }
index f95fc61b30219c9b6d27a2ff1befe21a41294b32..6bb00578bfbbcdc2cb99e85304007d85ab09560e 100644 (file)
@@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
 int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
                             struct eeprom_table_record *records, const u32 num);
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);
 
 void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
 
index 7b634a1517f9c1e90c0f8c61f4e8d9735fe279e5..0554576d36955832c2483043f5aaeff08fea45e4 100644 (file)
@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
        ent = debugfs_create_file(name,
                                  S_IFREG | S_IRUGO, root,
                                  ring, &amdgpu_debugfs_ring_fops);
-       if (!ent)
-               return -ENOMEM;
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
 
        i_size_write(ent->d_inode, ring->ring_size + 12);
        ring->ent = ent;
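Both amdgpu hunks above fix the same class of bug: debugfs_create_file() reports failure with an ERR_PTR()-encoded pointer, never NULL, so "if (!ent)" could never fire and errors were silently ignored. A self-contained sketch of the ERR_PTR convention; the macros mirror the kernel's encoding of small negative errnos in the top of the pointer range, and create_file() is a hypothetical stand-in.

#include <stdio.h>

/* Minimal ERR_PTR convention: small negative errnos encoded in a pointer. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *create_file(int fail)
{
	static int file;

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &file;
}

int main(void)
{
	void *ent = create_file(1);

	/* "if (!ent)" never fires: failure comes back as an ERR_PTR. */
	if (IS_ERR(ent))
		printf("create failed: %ld\n", PTR_ERR(ent));

	ent = create_file(0);
	if (!IS_ERR(ent))
		printf("created at %p\n", ent);
	return 0;
}
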
index 1129e17e9f09c15d0a04139af826c3c894eb95af..820fcb24231f9f08ab4a11452feb3fdb516737ae 100644 (file)
@@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                goto out;
        }
 
+       if (bo->type == ttm_bo_type_device &&
+           new_mem->mem_type == TTM_PL_VRAM &&
+           old_mem->mem_type != TTM_PL_VRAM) {
+               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+                * accesses the BO after it's moved.
+                */
+               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       }
+
        if (adev->mman.buffer_funcs_enabled) {
                if (((old_mem->mem_type == TTM_PL_SYSTEM &&
                      new_mem->mem_type == TTM_PL_VRAM) ||
@@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                        return r;
        }
 
-       if (bo->type == ttm_bo_type_device &&
-           new_mem->mem_type == TTM_PL_VRAM &&
-           old_mem->mem_type != TTM_PL_VRAM) {
-               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
-                * accesses the BO after it's moved.
-                */
-               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       }
-
 out:
        /* update statistics */
        atomic64_add(bo->base.size, &adev->num_bytes_moved);
index 16a57b70cc1a97a3a485b7aa297d3c2414205587..98d1b3ab3a46436a68c390171d2009103f5034e5 100644 (file)
@@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 4,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
        if (!kfd)
                return NULL;
 
-       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
-        * 32 and 64-bit requests are possible and must be
-        * supported.
-        */
-       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
-       if (device_info->needs_pci_atomics &&
-           !kfd->pci_atomic_requested) {
-               dev_info(kfd_device,
-                        "skipped device %x:%x, PCI rejects atomics\n",
-                        pdev->vendor, pdev->device);
-               kfree(kfd);
-               return NULL;
-       }
-
        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
@@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
                        - kfd->vm_info.first_vmid_kfd + 1;
 
+       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+        * 32 and 64-bit requests are possible and must be
+        * supported.
+        */
+       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+       if (!kfd->pci_atomic_requested &&
+           kfd->device_info->needs_pci_atomics &&
+           (!kfd->device_info->no_atomic_fw_version ||
+            kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+               dev_info(kfd_device,
+                        "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+                        kfd->pdev->vendor, kfd->pdev->device,
+                        kfd->mec_fw_version,
+                        kfd->device_info->no_atomic_fw_version);
+               return false;
+       }
+
        /* Verify module parameters regarding mapped process number*/
        if ((hws_max_conc_proc < 0)
                        || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
@@ -1057,17 +1069,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
        return ret;
 }
 
-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
 {
        int err = 0;
 
        err = kfd_iommu_resume(kfd);
-       if (err) {
+       if (err)
                dev_err(kfd_device,
                        "Failed to resume IOMMU for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
-               return err;
-       }
+       return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+       int err = 0;
 
        err = kfd->dqm->ops.start(kfd->dqm);
        if (err) {
index ab83b0de6b22d5d152dd5f3023e48f80013007d0..6d8f9bb2d9057310a2cb3ca47017592c94fae420 100644 (file)
@@ -207,6 +207,7 @@ struct kfd_device_info {
        bool supports_cwsr;
        bool needs_iommu_device;
        bool needs_pci_atomics;
+       uint32_t no_atomic_fw_version;
        unsigned int num_sdma_engines;
        unsigned int num_xgmi_sdma_engines;
        unsigned int num_sdma_queues_per_engine;
index 9b1fc54555ee63130dcbaf49f2750ddf325cfae7..66c799f5c7cf7e43acc3af2a38409241e029c3fb 100644 (file)
@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
 
+       memset(pa_config, 0, sizeof(*pa_config));
+
        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
@@ -6024,21 +6026,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
                return 0;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       work = kzalloc(sizeof(*work), GFP_ATOMIC);
-       if (!work)
-               return -ENOMEM;
+       if (dm->vblank_control_workqueue) {
+               work = kzalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work)
+                       return -ENOMEM;
 
-       INIT_WORK(&work->work, vblank_control_worker);
-       work->dm = dm;
-       work->acrtc = acrtc;
-       work->enable = enable;
+               INIT_WORK(&work->work, vblank_control_worker);
+               work->dm = dm;
+               work->acrtc = acrtc;
+               work->enable = enable;
 
-       if (acrtc_state->stream) {
-               dc_stream_retain(acrtc_state->stream);
-               work->stream = acrtc_state->stream;
-       }
+               if (acrtc_state->stream) {
+                       dc_stream_retain(acrtc_state->stream);
+                       work->stream = acrtc_state->stream;
+               }
 
-       queue_work(dm->vblank_control_workqueue, &work->work);
+               queue_work(dm->vblank_control_workqueue, &work->work);
+       }
 #endif
 
        return 0;
@@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
-                                           struct dc_state *dc_state)
+                                           struct dc_state *dc_state,
+                                           struct dsc_mst_fairness_vars *vars)
 {
        struct dc_stream_state *stream = NULL;
        struct drm_connector *connector;
        struct drm_connector_state *new_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
-       int i, j, clock, bpp;
+       int i, j, clock;
        int vcpi, pbn_div, pbn = 0;
 
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                }
 
                pbn_div = dm_mst_get_pbn_divider(stream->link);
-               bpp = stream->timing.dsc_cfg.bits_per_pixel;
                clock = stream->timing.pix_clk_100hz / 10;
-               pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+               /* pbn is calculated by compute_mst_dsc_configs_for_state*/
+               for (j = 0; j < dc_state->stream_count; j++) {
+                       if (vars[j].aconnector == aconnector) {
+                               pbn = vars[j].pbn;
+                               break;
+                       }
+               }
+
                vcpi = drm_dp_mst_atomic_enable_dsc(state,
                                                    aconnector->port,
                                                    pbn, pbn_div,
@@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
        }
 }
 
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+       const struct drm_display_mode *native_mode;
+
+       if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+           connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+               return;
+
+       encoder = amdgpu_dm_connector_to_encoder(connector);
+       if (!encoder)
+               return;
+
+       amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       native_mode = &amdgpu_encoder->native_mode;
+       if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+               return;
+
+       drm_connector_set_panel_orientation_with_quirk(connector,
+                                                      DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+                                                      native_mode->hdisplay,
+                                                      native_mode->vdisplay);
+}
+
 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                                              struct edid *edid)
 {
@@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                 * restored here.
                 */
                amdgpu_dm_update_freesync_caps(connector, edid);
+
+               amdgpu_set_panel_orientation(connector);
        } else {
                amdgpu_dm_connector->num_modes = 0;
        }
@@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
-       /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
-        * hot-plug, headless s3, dpms
+       /* Stream removed and re-enabled
+        *
+        * Can sometimes overlap with the HPD case,
+        * thus set update_hdcp to false to avoid
+        * setting HDCP multiple times.
+        *
+        * Handles:     DESIRED -> DESIRED (Special case)
+        */
+       if (!(old_state->crtc && old_state->crtc->enabled) &&
+               state->crtc && state->crtc->enabled &&
+               connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               dm_con_state->update_hdcp = false;
+               return true;
+       }
+
+       /* Hot-plug, headless s3, dpms
+        *
+        * Only start HDCP if the display is connected/enabled.
+        * update_hdcp flag will be set to false until the next
+        * HPD comes in.
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
@@ -8648,7 +8705,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                 * If PSR or idle optimizations are enabled then flush out
                 * any pending work before hardware programming.
                 */
-               flush_workqueue(dm->vblank_control_workqueue);
+               if (dm->vblank_control_workqueue)
+                       flush_workqueue(dm->vblank_control_workqueue);
 #endif
 
                bundle->stream_update.stream = acrtc_state->stream;
@@ -8983,7 +9041,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                /* if there mode set or reset, disable eDP PSR */
                if (mode_set_reset_required) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-                       flush_workqueue(dm->vblank_control_workqueue);
+                       if (dm->vblank_control_workqueue)
+                               flush_workqueue(dm->vblank_control_workqueue);
 #endif
                        amdgpu_dm_psr_disable_all(dm);
                }
@@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        int ret, i;
        bool lock_and_validation_needed = false;
        struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
 
        trace_amdgpu_dm_atomic_check_begin(state);
 
@@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+               if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
                        goto fail;
 
-               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
                if (ret)
                        goto fail;
 #endif
@@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                status = dc_validate_global_state(dc, dm_state->context, false);
                if (status != DC_OK) {
-                       DC_LOG_WARNING("DC global validation failure: %s (%d)",
+                       drm_dbg_atomic(dev,
+                                      "DC global validation failure: %s (%d)",
                                       dc_status_to_str(status), status);
                        ret = -EINVAL;
                        goto fail;
index 1bcba6943fd7ffb2196daf59068b8f9e71e497da..7af0d58c231b6cb0b4acdfe2e9d639b1b448d2c3 100644 (file)
@@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
        uint32_t num_slices_h;
        uint32_t num_slices_v;
        uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
-       int pbn;
-       bool dsc_enabled;
-       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
 };
 
 static int kbps_to_peak_pbn(int kbps)
@@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 
 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                                             struct dc_state *dc_state,
-                                            struct dc_link *dc_link)
+                                            struct dc_link *dc_link,
+                                            struct dsc_mst_fairness_vars *vars)
 {
        int i;
        struct dc_stream_state *stream;
        struct dsc_mst_fairness_params params[MAX_PIPES];
-       struct dsc_mst_fairness_vars vars[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int count = 0;
        bool debugfs_overwrite = false;
@@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                params[count].timing = &stream->timing;
                params[count].sink = stream->sink;
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+               params[count].aconnector = aconnector;
                params[count].port = aconnector->port;
                params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
                if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        }
        /* Try no compression */
        for (i = 0; i < count; i++) {
+               vars[i].aconnector = params[i].aconnector;
                vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                vars[i].dsc_enabled = false;
                vars[i].bpp_x16 = 0;
@@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 }
 
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state)
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars)
 {
        int i, j;
        struct dc_stream_state *stream;
@@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                        return false;
 
                mutex_lock(&aconnector->mst_mgr.lock);
-               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
                        return false;
                }
index b38bd68121ceb56cae8fbb847ff7852d08bff03e..900d3f7a84989e3b22f855a36c82191482bc7963 100644 (file)
@@ -39,8 +39,17 @@ void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_vars {
+       int pbn;
+       bool dsc_enabled;
+       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
+};
+
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state);
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars);
 #endif
 
 #endif
index c9f47d16747235faaf85134bd386050e4b75a48f..b1bf80da3a55fd6f59f9153312043a0a8463564f 100644 (file)
@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
        depth = *pcpu;
        put_cpu_ptr(&fpu_recursion_depth);
 
-       ASSERT(depth > 1);
+       ASSERT(depth >= 1);
 }
 
 /**
index 8bd7f42a8053fc21067c4f116e2e702feab2874c..1e44b13c1c7def50df2a317bd85aaa25705c2d19 100644 (file)
@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-
        struct abm *abm = get_abm_from_stream_res(link);
+       struct panel_cntl *panel_cntl = link->panel_cntl;
+       struct dc  *dc = link->ctx->dc;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       bool fw_set_brightness = true;
 
-       if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-               return DC_ERROR_UNEXPECTED;
+       if (dmcu)
+               fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-       return (int) abm->funcs->get_current_backlight(abm);
+       if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+               return panel_cntl->funcs->get_current_backlight(panel_cntl);
+       else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+               return (int) abm->funcs->get_current_backlight(abm);
+       else
+               return DC_ERROR_UNEXPECTED;
 }
 
 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
index 330edd666b7d8277bf68c05f4e29ec78d20cb7b1..f6dbc5a747576d68b82d20c832e7717fc846ad58 100644 (file)
@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
 #include "dm_services.h"
 #include "dc.h"
 #include "dc_link_dp.h"
@@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries(
                dp_disable_link_phy(link, signal);
 
                /* Abort link training if failure due to sink being unplugged. */
-               if (status == LINK_TRAINING_ABORT)
-                       break;
-               else if (do_fallback) {
+               if (status == LINK_TRAINING_ABORT) {
+                       enum dc_connection_type type = dc_connection_none;
+
+                       dc_link_detect_sink(link, &type);
+                       if (type == dc_connection_none)
+                               break;
+               } else if (do_fallback) {
                        decide_fallback_link_setting(*link_setting, &current_setting, status);
                        /* Fail link training if reduced link bandwidth no longer meets
                         * stream requirements.
index e9233923586316349952ad5dc9914be364044a74..e8570060d007ba5bab0db3b3395aca2b9c487573 100644 (file)
@@ -49,7 +49,6 @@
 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
 {
        uint64_t current_backlight;
-       uint32_t round_result;
        uint32_t bl_period, bl_int_count;
        uint32_t bl_pwm, fractional_duty_cycle_en;
        uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
        current_backlight = div_u64(current_backlight, bl_period);
        current_backlight = (current_backlight + 1) >> 1;
 
-       current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
-       round_result = (round_result >> (bl_int_count-1)) & 1;
-
-       current_backlight >>= bl_int_count;
-       current_backlight += round_result;
-
        return (uint32_t)(current_backlight);
 }
 
index 8a08ecc34c6992a64ead985bf1f2047075a35800..4884a4e1f261c88fb6c09647c884e2fbd4da9ec9 100644 (file)
 #define TABLE_PMSTATUSLOG        3 // Called by Tools for Agm logging
 #define TABLE_DPMCLOCKS          4 // Called by Driver; defined here, but not used, for backward compatible
 #define TABLE_MOMENTARY_PM       5 // Called by Tools; defined here, but not used, for backward compatible
-#define TABLE_COUNT              6
+#define TABLE_SMU_METRICS        6 // Called by Driver
+#define TABLE_COUNT              7
 
-#define NUM_DSPCLK_LEVELS              8
-#define NUM_SOCCLK_DPM_LEVELS  8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS            4
-#define NUM_MEMCLK_DPM_LEVELS  4
+typedef struct SmuMetricsTable_t {
+       //CPU status
+       uint16_t CoreFrequency[6];              //[MHz]
+       uint32_t CorePower[6];                  //[mW]
+       uint16_t CoreTemperature[6];            //[centi-Celsius]
+       uint16_t L3Frequency[2];                //[MHz]
+       uint16_t L3Temperature[2];              //[centi-Celsius]
+       uint16_t C0Residency[6];                //Percentage
 
-#define NUMBER_OF_PSTATES              8
-#define NUMBER_OF_CORES                        8
+       // GFX status
+       uint16_t GfxclkFrequency;               //[MHz]
+       uint16_t GfxTemperature;                //[centi-Celsius]
 
-typedef enum {
-       S3_TYPE_ENTRY,
-       S5_TYPE_ENTRY,
-} Sleep_Type_e;
+       // SOC IP info
+       uint16_t SocclkFrequency;               //[MHz]
+       uint16_t VclkFrequency;                 //[MHz]
+       uint16_t DclkFrequency;                 //[MHz]
+       uint16_t MemclkFrequency;               //[MHz]
 
-typedef enum {
-       GFX_OFF = 0,
-       GFX_ON  = 1,
-} GFX_Mode_e;
+       // power, VF info for CPU/GFX telemetry rails, and then socket power total
+       uint32_t Voltage[2];                    //[mV] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Current[2];                    //[mA] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Power[2];                      //[mW] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t CurrentSocketPower;            //[mW]
 
-typedef enum {
-       CPU_P0 = 0,
-       CPU_P1,
-       CPU_P2,
-       CPU_P3,
-       CPU_P4,
-       CPU_P5,
-       CPU_P6,
-       CPU_P7
-} CPU_PState_e;
+       uint16_t SocTemperature;                //[centi-Celsius]
+       uint16_t EdgeTemperature;
+       uint16_t ThrottlerStatus;
+       uint16_t Spare;
 
-typedef enum {
-       CPU_CORE0 = 0,
-       CPU_CORE1,
-       CPU_CORE2,
-       CPU_CORE3,
-       CPU_CORE4,
-       CPU_CORE5,
-       CPU_CORE6,
-       CPU_CORE7
-} CORE_ID_e;
+} SmuMetricsTable_t;
 
-typedef enum {
-       DF_DPM0 = 0,
-       DF_DPM1,
-       DF_DPM2,
-       DF_DPM3,
-       DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
-       GFX_DPM0 = 0,
-       GFX_DPM1,
-       GFX_DPM2,
-       GFX_DPM3,
-       GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetrics_t {
+       SmuMetricsTable_t Current;
+       SmuMetricsTable_t Average;
+       uint32_t SampleStartTime;
+       uint32_t SampleStopTime;
+       uint32_t Accnt;
+} SmuMetrics_t;
 
 #endif
index 6f1b1b50d527c6c0f43b9edd8e3d551d0b7dc6e1..18b862a90fbe5c8857686b35acc0616f8c27aa87 100644 (file)
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
        __SMU_DUMMY_MAP(GfxDriverResetRecovery),        \
-       __SMU_DUMMY_MAP(BoardPowerCalibration),
+       __SMU_DUMMY_MAP(BoardPowerCalibration),   \
+       __SMU_DUMMY_MAP(RequestGfxclk),           \
+       __SMU_DUMMY_MAP(ForceGfxVid),             \
+       __SMU_DUMMY_MAP(UnforceGfxVid),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index 6e6088760b180a3bc06f69c8e6fb3ac29bfe19c4..909a86aa60f328e3c33d0e4d3def613f05b93039 100644 (file)
 #define PPSMC_MSG_SetDriverTableVMID                    0x34
 #define PPSMC_MSG_SetSoftMinCclk                        0x35
 #define PPSMC_MSG_SetSoftMaxCclk                        0x36
-#define PPSMC_Message_Count                             0x37
+#define PPSMC_MSG_GetGfxFrequency                       0x37
+#define PPSMC_MSG_GetGfxVid                             0x38
+#define PPSMC_MSG_ForceGfxFreq                          0x39
+#define PPSMC_MSG_UnForceGfxFreq                        0x3A
+#define PPSMC_MSG_ForceGfxVid                           0x3B
+#define PPSMC_MSG_UnforceGfxVid                         0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures                 0x3D
+#define PPSMC_Message_Count                             0x3E
 
 #endif
index 3ab1ce4d3419b0da81a68d25bd2c839af839a65d..04863a7971155ab1177f37b9f8bdc343d0c1c0c3 100644 (file)
@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
-           (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+           (adev->asic_type <= CHIP_BEIGE_GOBY))
                return smu_disable_all_features_with_exception(smu,
                                                               true,
                                                               SMU_FEATURE_COUNT);
index e343cc218990ba4ccebc6727e059d86cbb125a1f..082f01893f3d130ca660a937926e7e2b24376ea4 100644 (file)
@@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_dpm_context *dpm_context = NULL;
        uint32_t gen_speed, lane_width;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
index b05f9541accc3222855962abfebd479aa3e9291b..3d4c65bc29dc3daa29c53753421764c79d710bcc 100644 (file)
 #undef pr_info
 #undef pr_debug
 
+/* unit: MHz */
+#define CYAN_SKILLFISH_SCLK_MIN                        1000
+#define CYAN_SKILLFISH_SCLK_MAX                        2000
+#define CYAN_SKILLFISH_SCLK_DEFAULT                    1800
+
+/* unit: mV */
+#define CYAN_SKILLFISH_VDDC_MIN                        700
+#define CYAN_SKILLFISH_VDDC_MAX                        1129
+#define CYAN_SKILLFISH_VDDC_MAGIC                      5118 // 0x13fe
+
+static struct gfx_user_settings {
+       uint32_t sclk;
+       uint32_t vddc;
+} cyan_skillfish_user_settings;
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+       FEATURE_MASK(FEATURE_FCLK_DPM_BIT)      |       \
+       FEATURE_MASK(FEATURE_SOC_DPM_BIT)       |       \
+       FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+
 static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  0),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,                0),
@@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
        MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverTableDramAddrLow,    0),
        MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,        0),
        MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,        0),
+       MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,        0),
+       MSG_MAP(RequestGfxclk,                  PPSMC_MSG_RequestGfxclk,                0),
+       MSG_MAP(ForceGfxVid,                    PPSMC_MSG_ForceGfxVid,                  0),
+       MSG_MAP(UnforceGfxVid,                  PPSMC_MSG_UnforceGfxVid,                0),
+};
+
+static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
+       TAB_MAP_VALID(SMU_METRICS),
 };
 
+static int cyan_skillfish_tables_init(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+                               sizeof(SmuMetrics_t),
+                               PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_VRAM);
+
+       smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+       if (!smu_table->metrics_table)
+               goto err0_out;
+
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+       smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+       if (!smu_table->gpu_metrics_table)
+               goto err1_out;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+
+err1_out:
+       smu_table->gpu_metrics_table_size = 0;
+       kfree(smu_table->metrics_table);
+err0_out:
+       return -ENOMEM;
+}
+
+static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
+       ret = cyan_skillfish_tables_init(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
+static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+
+       kfree(smu_table->metrics_table);
+       smu_table->metrics_table = NULL;
+
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+       smu_table->gpu_metrics_table_size = 0;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+}
+
+static int
+cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
+                                       MetricsMember_t member,
+                                       uint32_t *value)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+       int ret = 0;
+
+       mutex_lock(&smu->metrics_lock);
+
+       ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+       if (ret) {
+               mutex_unlock(&smu->metrics_lock);
+               return ret;
+       }
+
+       switch (member) {
+       case METRICS_CURR_GFXCLK:
+               *value = metrics->Current.GfxclkFrequency;
+               break;
+       case METRICS_CURR_SOCCLK:
+               *value = metrics->Current.SocclkFrequency;
+               break;
+       case METRICS_CURR_VCLK:
+               *value = metrics->Current.VclkFrequency;
+               break;
+       case METRICS_CURR_DCLK:
+               *value = metrics->Current.DclkFrequency;
+               break;
+       case METRICS_CURR_UCLK:
+               *value = metrics->Current.MemclkFrequency;
+               break;
+       case METRICS_AVERAGE_SOCKETPOWER:
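+               /* convert mW to watts in 8.8 fixed point, as the power sensor interface expects */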
+               *value = (metrics->Current.CurrentSocketPower << 8) /
+                               1000;
+               break;
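+       /*
+        * The SMU reports temperatures in centidegrees; the scaling below
+        * converts them to the millidegrees the powerplay sensors expect.
+        */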
+       case METRICS_TEMPERATURE_EDGE:
+               *value = metrics->Current.GfxTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_TEMPERATURE_HOTSPOT:
+               *value = metrics->Current.SocTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_VOLTAGE_VDDSOC:
+               *value = metrics->Current.Voltage[0];
+               break;
+       case METRICS_VOLTAGE_VDDGFX:
+               *value = metrics->Current.Voltage[1];
+               break;
+       case METRICS_THROTTLER_STATUS:
+               *value = metrics->Current.ThrottlerStatus;
+               break;
+       default:
+               *value = UINT_MAX;
+               break;
+       }
+
+       mutex_unlock(&smu->metrics_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_read_sensor(struct smu_context *smu,
+                                       enum amd_pp_sensors sensor,
+                                       void *data,
+                                       uint32_t *size)
+{
+       int ret = 0;
+
+       if (!data || !size)
+               return -EINVAL;
+
+       mutex_lock(&smu->sensor_lock);
+
+       switch (sensor) {
+       case AMDGPU_PP_SENSOR_GFX_SCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_GFXCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GFX_MCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_UCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GPU_POWER:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_AVERAGE_SOCKETPOWER,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_HOTSPOT,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_EDGE_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_EDGE,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDNB:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDSOC,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDGFX:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDGFX,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       mutex_unlock(&smu->sensor_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
+                                               enum smu_clk_type clk_type,
+                                               uint32_t *value)
+{
+       MetricsMember_t member_type;
+
+       switch (clk_type) {
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               member_type = METRICS_CURR_GFXCLK;
+               break;
+       case SMU_FCLK:
+       case SMU_MCLK:
+               member_type = METRICS_CURR_UCLK;
+               break;
+       case SMU_SOCCLK:
+               member_type = METRICS_CURR_SOCCLK;
+               break;
+       case SMU_VCLK:
+               member_type = METRICS_CURR_VCLK;
+               break;
+       case SMU_DCLK:
+               member_type = METRICS_CURR_DCLK;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
+                                       enum smu_clk_type clk_type,
+                                       char *buf)
+{
+       int ret = 0, size = 0;
+       uint32_t cur_value = 0;
+
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       switch (clk_type) {
+       case SMU_OD_SCLK:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+               break;
+       case SMU_OD_VDDC_CURVE:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC");
+               size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
+               break;
+       case SMU_OD_RANGE:
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+                                               CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+               size += sysfs_emit_at(buf, size, "VDDC: %7umV  %10umV\n",
+                                               CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+       case SMU_FCLK:
+       case SMU_MCLK:
+       case SMU_SOCCLK:
+       case SMU_VCLK:
+       case SMU_DCLK:
+               ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+               break;
+       default:
+               dev_warn(smu->adev->dev, "Unsupported clock type\n");
+               return ret;
+       }
+
+       return size;
+}
+
+static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+       uint32_t feature_mask[2];
+       uint64_t feature_enabled;
+
+       /* we need to re-init after suspend so return false */
+       if (adev->in_suspend)
+               return false;
+
+       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+       if (ret)
+               return false;
+
+       feature_enabled = (uint64_t)feature_mask[0] |
+                               ((uint64_t)feature_mask[1] << 32);
+
+       return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
+                                               void **table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct gpu_metrics_v2_2 *gpu_metrics =
+               (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
+       SmuMetrics_t metrics;
+       int i, ret = 0;
+
+       ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+       if (ret)
+               return ret;
+
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
+
+       gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+       gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+
+       gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+       gpu_metrics->average_soc_power = metrics.Current.Power[0];
+       gpu_metrics->average_gfx_power = metrics.Current.Power[1];
+
+       gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+       gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+       gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+       gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+       gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+       gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+       gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+       for (i = 0; i < 6; i++) {
+               gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
+               gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
+               gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
+       }
+
+       for (i = 0; i < 2; i++) {
+               gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
+               gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
+       }
+
+       gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+       *table = (void *)gpu_metrics;
+
+       return sizeof(struct gpu_metrics_v2_2);
+}
+
+static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
+                                       enum PP_OD_DPM_TABLE_COMMAND type,
+                                       long input[], uint32_t size)
+{
+       int ret = 0;
+       uint32_t vid;
+
+       switch (type) {
+       case PP_OD_EDIT_VDDC_CURVE:
+               if (size != 3 || input[0] != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (input[1] < CYAN_SKILLFISH_SCLK_MIN ||
+                       input[1] > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if (input[2] < CYAN_SKILLFISH_VDDC_MIN ||
+                       input[2] > CYAN_SKILLFISH_VDDC_MAX) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = input[1];
+               cyan_skillfish_user_settings.vddc = input[2];
+
+               break;
+       case PP_OD_RESTORE_DEFAULT_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+               cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
+
+               break;
+       case PP_OD_COMMIT_DPM_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
+                   cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMHz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
+                       (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
+                       cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
+                                       cyan_skillfish_user_settings.sclk, NULL);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Set sclk failed!\n");
+                       return ret;
+               }
+
+               if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Unforce vddc failed!\n");
+                               return ret;
+                       }
+               } else {
+                       /*
+                        * PMFW accepts SVI2 VID codes; convert the voltage to
+                        * a VID (voltage in V):
+                        * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
+                        * The integer math below computes the same from mV.
+                        */
+                       vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Force vddc failed!\n");
+                               return ret;
+                       }
+               }
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ret;
+}
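+
+/*
+ * Worked example (illustrative, not part of the message flow): committing
+ * user settings of sclk=1800 MHz and vddc=900 mV sends RequestGfxclk(1800)
+ * and, since 900 is not the "unforce" sentinel, ForceGfxVid with
+ *
+ *	vid = (1550 - 900) * 160 / 1000 = 104
+ *
+ * matching the SVI2 formula (1.55 V - 0.90 V) * 160 = 104.
+ */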
+
 static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
 
        .check_fw_status = smu_v11_0_check_fw_status,
        .check_fw_version = smu_v11_0_check_fw_version,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
+       .init_smc_tables = cyan_skillfish_init_smc_tables,
+       .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+       .read_sensor = cyan_skillfish_read_sensor,
+       .print_clk_levels = cyan_skillfish_print_clk_levels,
+       .is_dpm_running = cyan_skillfish_is_dpm_running,
+       .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
+       .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
        .register_irq_handler = smu_v11_0_register_irq_handler,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
        smu->message_map = cyan_skillfish_message_map;
+       smu->table_map = cyan_skillfish_table_map;
        smu->is_apu = true;
 }
index a5fc5d7cb6c79226072a416a885865fce0749798..b1ad451af06bd02a764fc9b2b5136889206692bc 100644 (file)
@@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
        uint32_t min_value, max_value;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        case SMU_OD_RANGE:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       /*
+        * This handles the case below:
+        *   amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
+        *
+        * For NAVI10 and later ASICs, we rely on PMFW to handle runpm. To
+        * make that possible, PMFW needs to acknowledge the dstate transition
+        * for both the gfx (function 0) and audio (function 1) functions of
+        * the ASIC.
+        *
+        * The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
+        * that of the device representing the audio function of the ASIC. That
+        * means a runpm suspend can be kicked on the ASIC even before the
+        * sound driver (snd_hda_intel) is loaded. But without the dstate
+        * transition notification from the audio function, PMFW cannot handle
+        * BACO entry/exit correctly, and the driver hangs on runpm resume.
+        *
+        * To address this, fall back to the legacy message path (the driver
+        * masters the timing of BACO entry/exit) when the sound driver is
+        * missing.
+        */
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 5e292c3f5050e3a806b530236ac6f490be515828..ca57221e39629c68c9c62b836131edbd149058b6 100644 (file)
@@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
        uint32_t min_value, max_value;
        uint32_t smu_version;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
 
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
                        sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 3a3421452e57a1f52173081ab304994b6fe845e4..f6ef0ce6e9e2c30e509df6c6e077b02fa3af7f29 100644 (file)
@@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
index 5aa175e12a7837ad7a6b3dfcd427df9eb7a0de57..145f13b8c977d6406ea30b60efef69be53ba1042 100644 (file)
@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
index ab652028e0034d12a2575c523e85e6c356e6f55a..5019903db492afc5197cfa396eaf3e5eebb7315b 100644 (file)
@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
        uint32_t freq_values[3] = {0};
        uint32_t min_clk, max_clk;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
        switch (type) {
 
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
                fallthrough;
        case SMU_SCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                break;
 
        case SMU_OD_MCLK:
-               size = sysfs_emit(buf, "%s:\n", "MCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
                fallthrough;
        case SMU_MCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
index 627ba2eec7fda2a2281dfb9dd5b9d7d366c5cf71..a403657151ba19f6826710540cde08a05fed4221 100644 (file)
@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                break;
        case SMU_OD_RANGE:
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                break;
index 66711ab24c15a13e197a6150b117437afbb7c83c..843d2cbfc71d4caea9570d6408ffd76bf9828bd9 100644 (file)
@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
 
        return ret;
 }
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+       struct pci_dev *p = NULL;
+       bool snd_driver_loaded;
+
+       /*
+        * If the ASIC comes with no audio function, we always assume
+        * it is "enabled".
+        */
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (!p)
+               return true;
+
+       snd_driver_loaded = pci_is_enabled(p);
+
+       pci_dev_put(p);
+
+       return snd_driver_loaded;
+}
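+
+/*
+ * Callers use this to gate the PMFW-managed BACO path on the audio
+ * function's runpm participation, e.g. (from the navi10 BACO handlers):
+ *
+ *	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+ *		return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
+ */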
index 16993daa2ae0425a17572b02bf0a231a87559c15..beea03810bcaabc476e682446c5fc35973ed9dba 100644 (file)
@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state);
 
+/*
+ * Helper to make sysfs_emit_at() happy: align buf down to the start of
+ * its page and record the offset into that page.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+       if (!*buf || !offset)
+               return;
+
+       *offset = offset_in_page(*buf);
+       *buf -= *offset;
+}
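+
+/*
+ * Typical usage in a print_clk_levels() implementation (illustrative):
+ *
+ *	int size = 0;
+ *
+ *	smu_cmn_get_sysfs_buf(&buf, &size);
+ *	size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ *	return size;
+ */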
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
 #endif
 #endif
index 76d38561c91031c06bd4a39fd13782695813b417..cf741c5c82d2533747c28590e6591b5bc8d7bcf9 100644 (file)
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
                if (switch_mmu_context) {
                        struct etnaviv_iommu_context *old_context = gpu->mmu_context;
 
-                       etnaviv_iommu_context_get(mmu_context);
-                       gpu->mmu_context = mmu_context;
+                       gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
                        etnaviv_iommu_context_put(old_context);
                }
 
index 8f1b5af47dd633bd3c6f6a9abcaf81daf765e587..f0b2540e60e46ee587b1039d51dd44ce5934d0db 100644 (file)
@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
                list_del(&mapping->obj_node);
        }
 
-       etnaviv_iommu_context_get(mmu_context);
-       mapping->context = mmu_context;
+       mapping->context = etnaviv_iommu_context_get(mmu_context);
        mapping->use = 1;
 
        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
index 4dd7d9d541c0903c1e66fbe074af9d846e396c6b..486259e154aff63f7cdfed03395348cfd822084e 100644 (file)
@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                goto err_submit_objects;
 
        submit->ctx = file->driver_priv;
-       etnaviv_iommu_context_get(submit->ctx->mmu);
-       submit->mmu_context = submit->ctx->mmu;
+       submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
        submit->exec_state = args->exec_state;
        submit->flags = args->flags;
 
index c297fffe06eb3021d1a0ac22fad55573dc5b206c..cc5b07f86346320c9380c3c32519e992cdf6dc7c 100644 (file)
@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        /* We rely on the GPU running, so program the clock */
        etnaviv_gpu_update_clock(gpu);
 
+       gpu->fe_running = false;
+       gpu->exec_state = -1;
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = NULL;
+
        return 0;
 }
 
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
        }
+
+       gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+                                         struct etnaviv_iommu_context *context)
 {
-       u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-                               &gpu->mmu_context->cmdbuf_mapping);
        u16 prefetch;
+       u32 address;
 
        /* setup the MMU */
-       etnaviv_iommu_restore(gpu, gpu->mmu_context);
+       etnaviv_iommu_restore(gpu, context);
 
        /* Start command processor */
        prefetch = etnaviv_buffer_init(gpu);
+       address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+                                       &gpu->mmu_context->cmdbuf_mapping);
 
        etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        /* Now program the hardware */
        mutex_lock(&gpu->lock);
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
        mutex_unlock(&gpu->lock);
 
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
        spin_unlock(&gpu->event_spinlock);
 
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
-       gpu->mmu_context = NULL;
 
        mutex_unlock(&gpu->lock);
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
                goto out_unlock;
        }
 
-       if (!gpu->mmu_context) {
-               etnaviv_iommu_context_get(submit->mmu_context);
-               gpu->mmu_context = submit->mmu_context;
-               etnaviv_gpu_start_fe_idleloop(gpu);
-       } else {
-               etnaviv_iommu_context_get(gpu->mmu_context);
-               submit->prev_mmu_context = gpu->mmu_context;
-       }
+       if (!gpu->fe_running)
+               etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+       if (submit->prev_mmu_context)
+               etnaviv_iommu_context_put(submit->prev_mmu_context);
+       submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
        if (submit->nr_pmrs) {
                gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
 
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-       if (gpu->initialized && gpu->mmu_context) {
+       if (gpu->initialized && gpu->fe_running) {
                /* Replace the last WAIT with END */
                mutex_lock(&gpu->lock);
                etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
                 */
                etnaviv_gpu_wait_idle(gpu, 100);
 
-               etnaviv_iommu_context_put(gpu->mmu_context);
-               gpu->mmu_context = NULL;
+               gpu->fe_running = false;
        }
 
        gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
        etnaviv_gpu_hw_suspend(gpu);
 #endif
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+
        if (gpu->initialized) {
                etnaviv_cmdbuf_free(&gpu->buffer);
                etnaviv_iommu_global_fini(gpu);
index 8ea48697d13218da8f2cfdc2e657932365f48391..1c75c8ed5bcea2c5bec1d923ed35fc9a78884c67 100644 (file)
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
        struct workqueue_struct *wq;
        struct drm_gpu_scheduler sched;
        bool initialized;
+       bool fe_running;
 
        /* 'ring'-buffer: */
        struct etnaviv_cmdbuf buffer;
index 1a7c89a67bea3c4e2464c8017d7f4d69474d78b1..afe5dd6a9925bf2cbf0171afd579ccaaab153851 100644 (file)
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        u32 pgtable;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        /* set base addresses */
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
index f8bf488e9d717360aa21a9f7f3db45ed25dd5278..d664ae29ae2096996996bf2ed7c624a33396620a 100644 (file)
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)v2_context->mtlb_dma,
                                (u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
                  lower_32_bits(context->global->v2.pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
index dab1b58006d836dea6b6c5ea88a5fb3a3d5b57d4..9fb1a2aadbcb0fa9c740cc33cb68123a0a111895 100644 (file)
@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(context, m);
+                       etnaviv_iommu_context_put(m->context);
                        m->context = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
index d1d6902fd13beeaf1eda709cbc249a574115a5e6..e4a0b7d09c2eab6f39498d03123e575b60cd4198 100644 (file)
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
 struct etnaviv_iommu_context *
 etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                           struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
 {
        kref_get(&ctx->refcount);
+       return ctx;
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
index 642a5b5a1b81c4cf803c06e74eddd9ff9453a13d..335ba9f43d8f7c1d721970a977024e597bc78bf0 100644 (file)
@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
 subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 # clang warnings
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
index 04175f359fd6857c4941095cf0e6489cd9bb74bc..abe3d61b624329c9a993aa3a763e5c39bce67588 100644 (file)
@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
         */
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
                             intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-                            sizeof(intel_dp->edp_dpcd))
+                            sizeof(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
                            (int)sizeof(intel_dp->edp_dpcd),
                            intel_dp->edp_dpcd);
 
+               intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+       }
+
        /*
         * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
         * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
index 053a3c2f726776b5e50aaaf72c40b31630fe5674..508a514c5e37dc241d0b6e3cdd7aab44ab0eb617 100644 (file)
@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
        }
 
        if (ret)
-               intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+               ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
 
        if (intel_dp->set_idle_link_train)
                intel_dp->set_idle_link_train(intel_dp, crtc_state);
index cff72679ad7c36f40f33900cd310b60efaff31a0..9ccf4b29b82e107dddec3019ac9243b88d105528 100644 (file)
@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
        trace_i915_context_free(ctx);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       if (ctx->syncobj)
+               drm_syncobj_put(ctx->syncobj);
+
        mutex_destroy(&ctx->engines_mutex);
        mutex_destroy(&ctx->lut_mutex);
 
@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
        if (vm)
                i915_vm_close(vm);
 
-       if (ctx->syncobj)
-               drm_syncobj_put(ctx->syncobj);
-
        ctx->file_priv = ERR_PTR(-EBADF);
 
        /*
index ffae7df5e4d7d76e737e6c2a7ee8c41545637037..4a6bb64c3a35410717ccd42c2104c1ba77181ffe 100644 (file)
@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import != &obj->base) {
                pr_err("i915_gem_prime_import created a new object!\n");
                err = -EINVAL;
                goto out_import;
        }
-       import_obj = to_intel_bo(import);
 
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
                pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
                       PTR_ERR(import));
                err = PTR_ERR(import);
+       } else {
+               err = 0;
        }
 
        dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import == &obj->base) {
                pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                goto out_import;
        }
 
-       import_obj = to_intel_bo(import);
-
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
        if (err) {
index b20f5621f62b127a21cd5d5b8c3b311c02405738..a2c34e5a1c540c944f47ac5db53b7b6ba33cd198 100644 (file)
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
        return I915_MMAP_TYPE_GTT;
 }
 
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+                      unsigned long size)
+{
+       if (HAS_LMEM(i915)) {
+               struct intel_memory_region *sys_region =
+                       i915->mm.regions[INTEL_REGION_SMEM];
+
+               return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+       }
+
+       return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
        u64 offset;
        int ret;
 
-       obj = i915_gem_object_create_internal(i915, size);
+       obj = create_sys_or_internal(i915, size);
        if (IS_ERR(obj))
                return expected && expected == PTR_ERR(obj);
 
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
        struct drm_mm_node *hole, *next;
        int loop, err = 0;
        u64 offset;
+       int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
        /* Disable background reaper */
        disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
        }
 
        /* Too large */
-       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }
 
        /* Fill the hole, further allocation attempts should then fail */
-       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       obj = create_sys_or_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                goto err_obj;
        }
 
-       if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        bool no_map;
 
-       if (HAS_LMEM(i915))
+       if (obj->ops->mmap_offset)
                return type == I915_MMAP_TYPE_FIXED;
        else if (type == I915_MMAP_TYPE_FIXED)
                return false;
index d812b27835f8ce8f9388f7e0b018025684215106..591a5224287ea83475df1184855e8a34bc6478f3 100644 (file)
@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
        struct intel_uncore *uncore = rps_to_uncore(rps);
+       struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+       intel_wakeref_t wakeref;
+       u32 freq = 0;
 
-       return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+       with_intel_runtime_pm_if_in_use(rpm, wakeref)
+               freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+       return freq;
 }
 
 static u32 intel_rps_get_req(u32 pureq)
index b104fb7607eb25b8b2587d73fdcc743d45b11a95..86c318516e14f1fc5a92126597d147a92637cbb1 100644 (file)
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
        __uc_free_load_err_log(uc);
 }
 
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-       return intel_guc_ct_enabled(&guc->ct);
-}
-
 /*
  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
  * register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
        /* we need communication to be enabled to reply to GuC */
-       GEM_BUG_ON(!guc_communication_enabled(guc));
+       GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
 
        spin_lock_irq(&guc->irq_lock);
        if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
        struct drm_i915_private *i915 = gt->i915;
        int ret;
 
-       GEM_BUG_ON(guc_communication_enabled(guc));
+       GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
 
        ret = i915_inject_probe_error(i915, -ENXIO);
        if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
                return 0;
 
        /* Make sure we enable communication if and only if it's disabled */
-       GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+       GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
 
        if (enable_communication)
                guc_enable_communication(guc);
index 0473583dcdac241ab6272005bd41aa23e88ff93d..482fb0ae6cb5db1f6a6723798171924bdf7c15bf 100644 (file)
@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 #endif
 
        if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
-               rdev->agp = radeon_agp_head_init(rdev->ddev);
+               rdev->agp = radeon_agp_head_init(dev);
        if (rdev->agp) {
                rdev->agp->agp_mtrr = arch_phys_wc_add(
                        rdev->agp->agp_info.aper_base,
index 5dce875dbd119c2640444978a160f8db9518fd1f..16497c31d9f914d2d7bb18f458489d3c3be4f126 100644 (file)
@@ -1121,7 +1121,7 @@ static int cdn_dp_suspend(struct device *dev)
        return ret;
 }
 
-static int cdn_dp_resume(struct device *dev)
+static __maybe_unused int cdn_dp_resume(struct device *dev)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
 
index 4a11150431140a10a15cb9cb93a43c69423a2548..b4b4653fe301ed6866d61edb7b2ac4f057e8d153 100644 (file)
@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
        bool connected = false;
 
-       WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
        if (vc4_hdmi->hpd_gpio &&
            gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
                connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
                        }
                }
 
-               pm_runtime_put(&vc4_hdmi->pdev->dev);
                return connector_status_connected;
        }
 
        cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
-       pm_runtime_put(&vc4_hdmi->pdev->dev);
        return connector_status_disconnected;
 }
 
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct drm_connector *connector = &vc4_hdmi->connector;
        struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        union hdmi_infoframe frame;
        int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
 
 static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 {
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
-       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 
        if (!vc4_hdmi_supports_scrambling(encoder, mode))
                return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
 {
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
+       struct drm_crtc *crtc = encoder->crtc;
 
        /*
-        * At boot, connector->state will be NULL. Since we don't know the
+        * At boot, encoder->crtc will be NULL. Since we don't know the
         * state of the scrambler and in order to avoid any
         * inconsistency, let's disable it all the time.
         */
-       if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
                return;
 
-       if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
                return;
 
        if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
                vc4_hdmi->variant->phy_disable(vc4_hdmi);
 
        clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+       clk_disable_unprepare(vc4_hdmi->hsm_clock);
        clk_disable_unprepare(vc4_hdmi->pixel_clock);
 
        ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                vc4_hdmi_encoder_get_connector_state(encoder, state);
        struct vc4_hdmi_connector_state *vc4_conn_state =
                conn_state_to_vc4_hdmi_conn_state(conn_state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        unsigned long bvb_rate, pixel_rate, hsm_rate;
        int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                return;
        }
 
+       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+       if (ret) {
+               DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->pixel_clock);
+               return;
+       }
+
        vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
        if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
        if (ret) {
                DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
        if (ret) {
                DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
                                             struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
                                              struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
 
 static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
 {
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_crtc *crtc = connector->state->crtc;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        u32 n, cts;
        u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
 static int vc4_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       struct drm_connector *connector = &vc4_hdmi->connector;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
 
        /*
         * If the HDMI encoder hasn't probed, or the encoder is
         * currently in DVI mode, treat the codec dai as missing.
         */
-       if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+       if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
                                VC4_HDMI_RAM_PACKET_ENABLE))
                return -ENODEV;
 
@@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
-       clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
-       return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       int ret;
-
-       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-#endif
-
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
        {}
 };
 
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
-       SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
-                          vc4_hdmi_runtime_resume,
-                          NULL)
-};
-
 struct platform_driver vc4_hdmi_driver = {
        .probe = vc4_hdmi_dev_probe,
        .remove = vc4_hdmi_dev_remove,
        .driver = {
                .name = "vc4_hdmi",
                .of_match_table = vc4_hdmi_dt_match,
-               .pm = &vc4_hdmi_pm_ops,
        },
 };
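
The vc4 hunks above drop the runtime-PM hooks and return the HSM clock to explicit clk_prepare_enable()/clk_disable_unprepare() calls, unwinding every clock already enabled on each failure path. A minimal sketch of that unwind ordering, using generic clock parameters rather than the driver's actual fields:

    #include <linux/clk.h>

    static int enable_display_clocks(struct clk *pixel, struct clk *hsm,
                                     struct clk *bvb)
    {
            int ret;

            ret = clk_prepare_enable(pixel);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(hsm);
            if (ret)
                    goto err_disable_pixel;

            ret = clk_prepare_enable(bvb);
            if (ret)
                    goto err_disable_hsm;

            return 0;

    err_disable_hsm:
            clk_disable_unprepare(hsm);
    err_disable_pixel:
            clk_disable_unprepare(pixel);
            return ret;
    }
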
index 2aee356840a2b480cb88ef569ef12b2d9da44a59..314015d9e912d2e8693e54e5de4dcd650aec1dcc 100644 (file)
@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
        mutex_unlock(&ring_info->ring_buffer_mutex);
 
        kfree(ring_info->pkt_buffer);
+       ring_info->pkt_buffer = NULL;
        ring_info->pkt_buffer_size = 0;
 }
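
Nulling pkt_buffer immediately after kfree() means a repeated cleanup or a later reuse path sees an empty buffer rather than a dangling pointer, and kfree(NULL) is defined to be a no-op. The idiom in isolation, with a hypothetical structure standing in for hv_ring_buffer_info:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct pkt_state {
            void *buf;
            u32 size;
    };

    static void pkt_state_reset(struct pkt_state *s)
    {
            kfree(s->buf);          /* kfree(NULL) is a safe no-op */
            s->buf = NULL;          /* no dangling pointer on reuse */
            s->size = 0;
    }
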
 
index 94fb63a7b357372a890c95da371964739610f03d..fe63d5ee201b866c0e0681b9605e60ba4aaf2720 100644 (file)
@@ -570,7 +570,7 @@ fail_msg_node:
 fail_db_node:
        of_node_put(smu->db_node);
 fail_bootmem:
-       memblock_free(__pa(smu), sizeof(struct smu_device));
+       memblock_free_ptr(smu, sizeof(struct smu_device));
        smu = NULL;
 fail_np:
        of_node_put(np);
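
The smu fix switches from passing a hand-computed physical address to memblock_free_ptr(), which takes the virtual pointer and does the translation internally. The two call styles side by side (obj stands for any memblock-allocated object):

    /* old style: caller translates to a physical address itself */
    memblock_free(__pa(obj), sizeof(*obj));

    /* new style: the helper accepts the virtual pointer directly */
    memblock_free_ptr(obj, sizeof(*obj));
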
index 64d6dfa83122035810ddcd967c943bf8c40a224f..267324889dd647e6fcf571b9aa3195c79d32613d 100644 (file)
@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
 
        reset_control_assert(gphy_fw->reset);
 
+       /* The vendor BSP uses a 200ms delay after asserting the reset line.
+        * Without this delay, some users observe that the PHY does not come
+        * up on the MDIO bus.
+        */
+       msleep(200);
+
        ret = request_firmware(&fw, gphy_fw->fw_name, dev);
        if (ret) {
                dev_err(dev, "failed to load firmware: %s, error: %i\n",
index 1f63f50f73f1733558b7867752d13b6d13a2b7c8..bda5a9bf4f529e62dd83fed11ff15aeef49a2d09 100644 (file)
@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
 }
 
 static int
-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
 {
-       struct qca8k_priv *priv = salve_bus->priv;
-       struct mii_bus *bus = priv->bus;
        u16 r1, r2, page;
        u32 val;
        int ret;
@@ -682,10 +680,8 @@ exit:
 }
 
 static int
-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
+qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
 {
-       struct qca8k_priv *priv = salve_bus->priv;
-       struct mii_bus *bus = priv->bus;
        u16 r1, r2, page;
        u32 val;
        int ret;
@@ -726,6 +722,24 @@ exit:
        return ret;
 }
 
+static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       struct mii_bus *bus = priv->bus;
+
+       return qca8k_mdio_write(bus, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       struct mii_bus *bus = priv->bus;
+
+       return qca8k_mdio_read(bus, phy, regnum);
+}
+
 static int
 qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
 {
@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
 
        bus->priv = (void *)priv;
        bus->name = "qca8k slave mii";
-       bus->read = qca8k_mdio_read;
-       bus->write = qca8k_mdio_write;
+       bus->read = qca8k_internal_mdio_read;
+       bus->write = qca8k_internal_mdio_write;
        snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
                 ds->index);
 
index 8d90fed5d33eb6fa08d5d2024e4d7347e580e698..6f0ea2facea9e616976a886352d1ee746653323d 100644 (file)
@@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
 #ifdef VORTEX_BUS_MASTER
        if (vp->bus_master) {
                /* Set the bus-master controller to transfer the packet. */
-               outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+               outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
                outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
                vp->tx_skb = skb;
                outw(StartDMADown, ioaddr + EL3_CMD);
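
The corkscrew fix (and the dmascc changes further down) replace casts of kernel virtual addresses to int with isa_virt_to_bus()/virt_to_bus(): an ISA bus-master needs a bus address, and the cast only happens to work on platforms where the two coincide. A sketch of the corrected programming sequence; the register offsets are hypothetical:

    #include <linux/io.h>

    #define MASTER_ADDR_REG 0x24    /* hypothetical offsets */
    #define MASTER_LEN_REG  0x26

    static void start_bus_master_tx(unsigned long ioaddr, void *buf, u16 len)
    {
            /* hand the engine a bus address, not a cast pointer */
            outl(isa_virt_to_bus(buf), ioaddr + MASTER_ADDR_REG);
            outw((len + 3) & ~3, ioaddr + MASTER_LEN_REG);
    }
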
index 53660bc8d6ff60b54754561518f2876933d38c2d..9afc712f594847ed93d6904a563b61cb23097223 100644 (file)
@@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
        }
 }
 
-#ifdef MODULE
 static int __init ne_init(void)
 {
        int retval;
-       ne_add_devices();
+
+       if (IS_MODULE(CONFIG_NE2000))
+               ne_add_devices();
+
        retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-       if (retval) {
+
+       if (IS_MODULE(CONFIG_NE2000) && retval) {
                if (io[0] == 0)
                        pr_notice("ne.c: You must supply \"io=0xNNN\""
                               " value(s) for ISA cards.\n");
@@ -941,18 +944,8 @@ static int __init ne_init(void)
        return retval;
 }
 module_init(ne_init);
-#else /* MODULE */
-static int __init ne_init(void)
-{
-       int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-
-       /* Unregister unused platform_devices. */
-       ne_loop_rm_unreg(0);
-       return retval;
-}
-module_init(ne_init);
 
-#ifdef CONFIG_NETDEV_LEGACY_INIT
+#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)
 struct net_device * __init ne_probe(int unit)
 {
        int this_dev;
@@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
        return ERR_PTR(-ENODEV);
 }
 #endif
-#endif /* MODULE */
 
 static void __exit ne_exit(void)
 {
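
The ne.c rework merges the two #ifdef MODULE variants of ne_init() into one function that branches on IS_MODULE(CONFIG_NE2000); the macro folds to a compile-time 0 or 1, so the unused branch is discarded while both stay visible to the compiler. The shape of the pattern, with CONFIG_FOO and the helpers as stand-ins:

    #include <linux/init.h>
    #include <linux/kconfig.h>

    static void foo_module_only_setup(void);       /* assumed helpers */
    static int foo_common_setup(void);

    static int __init foo_init(void)
    {
            /* constant-folded: the dead branch compiles away, but
             * still gets type-checked, unlike an #ifdef'd-out block
             */
            if (IS_MODULE(CONFIG_FOO))
                    foo_module_only_setup();

            return foo_common_setup();
    }
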
index b5df7ad5a83f18372a541e65548bb0f910bd8b26..032e8922b4829b8e676c0bc35f6857ae552e14c3 100644 (file)
@@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
 #ifdef XMT_VIA_SKB
                        skb_save[i] = p->tmd_skb[i];
 #endif
-                       buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+                       buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
                        blen[i] = tmdp->blen;
                        tmdp->u.s.status = 0x0;
                }
index f255fd0b16dbd91608f05df09f9d3ef4865c362c..6fbf735fca31c2ad4e6e0af222098e7f808651cb 100644 (file)
@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 
        /* SR-IOV capability was enabled but there are no VFs*/
        if (iov->total == 0) {
-               err = -EINVAL;
+               err = 0;
                goto failed;
        }
 
index ea0c45d33814d639ca455cf772ec78cc77bc5fcb..037767b370d54fbfce7b0c322a6b8badde56aebe 100644 (file)
@@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
                        DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
                                     bp->current_interval * 10);
                fw_health->tmr_counter = fw_health->tmr_multiplier;
-               if (!fw_health->enabled) {
+               if (!fw_health->enabled)
                        fw_health->last_fw_heartbeat =
                                bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
-                       fw_health->last_fw_reset_cnt =
-                               bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
-               }
+               fw_health->last_fw_reset_cnt =
+                       bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
                netif_info(bp, drv, bp->dev,
                           "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
                           fw_health->master, fw_health->last_fw_reset_cnt,
@@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
                int j;
 
+               if (!txr->tx_buf_ring)
+                       continue;
+
                for (j = 0; j < max_idx;) {
                        struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
                        struct sk_buff *skb;
@@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
        }
 
 skip_rx_tpa_free:
+       if (!rxr->rx_buf_ring)
+               goto skip_rx_buf_free;
+
        for (i = 0; i < max_idx; i++) {
                struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
                dma_addr_t mapping = rx_buf->mapping;
@@ -2836,6 +2841,11 @@ skip_rx_tpa_free:
                        kfree(data);
                }
        }
+
+skip_rx_buf_free:
+       if (!rxr->rx_agg_ring)
+               goto skip_rx_agg_free;
+
        for (i = 0; i < max_agg_idx; i++) {
                struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
                struct page *page = rx_agg_buf->page;
@@ -2852,6 +2862,8 @@ skip_rx_tpa_free:
 
                __free_page(page);
        }
+
+skip_rx_agg_free:
        if (rxr->rx_page) {
                __free_page(rxr->rx_page);
                rxr->rx_page = NULL;
@@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
        struct pci_dev *pdev = bp->pdev;
        int i;
 
+       if (!rmem->pg_arr)
+               goto skip_pages;
+
        for (i = 0; i < rmem->nr_pages; i++) {
                if (!rmem->pg_arr[i])
                        continue;
@@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 
                rmem->pg_arr[i] = NULL;
        }
+skip_pages:
        if (rmem->pg_tbl) {
                size_t pg_tbl_size = rmem->nr_pages * 8;
 
@@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 
 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
 {
+       struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
        kfree(cpr->cp_desc_ring);
        cpr->cp_desc_ring = NULL;
+       ring->ring_mem.pg_arr = NULL;
        kfree(cpr->cp_desc_mapping);
        cpr->cp_desc_mapping = NULL;
+       ring->ring_mem.dma_arr = NULL;
 }
 
 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                        return;
                }
 
+               if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+                   bp->fw_health->enabled) {
+                       bp->fw_health->last_fw_reset_cnt =
+                               bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+               }
                bp->fw_reset_state = 0;
                /* Make sure fw_reset_state is 0 before clearing the flag */
                smp_mb__before_atomic();
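
The bnxt hunks guard each free routine with a NULL check on the ring array so that a teardown running after a failed or partial allocation skips rings that never came up, instead of dereferencing NULL. The guard in isolation, over simplified types:

    struct ring {
            void **bufs;
            int count;
    };

    static void free_one(void *buf);        /* assumed release helper */

    static void free_ring_bufs(struct ring *r)
    {
            int i;

            /* allocation may have failed part-way; nothing to free */
            if (!r->bufs)
                    return;

            for (i = 0; i < r->count; i++)
                    free_one(r->bufs[i]);
    }
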
index 46fae1acbeed69dd2182b6cd99b87fb9e35d62d3..e6a4a768b10b29a791db83567e9b34385b53e7dc 100644 (file)
@@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
 {
        struct bnxt_flower_indr_block_cb_priv *cb_priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
                if (cb_priv->tunnel_netdev == netdev)
                        return cb_priv;
index 8b7b59908a1ab453a9afc4940d3a76779dc750be..f66d22de5168d2be6583f2b78d43a2003ab874c7 100644 (file)
@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
        struct platform_device *plat_dev = pci_get_drvdata(pdev);
        struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
 
-       platform_device_unregister(plat_dev);
        clk_unregister(plat_data->pclk);
        clk_unregister(plat_data->hclk);
+       platform_device_unregister(plat_dev);
 }
 
 static const struct pci_device_id dev_id_table[] = {
index 22af3d6ce1783262070e1a51cea59acde51f406b..adc54a72666121ce5b38c2c1ab4e2ba94eaf506d 100644 (file)
@@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
 module_param(tx_sgl, uint, 0600);
 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
 
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
 #define HNS3_SGL_SIZE(nfrag)   (sizeof(struct scatterlist) * (nfrag) + \
                                 sizeof(struct sg_table))
 #define HNS3_MAX_SGL_SIZE      ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
 #define HNS3_OUTER_VLAN_TAG    2
 
 #define HNS3_MIN_TX_LEN                33U
+#define HNS3_MIN_TUN_PKT_LEN   65U
 
 /* hns3_pci_tbl - PCI Device ID Table
  *
@@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
                               l4.tcp->doff);
                break;
        case IPPROTO_UDP:
-               if (hns3_tunnel_csum_bug(skb))
-                       return skb_checksum_help(skb);
+               if (hns3_tunnel_csum_bug(skb)) {
+                       int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+                       return ret ? ret : skb_checksum_help(skb);
+               }
 
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
                goto out_with_desc_cb;
 
        if (!HNAE3_IS_TX_RING(ring)) {
-               hns3_alloc_page_pool(ring);
+               if (page_pool_enabled)
+                       hns3_alloc_page_pool(ring);
 
                ret = hns3_alloc_ring_buffers(ring);
                if (ret)
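
page_pool support in hns3 becomes a load-time opt-out through a boolean module parameter; permission 0400 makes it readable by root in sysfs but never writable after load. Minimal usage of module_param() (the patch itself carries no MODULE_PARM_DESC; one is added here for illustration):

    #include <linux/module.h>

    static bool page_pool_enabled = true;
    module_param(page_pool_enabled, bool, 0400);
    MODULE_PARM_DESC(page_pool_enabled,
                     "Use page pool for RX buffers (default: true)");
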
index 68ed1715ac5285945b529e4ac85e6fc782da1341..87d96f82c3182c74bda6959dc958b42dd177bf35 100644 (file)
@@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
        }
 
        bd_num = le32_to_cpu(req->bd_num);
+       if (!bd_num) {
+               dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+               return -EINVAL;
+       }
 
        desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc_src)
index e55ba2e511b1a3eafb73437d53ae613d919f7be0..f1e46ba799f9df0aa3ea6a843abba2a4fcba69ee 100644 (file)
@@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
 static int hclge_configure(struct hclge_dev *hdev)
 {
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+       const struct cpumask *cpumask = cpu_online_mask;
        struct hclge_cfg cfg;
        unsigned int i;
-       int ret;
+       int node, ret;
 
        ret = hclge_get_cfg(hdev, &cfg);
        if (ret)
@@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        hclge_init_kdump_kernel_config(hdev);
 
-       /* Set the init affinity based on pci func number */
-       i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
-       i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
-       cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
-                       &hdev->affinity_mask);
+       /* Set the affinity based on the NUMA node */
+       node = dev_to_node(&hdev->pdev->dev);
+       if (node != NUMA_NO_NODE)
+               cpumask = cpumask_of_node(node);
+
+       cpumask_copy(&hdev->affinity_mask, cpumask);
 
        return ret;
 }
@@ -8125,11 +8127,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        hclge_clear_arfs_rules(hdev);
        spin_unlock_bh(&hdev->fd_rule_lock);
 
-       /* If it is not PF reset, the firmware will disable the MAC,
+       /* If it is not PF reset or FLR, the firmware will disable the MAC,
         * so it only need to stop phy here.
         */
        if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
-           hdev->reset_type != HNAE3_FUNC_RESET) {
+           hdev->reset_type != HNAE3_FUNC_RESET &&
+           hdev->reset_type != HNAE3_FLR_RESET) {
                hclge_mac_stop_phy(hdev);
                hclge_update_link_status(hdev);
                return;
index 82e72702012031ef3d719624fd2b4a11f6effeed..a69e892277b311f8535ae2abb563119b7038864b 100644 (file)
@@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 
        hclgevf_enable_vector(&hdev->misc_vector, false);
        event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+               hclgevf_clear_event_cause(hdev, clearval);
 
        switch (event_cause) {
        case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
                break;
        }
 
-       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
-               hclgevf_clear_event_cause(hdev, clearval);
+       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
                hclgevf_enable_vector(&hdev->misc_vector, true);
-       }
 
        return IRQ_HANDLED;
 }
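
The hclgevf fix acknowledges the interrupt source before handling it: clearing the cause register first means an event that fires while the handler runs re-latches the bit instead of being wiped together with the one just consumed. The reordering, sketched with a hypothetical context structure and helpers:

    #include <linux/interrupt.h>

    /* dev_ctx and the helpers below are assumed, not the driver's */
    static irqreturn_t misc_irq_handle(int irq, void *data)
    {
            struct dev_ctx *ctx = data;
            u32 clearval;
            int cause;

            disable_vector(ctx);
            cause = check_event_cause(ctx, &clearval);

            /* ack first: a new event re-latches instead of being lost */
            if (cause != EVENT_OTHER)
                    clear_event_cause(ctx, clearval);

            handle_event(ctx, cause);

            if (cause != EVENT_OTHER)
                    enable_vector(ctx);

            return IRQ_HANDLED;
    }
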
index b8a40146b89585e477bb63c2c8ae348e87d054fe..b482f6f633bd558aeb5b88dd5f93c4bb4f7130a6 100644 (file)
@@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void)
                        err = -ENODEV;
                        goto out;
                }
-               memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);        /* YUCK! Get addr from NOVRAM */
+               memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
                dev->base_addr = MVME_I596_BASE;
                dev->irq = (unsigned) MVME16x_IRQ_I596;
                goto found;
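
absolute_pointer() (from linux/compiler.h) launders a fixed numeric address so the compiler cannot treat it as a pointer into some zero-sized object, avoiding the -Warray-bounds style warnings GCC emits for memcpy() from a literal address. Before and after, as used above:

    /* before: GCC may warn that 0xfffc1f2c is not a valid object */
    memcpy(eth_addr, (void *)0xfffc1f2c, ETH_ALEN);

    /* after: the helper hides the constant's provenance */
    memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN);
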
index a775c69e4fd7f089d56341fca8ad979bd98c3488..a4579b3401204fe0698baf65a79842922112b4ee 100644 (file)
@@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
                return 0;
        }
 
+       if (adapter->failover_pending) {
+               adapter->init_done_rc = -EAGAIN;
+               netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+               complete(&adapter->init_done);
+               /* login response buffer will be released on reset */
+               return 0;
+       }
+
        netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
        netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
index eadcb99583464f04c69412212742379683a895a9..3c4f08d20414e063c59e69f6a96483fe2ea141bd 100644 (file)
@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
 {
        if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
                set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+               set_bit(ICE_FLAG_AUX_ENA, pf->flags);
                ice_plug_aux_dev(pf);
        }
 }
@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 {
        ice_unplug_aux_dev(pf);
        clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+       clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
 }
 #endif /* _ICE_H_ */
index 1f2afdf6cd483d79fbe98d2a4a6fff2c9e5bb9d6..adcc9a251595a75a4e84d8590f8b923b6b982b4d 100644 (file)
@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
        struct auxiliary_device *adev;
        int ret;
 
+       /* if this PF doesn't support a technology that requires auxiliary
+        * devices, then gracefully exit
+        */
+       if (!ice_is_aux_ena(pf))
+               return 0;
+
        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
        if (!iadev)
                return -ENOMEM;
index b877efae61dfafe09970ff62b713e78d0c5cb41b..0e19b4d02e628ac6afecbc9a1fc19616e4cb4a80 100644 (file)
@@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-       netdev->vlan_features |= netdev->features;
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+       netdev->mpls_features |= NETIF_F_HW_CSUM;
+       netdev->hw_enc_features |= netdev->vlan_features;
 
        /* MTU range: 68 - 9216 */
        netdev->min_mtu = ETH_MIN_MTU;
index e84287ffc7cea46272bd5c3f55616431324b531e..dcf9f27ba2efdf9673e03976ed522a36c46bbc70 100644 (file)
@@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param =
 
 static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
        union devlink_param_value value;
        int err;
 
-       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return 0;
 
        err = devlink_param_register(devlink, &enable_rdma_param);
@@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 
 static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-
-       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return;
 
        devlink_param_unpublish(devlink, &enable_rdma_param);
index 3f8a98093f8cb79606698247da2ad4f2147f809a..f9cf9fb31547973e41808db83ce27500052f0758 100644 (file)
@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
        err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
        if (err) {
                mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
-               return err;
+               goto err_cancel_work;
        }
 
        err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
        mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
        mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
        cancel_work_sync(&tracer->read_fw_strings_work);
        return err;
 }
index 669a75f3537a64833ac3378face27013fac2e7a6..7b8c8187543a9d3ae897459b6d3c16c5092cffb1 100644 (file)
@@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
index 0c38c2e319be93c7ca082e432e019bb28351ff30..b5ddaa82755f170eae6c6a17d70861f9013f23b7 100644 (file)
@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
        u16 vport_num, esw_owner_vhca_id;
        struct netlink_ext_ack *extack;
        int ifindex = upper->ifindex;
-       int err;
+       int err = 0;
 
        if (!netif_is_bridge_master(upper))
                return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
        const struct switchdev_attr *attr = port_attr_info->attr;
        u16 vport_num, esw_owner_vhca_id;
-       int err;
+       int err = 0;
 
        if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                             &esw_owner_vhca_id))
index 51a4d80f7fa34033d8229d1c9eb27bdc8e3c7669..de03684528bbf97fcd4502306e5483daa3fe759d 100644 (file)
@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
 {
        struct mlx5e_rep_indr_block_priv *cb_priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
index bf0313e2682bafb098f5685181716e856835b956..13056cb9757d4b117ff7fbb83dd5a1cec3be40d0 100644 (file)
@@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
        if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
                u32 rqn;
 
-               if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+               if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
                        rqn = res->drop_rqn;
 
                err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
index 2cfd12953909bd2cfbf66c95637583e417565083..306fb5d6a36d3582ad1439cb8fca35830b004e5e 100644 (file)
@@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
        return set_pflag_cqe_based_moder(netdev, enable, true);
 }
 
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
 {
        bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
        struct mlx5e_params new_params;
@@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        if (curr_val == new_val)
                return 0;
 
-       if (new_val && !priv->profile->rx_ptp_support &&
-           priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+       if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
                netdev_err(priv->netdev,
                           "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
                return -EINVAL;
@@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 
        new_params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
-       if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+       if (rx_filter)
                new_params.ptp_rx = new_val;
 
        if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       bool rx_filter;
        int err;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
                return -EOPNOTSUPP;
 
-       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+       rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
        if (err)
                return err;
 
index 47efd858964d6b809b3c3539470ae4faa89e8c1a..3fd515e7bf3024cf8f2cc15420518a0682d49664 100644 (file)
@@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
 
        if (!rx_filter)
                /* Reset CQE compression to Admin default */
-               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
 
        if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
                return 0;
 
        /* Disable CQE compression */
        netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
-       err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
        if (err)
                netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
 
index 9fe8e3c204d63a117a21c68f6cda37b463ba25fa..fe501ba88bea9cd11548851dab9eaf4e5503be98 100644 (file)
@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
 
                curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
                if (!curr_match) {
+                       rcu_read_unlock();
                        free_match_list(match_head, ft_locked);
-                       err = -ENOMEM;
-                       goto out;
+                       return -ENOMEM;
                }
                curr_match->g = g;
                list_add_tail(&curr_match->list, &match_head->list);
        }
-out:
        rcu_read_unlock();
        return err;
 }
index 49ca57c6d31dceeef31e77d3d06ab293cd304395..ca5690b0a7abbf3628e7d56d96f9d9ee6233099a 100644 (file)
@@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
        struct mlx5_core_dev *dev1;
        struct mlx5_lag *ldev;
 
+       ldev = mlx5_lag_dev(dev);
+       if (!ldev)
+               return;
+
        mlx5_dev_list_lock();
 
-       ldev = mlx5_lag_dev(dev);
        dev0 = ldev->pf[MLX5_LAG_P1].dev;
        dev1 = ldev->pf[MLX5_LAG_P2].dev;
 
@@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
 
-       mlx5_dev_list_lock();
        ldev = mlx5_lag_dev(dev);
+       if (!ldev)
+               return;
+
+       mlx5_dev_list_lock();
        ldev->mode_changes_in_progress--;
        mlx5_dev_list_unlock();
        mlx5_queue_bond_work(ldev, 0);
index 3e85b17f5857303d9628f0ca68725d34cca75c11..6704f5c1aa32e78cb1b73ee8ab0093091bd19f78 100644 (file)
@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
        err = mlxbf_gige_clean_port(priv);
        if (err)
                goto free_irqs;
+
+       /* Clear driver's valid_polarity to match hardware,
+        * since the above call to clean_port() resets the
+        * receive polarity used by hardware.
+        */
+       priv->valid_polarity = 0;
+
        err = mlxbf_gige_rx_init(priv);
        if (err)
                goto free_irqs;
index c1310ea1c216f2860a0b194e7f718e9e1e221090..d5c485a6d2845db469900b816b1b85db4cf6b417 100644 (file)
@@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
        int err;
        u16 i;
 
-       dma_buf = kzalloc(sizeof(*dma_buf) +
-                         q_depth * sizeof(struct hwc_work_request),
-                         GFP_KERNEL);
+       dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
        if (!dma_buf)
                return -ENOMEM;
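
struct_size() (linux/overflow.h) computes sizeof(*p) plus n trailing flexible-array elements with overflow checking, saturating to SIZE_MAX so the allocation fails instead of being undersized. A self-contained sketch with a hypothetical layout echoing the hwc buffer:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct dma_buf_desc {
            u32 num_reqs;
            struct work_req {
                    void *buf;
            } reqs[];                       /* flexible array member */
    };

    static struct dma_buf_desc *alloc_desc(u16 q_depth)
    {
            struct dma_buf_desc *desc;

            /* sizeof(*desc) + q_depth * sizeof(desc->reqs[0]) */
            desc = kzalloc(struct_size(desc, reqs, q_depth), GFP_KERNEL);
            if (desc)
                    desc->num_reqs = q_depth;
            return desc;
    }
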
 
index 556c3495211da4fdce712821c7bc7f9334a94123..64c0ef57ad42634a0d521f5d521084e5d44a1662 100644 (file)
@@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;
index 6e5a6cc97d0eb694767183add5afdc3936bee499..24cd415677756984047c5d3307066664089c4c14 100644 (file)
@@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                          struct qed_nvm_image_att *p_image_att)
 {
        enum nvm_image_type type;
+       int rc;
        u32 i;
 
        /* Translate image_id into MFW definitions */
@@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
-       qed_mcp_nvm_info_populate(p_hwfn);
+       rc = qed_mcp_nvm_info_populate(p_hwfn);
+       if (rc)
+               return rc;
+
        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
                if (type == p_hwfn->nvm_info.image_att[i].image_type)
                        break;
index 0a2f34fc8b24a1d9e71ac16c7efd0c211223cdc9..27dffa299ca6f45dac1dcc6851e0378046334bff 100644 (file)
@@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
        const struct firmware *fw = fw_info->fw;
        u32 dest, *p_cache, *temp;
-       int i, ret = -EIO;
        __le32 *temp_le;
        u8 data[16];
        size_t size;
+       int i, ret;
        u64 addr;
 
        temp = vzalloc(fw->size);
index 4b2eca5e08e29a43b5502c91bd9b6df0eef4ed28..01ef5efd7bc2ac36e12dfea5ea5b9841a1cfcac1 100644 (file)
 #define PHY_ST         0x8A    /* PHY status register */
 #define MAC_SM         0xAC    /* MAC status machine */
 #define  MAC_SM_RST    0x0002  /* MAC status machine reset */
+#define MD_CSC         0xb6    /* MDC speed control register */
+#define  MD_CSC_DEFAULT        0x0030
 #define MAC_ID         0xBE    /* Identifier register */
 
 #define TX_DCNT                0x80    /* TX descriptor count */
@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
 {
        void __iomem *ioaddr = lp->base;
        int limit = MAC_DEF_TIMEOUT;
-       u16 cmd;
+       u16 cmd, md_csc;
 
+       md_csc = ioread16(ioaddr + MD_CSC);
        iowrite16(MAC_RST, ioaddr + MCR1);
        while (limit--) {
                cmd = ioread16(ioaddr + MCR1);
@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
        iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
        iowrite16(0, ioaddr + MAC_SM);
        mdelay(5);
+
+       /* Restore MDIO clock frequency */
+       if (md_csc != MD_CSC_DEFAULT)
+               iowrite16(md_csc, ioaddr + MD_CSC);
 }
 
 static void r6040_init_mac_regs(struct net_device *dev)
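
The r6040 change captures the MDC speed-control register before the destructive MAC reset and writes it back afterwards, preserving a bootloader-tuned MDIO clock. The save/restore pattern, with the reset body factored into an assumed helper:

    #include <linux/io.h>

    #define MD_CSC         0xb6     /* values taken from the patch */
    #define MD_CSC_DEFAULT 0x0030

    static void do_mac_reset(void __iomem *ioaddr);  /* assumed */

    static void reset_mac_keep_mdio_clk(void __iomem *ioaddr)
    {
            u16 md_csc = ioread16(ioaddr + MD_CSC);  /* save */

            do_mac_reset(ioaddr);

            /* restore only if the bootloader changed the default */
            if (md_csc != MD_CSC_DEFAULT)
                    iowrite16(md_csc, ioaddr + MD_CSC);
    }
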
index e5b0d795c3012c40a393395016a74a434d0ef4c7..3dbea028b325ceed9ad813263527cd8963740db8 100644 (file)
@@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
         * We need a channel per event queue, plus a VI per tx queue.
         * This may be more pessimistic than it needs to be.
         */
-       if (n_channels + n_xdp_ev > max_channels) {
-               netif_err(efx, drv, efx->net_dev,
-                         "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
-                         n_xdp_ev, n_channels, max_channels);
-               netif_err(efx, drv, efx->net_dev,
-                         "XDP_TX and XDP_REDIRECT will not work on this interface");
-               efx->n_xdp_channels = 0;
-               efx->xdp_tx_per_channel = 0;
-               efx->xdp_tx_queue_count = 0;
+       if (n_channels >= max_channels) {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+                          n_xdp_ev, n_channels, max_channels);
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
        } else if (n_channels + n_xdp_tx > efx->max_vis) {
-               netif_err(efx, drv, efx->net_dev,
-                         "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
-                         n_xdp_tx, n_channels, efx->max_vis);
-               netif_err(efx, drv, efx->net_dev,
-                         "XDP_TX and XDP_REDIRECT will not work on this interface");
-               efx->n_xdp_channels = 0;
-               efx->xdp_tx_per_channel = 0;
-               efx->xdp_tx_queue_count = 0;
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+                          n_xdp_tx, n_channels, efx->max_vis);
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+       } else if (n_channels + n_xdp_ev > max_channels) {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+                          n_xdp_ev, n_channels, max_channels);
+
+               n_xdp_ev = max_channels - n_channels;
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+                          DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
        } else {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+       }
+
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
                efx->n_xdp_channels = n_xdp_ev;
                efx->xdp_tx_per_channel = tx_per_ev;
                efx->xdp_tx_queue_count = n_xdp_tx;
                n_channels += n_xdp_ev;
                netif_dbg(efx, drv, efx->net_dev,
                          "Allocating %d TX and %d event queues for XDP\n",
-                         n_xdp_tx, n_xdp_ev);
+                         n_xdp_ev * tx_per_ev, n_xdp_ev);
+       } else {
+               efx->n_xdp_channels = 0;
+               efx->xdp_tx_per_channel = 0;
+               efx->xdp_tx_queue_count = n_xdp_tx;
        }
 
        if (vec_count < n_channels) {
@@ -858,6 +872,20 @@ rollback:
        goto out;
 }
 
+static inline int
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+                    struct efx_tx_queue *tx_queue)
+{
+       if (xdp_queue_number >= efx->xdp_tx_queue_count)
+               return -EINVAL;
+
+       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+                 tx_queue->channel->channel, tx_queue->label,
+                 xdp_queue_number, tx_queue->queue);
+       efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+       return 0;
+}
+
 int efx_set_channels(struct efx_nic *efx)
 {
        struct efx_tx_queue *tx_queue;
@@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx)
                        if (efx_channel_is_xdp_tx(channel)) {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
                                        tx_queue->queue = next_queue++;
-
-                                       /* We may have a few left-over XDP TX
-                                        * queues owing to xdp_tx_queue_count
-                                        * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
-                                        * We still allocate and probe those
-                                        * TXQs, but never use them.
-                                        */
-                                       if (xdp_queue_number < efx->xdp_tx_queue_count) {
-                                               netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-                                                         channel->channel, tx_queue->label,
-                                                         xdp_queue_number, tx_queue->queue);
-                                               efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+                                       if (rc == 0)
                                                xdp_queue_number++;
-                                       }
                                }
                        } else {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx)
                                                  channel->channel, tx_queue->label,
                                                  tx_queue->queue);
                                }
+
+                               /* If XDP is borrowing queues from net stack, it must use the queue
+                                * with no csum offload, which is the first queue of the channel
+                                * (note: channel->tx_queue_by_type is not initialized yet)
+                                */
+                               if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+                                       tx_queue = &channel->tx_queue[0];
+                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+                                       if (rc == 0)
+                                               xdp_queue_number++;
+                               }
                        }
                }
        }
-       WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
+       WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number != efx->xdp_tx_queue_count);
+       WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number > efx->xdp_tx_queue_count);
+
+       /* If we have more CPUs than assigned XDP TX queues, assign the
+        * already-existing queues to the excess CPUs
+        */
+       next_queue = 0;
+       while (xdp_queue_number < efx->xdp_tx_queue_count) {
+               tx_queue = efx->xdp_tx_queues[next_queue++];
+               rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+               if (rc == 0)
+                       xdp_queue_number++;
+       }
 
        rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
        if (rc)
index 9b4b25704271041f3bebdc91d24d45ac4b2d0ff7..f6981810039d06dca945385e2796eda8bf57719d 100644 (file)
@@ -782,6 +782,12 @@ struct efx_async_filter_insertion {
 #define EFX_RPS_MAX_IN_FLIGHT  8
 #endif /* CONFIG_RFS_ACCEL */
 
+enum efx_xdp_tx_queues_mode {
+       EFX_XDP_TX_QUEUES_DEDICATED,    /* one queue per core, locking not needed */
+       EFX_XDP_TX_QUEUES_SHARED,       /* each queue used by more than 1 core */
+       EFX_XDP_TX_QUEUES_BORROWED      /* queues borrowed from net stack */
+};
+
 /**
  * struct efx_nic - an Efx NIC
  * @name: Device name (net device name or bus id before net device registered)
@@ -820,6 +826,7 @@ struct efx_async_filter_insertion {
  *     should be allocated for this NIC
  * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
  * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
  * @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -979,6 +986,7 @@ struct efx_nic {
 
        unsigned int xdp_tx_queue_count;
        struct efx_tx_queue **xdp_tx_queues;
+       enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
 
        unsigned rxq_entries;
        unsigned txq_entries;
index 0c6650d2e23997cd97304a76c7cb739f2115199b..d16e031e95f44d09c4fcc37456ef68bf23e8aa6c 100644 (file)
@@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        unsigned int len;
        int space;
        int cpu;
-       int i;
+       int i = 0;
 
-       cpu = raw_smp_processor_id();
+       if (unlikely(n && !xdpfs))
+               return -EINVAL;
+       if (unlikely(!n))
+               return 0;
 
-       if (!efx->xdp_tx_queue_count ||
-           unlikely(cpu >= efx->xdp_tx_queue_count))
+       cpu = raw_smp_processor_id();
+       if (unlikely(cpu >= efx->xdp_tx_queue_count))
                return -EINVAL;
 
        tx_queue = efx->xdp_tx_queues[cpu];
        if (unlikely(!tx_queue))
                return -EINVAL;
 
-       if (unlikely(n && !xdpfs))
-               return -EINVAL;
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+               HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
 
-       if (!n)
-               return 0;
+       /* If we're borrowing net stack queues we have to handle stop-restart
+        * or we might block the queue and it will be considered frozen
+        */
+       if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+               if (netif_tx_queue_stopped(tx_queue->core_txq))
+                       goto unlock;
+               efx_tx_maybe_stop_queue(tx_queue);
+       }
 
        /* Check for available space. We should never need multiple
         * descriptors per frame.
@@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        if (flush && i > 0)
                efx_nic_push_buffers(tx_queue);
 
+unlock:
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+               HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
        return i == 0 ? -EIO : i;
 }
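
When XDP TX queues are shared between CPUs or borrowed from the net stack, a queue may be touched concurrently, so the transmit path now takes the per-queue netdev TX lock for any mode other than DEDICATED, and the BORROWED mode additionally honours queue stop/restart. The locking skeleton, with simplified types around the real HARD_TX_LOCK/HARD_TX_UNLOCK macros and an assumed push_frames() helper:

    static int xdp_tx(struct nic *nic, int n_frames)
    {
            int cpu = raw_smp_processor_id();
            struct tx_queue *txq = nic->xdp_txq[cpu % nic->n_xdp_txq];
            int sent = 0;

            if (nic->mode != XDP_QUEUES_DEDICATED)
                    HARD_TX_LOCK(nic->net_dev, txq->core_txq, cpu);

            /* queue is shared with the stack: back off if stopped */
            if (nic->mode == XDP_QUEUES_BORROWED &&
                netif_tx_queue_stopped(txq->core_txq))
                    goto unlock;

            sent = push_frames(txq, n_frames);

    unlock:
            if (nic->mode != XDP_QUEUES_DEDICATED)
                    HARD_TX_UNLOCK(nic->net_dev, txq->core_txq);

            return sent;
    }
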
 
index ece02b35a6cec4a7e6652f678dbac61705e2a3d5..553c4403258aa2c30cb2ca06f30f2db69efc6f3f 100644 (file)
@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
-               else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+               else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
 
@@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        u32 chan;
-       int ret;
 
        if (!ndev || !netif_running(ndev))
                return 0;
@@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev)
        } else {
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
-               /* Disable clock in case of PWM is off */
-               clk_disable_unprepare(priv->plat->clk_ptp_ref);
-               ret = pm_runtime_force_suspend(dev);
-               if (ret) {
-                       mutex_unlock(&priv->lock);
-                       return ret;
-               }
        }
 
        mutex_unlock(&priv->lock);
@@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev)
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
-               /* enable the clk previously disabled */
-               ret = pm_runtime_force_resume(dev);
-               if (ret)
-                       return ret;
-               if (priv->plat->clk_ptp_ref)
-                       clk_prepare_enable(priv->plat->clk_ptp_ref);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
index 5ca710844cc1eb9a1c41dbcbca92bdd8d2218d14..62cec9bfcd33722d6b124695fcd327ca6d238248 100644 (file)
@@ -9,6 +9,7 @@
 *******************************************************************************/
 
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
        return stmmac_bus_clks_config(priv, true);
 }
 
+static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+               /* Disable the clock in case PWM is off */
+               clk_disable_unprepare(priv->plat->clk_ptp_ref);
+
+               ret = pm_runtime_force_suspend(dev);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+               /* Enable the clock previously disabled */
+               ret = pm_runtime_force_resume(dev);
+               if (ret)
+                       return ret;
+
+               clk_prepare_enable(priv->plat->clk_ptp_ref);
+       }
+
+       return 0;
+}
+
 const struct dev_pm_ops stmmac_pltfr_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
        SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
 };
 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
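
Moving the PTP-reference-clock gating into noirq suspend/resume defers it until device interrupts are off and runs the resume counterpart before they come back, while the !device_may_wakeup() || !plat->pmt test keeps the clocks running when the MAC must wake the system. Registering a noirq pair next to the normal and runtime hooks looks like this (handler bodies assumed elsewhere):

    #include <linux/pm.h>

    static int my_suspend(struct device *dev);      /* assumed handlers */
    static int my_resume(struct device *dev);
    static int my_rt_suspend(struct device *dev);
    static int my_rt_resume(struct device *dev);
    static int my_noirq_suspend(struct device *dev);
    static int my_noirq_resume(struct device *dev);

    static const struct dev_pm_ops my_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
            SET_RUNTIME_PM_OPS(my_rt_suspend, my_rt_resume, NULL)
            /* noirq phase: after IRQs are disabled on the way down */
            SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(my_noirq_suspend, my_noirq_resume)
    };
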
 
index 8fe8887d506a3e269d34542b75486624bdc03a3e..6192244b304ab976e0b17f7fd3eb568585f16a54 100644 (file)
@@ -68,9 +68,9 @@
 #define SIXP_DAMA_OFF          0
 
 /* default level 2 parameters */
-#define SIXP_TXDELAY                   (HZ/4)  /* in 1 s */
+#define SIXP_TXDELAY                   25      /* 250 ms */
 #define SIXP_PERSIST                   50      /* in 256ths */
-#define SIXP_SLOTTIME                  (HZ/10) /* in 1 s */
+#define SIXP_SLOTTIME                  10      /* 100 ms */
 #define SIXP_INIT_RESYNC_TIMEOUT       (3*HZ/2) /* in 1 s */
 #define SIXP_RESYNC_TIMEOUT            5*HZ    /* in 1 s */
 
index b50b7fafd8d6c525d34a05de696acefa496605ac..f4c3efc3e0745c621d8e1ff6bea66b1c8efb5be6 100644 (file)
@@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
                set_dma_addr(priv->param.dma,
-                            (int) priv->tx_buf[priv->tx_tail] + n);
+                            virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
                set_dma_count(priv->param.dma,
                              priv->tx_len[priv->tx_tail] - n);
                release_dma_lock(flags);
@@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_READ);
                set_dma_addr(priv->param.dma,
-                            (int) priv->rx_buf[priv->rx_head]);
+                            virt_to_bus(priv->rx_buf[priv->rx_head]));
                set_dma_count(priv->param.dma, BUF_SIZE);
                release_dma_lock(flags);
                enable_dma(priv->param.dma);
@@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc)
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        set_dma_addr(priv->param.dma,
-                                    (int) priv->rx_buf[priv->rx_head]);
+                                    virt_to_bus(priv->rx_buf[priv->rx_head]));
                        set_dma_count(priv->param.dma, BUF_SIZE);
                        release_dma_lock(flags);
                } else {
index 2324e1b93e37dc9a95f95157c25117b03018246f..1da334f54944a0cbe36f96b13fd9a4e5fd2d9370 100644 (file)
@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
         * table region determines the number of entries it has.
         */
        if (filter) {
-               count = hweight32(ipa->filter_map);
+               /* Include one extra "slot" to hold the filter map itself */
+               count = 1 + hweight32(ipa->filter_map);
                hash_count = hash_mem->size ? count : 0;
        } else {
                count = mem->size / sizeof(__le64);
index 21aa24c741b9671222e1baa63c92177b0ffa72d6..daae7fa58fb822886c5a374577225d572680b1de 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef HAVE_DP83640_REGISTERS
 #define HAVE_DP83640_REGISTERS
 
-#define PAGE0                     0x0000
+/* #define PAGE0                  0x0000 */
 #define PHYCR2                    0x001c /* PHY Control Register 2 */
 
 #define PAGE4                     0x0004
index 9e2891d8e8dd98219caffce9db345f90190ac964..ba5ad86ec8261256af4523e6ddb1f00b98bc5cf1 100644 (file)
@@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock);
 
 static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
 {
+       struct device_driver *drv = phydev->mdio.dev.driver;
+       struct phy_driver *phydrv = to_phy_driver(drv);
        struct net_device *netdev = phydev->attached_dev;
 
-       if (!phydev->drv->suspend)
+       if (!drv || !phydrv->suspend)
                return false;
 
        /* PHY not attached? May suspend if the PHY has not already been
index a1464b764d4dcd3209e25b9199dac96db68234bb..0a0abe8e4be0b64b5c093accec38e94c6988b4b1 100644 (file)
@@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
        if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
                return -EINVAL;
 
+       /* If this link is with an SFP, ensure that changes to advertised modes
+        * also cause the associated interface to be selected such that the
+        * link can be configured correctly.
+        */
+       if (pl->sfp_port && pl->sfp_bus) {
+               config.interface = sfp_select_interface(pl->sfp_bus,
+                                                       config.advertising);
+               if (config.interface == PHY_INTERFACE_MODE_NA) {
+                       phylink_err(pl,
+                                   "selection of interface failed, advertisement %*pb\n",
+                                   __ETHTOOL_LINK_MODE_MASK_NBITS,
+                                   config.advertising);
+                       return -EINVAL;
+               }
+
+               /* Revalidate with the selected interface */
+               linkmode_copy(support, pl->supported);
+               if (phylink_validate(pl, support, &config)) {
+                       phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
+                                   phylink_an_mode_str(pl->cur_link_an_mode),
+                                   phy_modes(config.interface),
+                                   __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+                       return -EINVAL;
+               }
+       }
+
        mutex_lock(&pl->state_mutex);
        pl->link_config.speed = config.speed;
        pl->link_config.duplex = config.duplex;
@@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
        if (phy_interface_mode_is_8023z(iface) && pl->phydev)
                return -EINVAL;
 
-       changed = !linkmode_equal(pl->supported, support);
+       changed = !linkmode_equal(pl->supported, support) ||
+                 !linkmode_equal(pl->link_config.advertising,
+                                 config.advertising);
        if (changed) {
                linkmode_copy(pl->supported, support);
                linkmode_copy(pl->link_config.advertising, config.advertising);
index f6b92efffc94c340f758155a26a08391c30c0aac..480bcd1f6c1cb490ca40c1ff8afc92551d720c25 100644 (file)
@@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522)    += slic_ds26522.o
 clean-files := wanxlfw.inc
 $(obj)/wanxl.o:        $(obj)/wanxlfw.inc
 
+CROSS_COMPILE_M68K = m68k-linux-gnu-
+
 ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
 ifeq ($(ARCH),m68k)
   M68KCC = $(CC)
index 7efb31b87f37aa756124d52606d270a76f905ad2..6600e138945e28f4cf6989b152736a63cafcefe5 100644 (file)
@@ -3524,7 +3524,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
        lockdep_assert_held(&subsys->lock);
 
        list_for_each_entry(h, &subsys->nsheads, entry) {
-               if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+               if (h->ns_id != nsid)
+                       continue;
+               if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
                        return h;
        }
 
@@ -3843,6 +3845,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
+       if (list_empty(&ns->head->list)) {
+               list_del_init(&ns->head->entry);
+               last_path = true;
+       }
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        /* guarantee not available in head->list */
@@ -3856,20 +3862,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                nvme_cdev_del(&ns->cdev, &ns->cdev_device);
        del_gendisk(ns->disk);
        blk_cleanup_queue(ns->queue);
-       if (blk_get_integrity(ns->disk))
-               blk_integrity_unregister(ns->disk);
 
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       /* Synchronize with nvme_init_ns_head() */
-       mutex_lock(&ns->head->subsys->lock);
-       if (list_empty(&ns->head->list)) {
-               list_del_init(&ns->head->entry);
-               last_path = true;
-       }
-       mutex_unlock(&ns->head->subsys->lock);
        if (last_path)
                nvme_mpath_shutdown_disk(ns->head);
        nvme_put_ns(ns);
index 5d7bc58a27bd984dc7f19341dcdcf1a3d3af658d..e8ccdd398f784443241d659761414f0e622c111b 100644 (file)
@@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 
        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+               unsigned nsid;
+again:
+               nsid = le32_to_cpu(desc->nsids[n]);
                if (ns->head->ns_id < nsid)
                        continue;
                if (ns->head->ns_id == nsid)
                        nvme_update_ns_ana_state(desc, ns);
                if (++n == nr_nsids)
                        break;
+               if (ns->head->ns_id > nsid)
+                       goto again;
        }
        up_read(&ctrl->namespaces_rwsem);
        return 0;
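
The reworked loop above is a two-pointer merge: both the controller's namespace list and the descriptor's nsid array are sorted, and the `again:` label re-reads the next nsid without moving to the next namespace whenever the namespace ID has run ahead. A self-contained userspace model of the same traversal over plain sorted arrays (values are illustrative):

#include <stdio.h>

int main(void)
{
        /* Two sorted sequences: namespace IDs on the controller and the
         * nsids in an ANA descriptor (both hypothetical). Visit each
         * matching pair exactly once.
         */
        int ns_ids[] = { 1, 3, 4, 7 };
        int nsids[]  = { 2, 3, 5, 7 };
        int nr_ns = 4, nr_nsids = 4;
        int n = 0;

        for (int i = 0; i < nr_ns; i++) {
                int nsid;
again:
                nsid = nsids[n];
                if (ns_ids[i] < nsid)
                        continue;       /* namespace behind: next namespace */
                if (ns_ids[i] == nsid)
                        printf("update ANA state of nsid %d\n", nsid);
                if (++n == nr_nsids)
                        break;
                if (ns_ids[i] > nsid)
                        goto again;     /* descriptor behind: next nsid */
        }
        return 0;
}
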
index a68704e39084e038e02acae5c46bd7725281449c..042c594bc57e21915363acee1521c473d217a0cb 100644 (file)
@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
+       nvme_rdma_destroy_queue_ib(queue);
        mutex_destroy(&queue->queue_lock);
 }
 
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
        for (i = 0; i < queue->queue_size; i++) {
                ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
                if (ret)
-                       goto out_destroy_queue_ib;
+                       return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "rdma_connect_locked failed (%d).\n", ret);
-               goto out_destroy_queue_ib;
+               return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
        case RDMA_CM_EVENT_ROUTE_ERROR:
        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
-               nvme_rdma_destroy_queue_ib(queue);
-               fallthrough;
        case RDMA_CM_EVENT_ADDR_ERROR:
                dev_dbg(queue->ctrl->ctrl.device,
                        "CM error event %d\n", ev->event);
index e2ab12f3f51c92a0fb676771e25d931fd30bc13c..e4249b7dc05682fad3893e4ec6f3ee182b1e22d2 100644 (file)
@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
        } while (ret > 0);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+       return !list_empty(&queue->send_list) ||
+               !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
 {
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                nvme_tcp_send_all(queue);
                queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
-       } else if (last) {
-               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
        }
+
+       if (last && nvme_tcp_queue_more(queue))
+               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -906,12 +913,6 @@ done:
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
-       return !list_empty(&queue->send_list) ||
-               !llist_empty(&queue->req_list) || queue->more_requests;
-}
-
 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 {
        queue->request = NULL;
@@ -1145,8 +1146,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
                                pending = true;
                        else if (unlikely(result < 0))
                                break;
-               } else
-                       pending = !llist_empty(&queue->req_list);
+               }
 
                result = nvme_tcp_try_recv(queue);
                if (result > 0)
index d784f3c200b4cefcaab34c8466b7242a8cef7d4b..be5d82421e3a458a5dc2f37de7877320df998b5c 100644 (file)
@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
 {
        struct nvmet_subsys *subsys = to_subsys(item);
 
-       return snprintf(page, PAGE_SIZE, "%*s\n",
+       return snprintf(page, PAGE_SIZE, "%.*s\n",
                        NVMET_SN_MAX_SIZE, subsys->serial);
 }
 
index 5b043ee308240bf0d389e92aebb462dcb044382a..b0800c260f64a219848888eee8762fafee0aef64 100644 (file)
@@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
                        break;
        }
 
-       if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+       /*
+        * Attempt to initialize a restricted-dma-pool region if one was found.
+        * Note that count can hold a negative error code.
+        */
+       if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
                dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
 }
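
The comparison matters because `count` comes from a property lookup that can return a negative errno: `i != count` treats that error as "index found", while `i < count` is false for any negative value (and also when no region matched, where i == count). A tiny userspace illustration:

#include <errno.h>
#include <stdio.h>

int main(void)
{
        int i = 0;
        int count = -EINVAL;    /* what a failed property lookup returns */

        /* Old test: 0 != -22 is true, so the error fell through into
         * initialization as if a matching region had been found.
         */
        printf("i != count: %s\n", i != count ? "true (bug)" : "false");

        /* New test: 0 < -22 is false, so both a negative error code and
         * "no match" (i == count) skip the initialization.
         */
        printf("i <  count: %s\n", i < count ? "true" : "false (correct)");
        return 0;
}
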
 
index 3fd74bb34819b75e87fd7e60cac82d97ff160a9c..a3483484a5a2a4b6e7cdb6db27ce58a0b2f90944 100644 (file)
@@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
 DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
 DEFINE_SIMPLE_PROP(leds, "leds", NULL)
 DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
-DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
 
@@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
        { .parse_prop = parse_resets, },
        { .parse_prop = parse_leds, },
        { .parse_prop = parse_backlight, },
-       { .parse_prop = parse_phy_handle, },
        { .parse_prop = parse_gpio_compat, },
        { .parse_prop = parse_interrupts, },
        { .parse_prop = parse_regulators, },
index a1b1e2a0163299f7ecc78d382bed7df09a95f1fa..0f40943a9a18a6302c4fc8079ac391f5223dbe1a 100644 (file)
@@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev);
 
 void pci_set_acpi_fwnode(struct pci_dev *dev)
 {
-       if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+       if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
                ACPI_COMPANION_SET(&dev->dev,
                                   acpi_pci_find_companion(&dev->dev));
 }
index e5089af8ad90a00329adb01189c469e07da4b22c..4537d1ea14fdc034a4a0c72789ca8b745315c60b 100644 (file)
@@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
 
 /*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
  * controller to VGA.
  */
 static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
 
 /*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
  * to VGA. Currently there is no class code defined for UCSI device over PCI
  * so using UNKNOWN class for now and it will be updated when UCSI
  * over PCI gets a class code.
@@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_UNKNOWN, 8,
                              quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_UNKNOWN, 8,
+                             quirk_gpu_usb_typec_ucsi);
 
 /*
  * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
index 25557b272a4f9a630d174a664c015b85c74c137f..4be24890132ede060e4c12f3601864db4af867cc 100644 (file)
@@ -99,6 +99,24 @@ error:
        return off ?: PCI_VPD_SZ_INVALID;
 }
 
+static bool pci_vpd_available(struct pci_dev *dev)
+{
+       struct pci_vpd *vpd = &dev->vpd;
+
+       if (!vpd->cap)
+               return false;
+
+       if (vpd->len == 0) {
+               vpd->len = pci_vpd_size(dev);
+               if (vpd->len == PCI_VPD_SZ_INVALID) {
+                       vpd->cap = 0;
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 /*
  * Wait for last operation to complete.
  * This code has to spin since there is no other notification from the PCI
@@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        u8 *buf = arg;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0)
@@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        int ret = 0;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0 || (pos & 3) || (count & 3))
@@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
 
 void pci_vpd_init(struct pci_dev *dev)
 {
+       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+               return;
+
        dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        mutex_init(&dev->vpd.lock);
-
-       if (!dev->vpd.len)
-               dev->vpd.len = pci_vpd_size(dev);
-
-       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
-               dev->vpd.cap = 0;
 }
 
 static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = {
 
 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
 {
-       unsigned int len = dev->vpd.len;
+       unsigned int len;
        void *buf;
        int cnt;
 
-       if (!dev->vpd.cap)
+       if (!pci_vpd_available(dev))
                return ERR_PTR(-ENODEV);
 
+       len = dev->vpd.len;
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);
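
The pattern above defers the quirk-prone VPD size probe from device init to first access, and latches a failed probe by clearing the capability so later accesses fail fast. A userspace sketch of the same lazy-validate-and-latch idea with the PCI specifics stubbed out (names and the probe result are illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define VPD_SZ_INVALID UINT_MAX         /* stand-in for PCI_VPD_SZ_INVALID */

struct vpd {
        unsigned int cap;               /* 0 means "no usable VPD" */
        unsigned int len;               /* 0 means "not probed yet" */
};

/* Stand-in for the expensive probe (pci_vpd_size() in the kernel). */
static unsigned int probe_size(void)
{
        puts("probing VPD size...");
        return 256;                     /* pretend the device has 256 bytes */
}

/* Probe lazily on first access; latch a failure by clearing cap so
 * every later access fails fast without re-probing.
 */
static bool vpd_available(struct vpd *vpd)
{
        if (!vpd->cap)
                return false;
        if (vpd->len == 0) {
                vpd->len = probe_size();
                if (vpd->len == VPD_SZ_INVALID) {
                        vpd->cap = 0;
                        return false;
                }
        }
        return true;
}

int main(void)
{
        struct vpd vpd = { .cap = 1, .len = 0 };

        /* Only the first access pays for the probe. */
        printf("first:  %s, len=%u\n", vpd_available(&vpd) ? "ok" : "-", vpd.len);
        printf("second: %s, len=%u\n", vpd_available(&vpd) ? "ok" : "-", vpd.len);
        return 0;
}
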
index eb15067a605eff35cb11856439683963512a1cfa..4eb53412b8085e12d668c2aef3b215b939094143 100644 (file)
@@ -1047,7 +1047,9 @@ static void cmos_check_wkalrm(struct device *dev)
         * ACK the rtc irq here
         */
        if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
+               local_irq_disable();
                cmos_interrupt(0, (void *)cmos->rtc);
+               local_irq_enable();
                return;
        }
 
index 2f3515fa242a3dc18284bd2b321c7b76ae679ad6..f3d5c7f4c13d29aa48be077cc9e03456316cdb1a 100644 (file)
@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
        sclp.has_gisaf = !!(sccb->fac118 & 0x08);
        sclp.has_hvs = !!(sccb->fac119 & 0x80);
        sclp.has_kss = !!(sccb->fac98 & 0x01);
-       sclp.has_sipl = !!(sccb->cbl & 0x4000);
        if (sccb->fac85 & 0x02)
                S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
        if (sccb->fac91 & 0x40)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
        if (sccb->cpuoff > 134)
                sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+       if (sccb->cpuoff > 137)
+               sclp.has_sipl = !!(sccb->cbl & 0x4000);
        sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        sclp.rzm <<= 20;
index f433428057d9ce1e8461a6aba742a9d66e50ab83..d9b804943d1923a60d187f3bb733c6405d7652b4 100644 (file)
@@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
  * ap_init_qci_info(): Allocate and query qci config info.
  * Does also update the static variables ap_max_domain_id
  * and ap_max_adapter_id if this info is available.
-
  */
 static void __init ap_init_qci_info(void)
 {
@@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 /**
  * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
  * @airq: pointer to adapter interrupt descriptor
+ * @floating: ignored
  */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
 {
@@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap)
 /**
  * ap_scan_bus(): Scan the AP bus for new devices
  * Runs periodically, workqueue timer (ap_config_time)
+ * @unused: Unused pointer.
  */
 static void ap_scan_bus(struct work_struct *unused)
 {
index d70c4d3d0907f385a1042fc5158ad74cbc1afea6..9ea48bf0ee40d18b33df24c451333b561c738434 100644 (file)
@@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  * @ind: the notification indicator byte
  *
  * Enables interruption on AP queue via ap_aqic(). Based on the return
@@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
 
 /**
  * ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  *
  * Submit the Reset command to an AP queue.
  */
index ebd27f88303315db415e077e75a14a34170337b6..56c6feb2ba3eb5c9f8311767db4417fb5091c048 100644 (file)
@@ -1185,7 +1185,7 @@ static int tegra_slink_resume(struct device *dev)
 }
 #endif
 
-static int tegra_slink_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
        struct tegra_slink_data *tspi = spi_master_get_devdata(master);
@@ -1197,7 +1197,7 @@ static int tegra_slink_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra_slink_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
        struct tegra_slink_data *tspi = spi_master_get_devdata(master);
index 3a249ee7e14401d0f7459f8c401e2f9aebdce201..28ef323882fb29c66c04b6128021041e70b1f49a 100644 (file)
@@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
                .num = nvq->batched_xdp,
                .ptr = nvq->xdp,
        };
-       int err;
+       int i, err;
 
        if (nvq->batched_xdp == 0)
                goto signal_used;
@@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
        err = sock->ops->sendmsg(sock, msghdr, 0);
        if (unlikely(err < 0)) {
                vq_err(&nvq->vq, "Fail to batch sending packets\n");
+
+               /* Free the pages owned by XDP; since this is an unlikely
+                * error path, keep it simple and avoid a more complex bulk
+                * update of the used pages.
+                */
+               for (i = 0; i < nvq->batched_xdp; ++i)
+                       put_page(virt_to_head_page(nvq->xdp[i].data));
+               nvq->batched_xdp = 0;
+               nvq->done_idx = 0;
                return;
        }
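
On the failed sendmsg above, ownership of the batched XDP buffers never transferred, so each one's page reference must be dropped before the batch counters are reset; being an unlikely error path, a plain per-buffer loop is preferred over batching the refcount updates. A userspace analogue with heap allocations standing in for page references (structure and field names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct xdp_buf { void *data; };

int main(void)
{
        struct xdp_buf xdp[4];
        int batched = 0;

        /* Build up a batch, as vhost_net does one packet at a time. */
        for (int i = 0; i < 4; i++) {
                xdp[i].data = malloc(2048);
                if (!xdp[i].data)
                        break;
                batched++;
        }

        /* Pretend the batched sendmsg() failed: ownership was never
         * handed off, so release every buffer still in the batch and
         * reset the counter (put_page() in the kernel, free() here).
         */
        for (int i = 0; i < batched; i++)
                free(xdp[i].data);
        batched = 0;

        printf("batch released, batched_xdp=%d\n", batched);
        return 0;
}
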
 
index d33c5cd684c0b8d0780426b7310286cc94d5ffaf..b26b79dfcac92b20305140c4434eda091cbc74f8 100644 (file)
@@ -582,7 +582,9 @@ config FB_HP300
 
 config FB_TGA
        tristate "TGA/SFB+ framebuffer support"
-       depends on FB && (ALPHA || TC)
+       depends on FB
+       depends on PCI || TC
+       depends on ALPHA || TC
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 5f1ce59b44b95c5bdd4aad5a04dcd61559585eca..a37eb52fb4010750112d38e5e200c291b288fed6 100644 (file)
@@ -214,7 +214,7 @@ config XEN_PVCALLS_FRONTEND
          implements them.
 
 config XEN_PVCALLS_BACKEND
-       bool "XEN PV Calls backend driver"
+       tristate "XEN PV Calls backend driver"
        depends on INET && XEN && XEN_BACKEND
        help
          Experimental backend for the Xen PV Calls protocol
index 671c71245a7b2f6c2e985c81b8c5c9b5bd1e9bcf..2d2803883306d51cac15b97f093533ed66e57223 100644 (file)
@@ -43,6 +43,8 @@
 #include <linux/sched.h>
 #include <linux/cred.h>
 #include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/memblock.h>
 #include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
 
 /*
- * balloon_process() state:
+ * balloon_thread() state:
  *
  * BP_DONE: done or nothing to do,
  * BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
        BP_ECANCELED
 };
 
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
 
 static DEFINE_MUTEX(balloon_mutex);
 
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
 static LIST_HEAD(ballooned_pages);
 static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
 {
        if (val == MEM_ONLINE)
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        return NOTIFY_OK;
 }
@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
+ * needed, or if the credit has changed while state is BP_EAGAIN.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+       if (state != BP_EAGAIN)
+               credit = 0;
+
+       return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
  */
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
 {
        enum bp_state state = BP_DONE;
        long credit;
+       unsigned long timeout;
+
+       set_freezable();
+       for (;;) {
+               if (state == BP_EAGAIN)
+                       timeout = balloon_stats.schedule_delay * HZ;
+               else
+                       timeout = 3600 * HZ;
+               credit = current_credit();
 
+               wait_event_interruptible_timeout(balloon_thread_wq,
+                                balloon_thread_cond(state, credit), timeout);
+
+               if (kthread_should_stop())
+                       return 0;
 
-       do {
                mutex_lock(&balloon_mutex);
 
                credit = current_credit();
@@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work)
                mutex_unlock(&balloon_mutex);
 
                cond_resched();
-
-       } while (credit && state == BP_DONE);
-
-       /* Schedule more work if there is some still to be done. */
-       if (state == BP_EAGAIN)
-               schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+       }
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target)
 {
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
-       schedule_delayed_work(&balloon_worker, 0);
+       wake_up(&balloon_thread_wq);
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
 
@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 
        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        mutex_unlock(&balloon_mutex);
 }
@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
 
 static int __init balloon_init(void)
 {
+       struct task_struct *task;
+
        if (!xen_domain())
                return -ENODEV;
 
@@ -722,6 +744,12 @@ static int __init balloon_init(void)
        }
 #endif
 
+       task = kthread_run(balloon_thread, NULL, "xen-balloon");
+       if (IS_ERR(task)) {
+               pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+               return PTR_ERR(task);
+       }
+
        /* Init the xen-balloon driver. */
        xen_balloon_init();
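
The thread's wake condition encodes two regimes: outside the BP_EAGAIN back-off it wakes whenever there is any credit to act on, and inside the back-off it wakes early only if the credit changed since it went to sleep. A sketch of just that predicate as a pure function, with the state collapsed to a boolean and kthread_should_stop() omitted:

#include <stdbool.h>
#include <stdio.h>

/* `eagain` models state == BP_EAGAIN, `credit` is the credit sampled
 * before sleeping, and `now` is current_credit() at wake-check time.
 */
static bool balloon_thread_cond(bool eagain, long credit, long now)
{
        if (!eagain)
                credit = 0;     /* normal case: wake whenever credit != 0 */

        return now != credit;   /* back-off: wake only if credit changed */
}

int main(void)
{
        printf("%d\n", balloon_thread_cond(false, 5, 0)); /* idle: sleep */
        printf("%d\n", balloon_thread_cond(false, 0, 3)); /* work: wake */
        printf("%d\n", balloon_thread_cond(true, 3, 3));  /* back-off, unchanged: sleep */
        printf("%d\n", balloon_thread_cond(true, 3, 7));  /* target moved: wake */
        return 0;
}
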
 
index 643fe440c46e5ebb4ba0042da6506d3910475699..8c10edf9efe6af580e11ceded0c5bfea954b68b6 100644 (file)
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 
 static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
-       int i, rc;
-       int dma_bits;
+       int rc;
+       unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+       unsigned int i, dma_bits = order + PAGE_SHIFT;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);
 
-       dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+       BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+       BUG_ON(nslabs % IO_TLB_SEGSIZE);
 
        i = 0;
        do {
-               int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
                do {
                        rc = xen_create_contiguous_region(
-                               p + (i << IO_TLB_SHIFT),
-                               get_order(slabs << IO_TLB_SHIFT),
+                               p + (i << IO_TLB_SHIFT), order,
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;
 
-               i += slabs;
+               i += IO_TLB_SEGSIZE;
        } while (i < nslabs);
        return 0;
 }
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
        return "";
 }
 
-#define DEFAULT_NSLABS         ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
 {
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ retry:
                order--;
        }
        if (!start)
-               goto error;
+               goto exit;
        if (order != get_order(bytes)) {
                pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                        (PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ retry:
        swiotlb_set_max_segment(PAGE_SIZE);
        return 0;
 error:
-       if (repeat--) {
+       if (nslabs > 1024 && repeat--) {
                /* Min is 2MB */
-               nslabs = max(1024UL, (nslabs >> 1));
-               pr_info("Lowering to %luMB\n",
-                       (nslabs << IO_TLB_SHIFT) >> 20);
+               nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+               bytes = nslabs << IO_TLB_SHIFT;
+               pr_info("Lowering to %luMB\n", bytes >> 20);
                goto retry;
        }
+exit:
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-       free_pages((unsigned long)start, order);
        return rc;
 }
 
@@ -244,9 +241,9 @@ retry:
        rc = xen_swiotlb_fixup(start, nslabs);
        if (rc) {
                memblock_free(__pa(start), PAGE_ALIGN(bytes));
-               if (repeat--) {
+               if (nslabs > 1024 && repeat--) {
                        /* Min is 2MB */
-                       nslabs = max(1024UL, (nslabs >> 1));
+                       nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
                        bytes = nslabs << IO_TLB_SHIFT;
                        pr_info("Lowering to %luMB\n", bytes >> 20);
                        goto retry;
@@ -254,7 +251,7 @@ retry:
                panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
        }
 
-       if (swiotlb_init_with_tbl(start, nslabs, false))
+       if (swiotlb_init_with_tbl(start, nslabs, true))
                panic("Cannot allocate SWIOTLB buffer");
        swiotlb_set_max_segment(PAGE_SIZE);
 }
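
Since every xen_create_contiguous_region() call now covers exactly IO_TLB_SEGSIZE slabs, the allocation order and the starting DMA width are loop-invariant, which is what lets the hunk hoist them. A quick check of the arithmetic under the usual x86 constants (4 KiB pages, 2 KiB slabs, 128-slab segments); get_order() is reimplemented here for illustration:

#include <stdio.h>

#define PAGE_SHIFT     12       /* 4 KiB pages */
#define IO_TLB_SHIFT   11       /* 2 KiB swiotlb slabs */
#define IO_TLB_SEGSIZE 128      /* slabs per segment */

/* get_order(): smallest n such that (PAGE_SIZE << n) >= size. */
static int get_order(unsigned long size)
{
        int order = 0;

        while ((1UL << (PAGE_SHIFT + order)) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long seg_bytes = (unsigned long)IO_TLB_SEGSIZE << IO_TLB_SHIFT;
        int order = get_order(seg_bytes);
        int dma_bits = order + PAGE_SHIFT;

        /* 128 * 2 KiB = 256 KiB => order 6, so the first attempt asks
         * for 18-bit addressable memory and widens toward MAX_DMA_BITS
         * only on failure.
         */
        printf("segment=%lu KiB, order=%d, dma_bits=%d\n",
               seg_bytes >> 10, order, dma_bits);
        return 0;
}
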
index 6c55362c1f99a52e010f9c62a65d18670dc24184..c2e0e8e80949a7089268e2854a6b48c5802825f3 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
 #include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
 
 #include "io-wq.h"
 
@@ -176,7 +177,6 @@ static void io_worker_ref_put(struct io_wq *wq)
 static void io_worker_exit(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
-       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
@@ -186,7 +186,6 @@ static void io_worker_exit(struct io_worker *worker)
        if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
-       acct->nr_workers--;
        preempt_disable();
        io_wqe_dec_running(worker);
        worker->flags = 0;
@@ -246,8 +245,6 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
  */
 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
 {
-       bool do_create = false;
-
        /*
         * Most likely an attempt to queue unbounded work on an io_wq that
         * wasn't setup with any unbounded workers.
@@ -256,18 +253,15 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                pr_warn_once("io-wq is not configured for unbound workers");
 
        raw_spin_lock(&wqe->lock);
-       if (acct->nr_workers < acct->max_workers) {
-               acct->nr_workers++;
-               do_create = true;
+       if (acct->nr_workers == acct->max_workers) {
+               raw_spin_unlock(&wqe->lock);
+               return true;
        }
+       acct->nr_workers++;
        raw_spin_unlock(&wqe->lock);
-       if (do_create) {
-               atomic_inc(&acct->nr_running);
-               atomic_inc(&wqe->wq->worker_refs);
-               return create_io_worker(wqe->wq, wqe, acct->index);
-       }
-
-       return true;
+       atomic_inc(&acct->nr_running);
+       atomic_inc(&wqe->wq->worker_refs);
+       return create_io_worker(wqe->wq, wqe, acct->index);
 }
 
 static void io_wqe_inc_running(struct io_worker *worker)
@@ -574,6 +568,7 @@ loop:
                }
                /* timed out, exit unless we're the last worker */
                if (last_timeout && acct->nr_workers > 1) {
+                       acct->nr_workers--;
                        raw_spin_unlock(&wqe->lock);
                        __set_current_state(TASK_RUNNING);
                        break;
@@ -1287,6 +1282,10 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 {
        int i, node, prev = 0;
 
+       BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
+
        for (i = 0; i < 2; i++) {
                if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
                        new_count[i] = task_rlimit(current, RLIMIT_NPROC);
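
The BUILD_BUG_ON()s added above pin the io-wq accounting enum to the public bound/unbound constants, so reordering either enum breaks the build instead of silently miscounting workers. The same compile-time check in plain C11 (enum names here are illustrative):

#include <stdio.h>

/* Two enums that must stay in lockstep, like IO_WQ_ACCT_* and
 * IO_WQ_BOUND/IO_WQ_UNBOUND in the kernel.
 */
enum acct { ACCT_BOUND, ACCT_UNBOUND, ACCT_NR };
enum wq   { WQ_BOUND, WQ_UNBOUND };

/* BUILD_BUG_ON() equivalent: fails the build, costs nothing at run time. */
_Static_assert((int)ACCT_BOUND == (int)WQ_BOUND, "bound index mismatch");
_Static_assert((int)ACCT_UNBOUND == (int)WQ_UNBOUND, "unbound index mismatch");
_Static_assert(ACCT_NR == 2, "accounting arrays are sized for two classes");

int main(void)
{
        puts("enum layout verified at compile time");
        return 0;
}
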
index 16fb7436043c27653d9e1b8533c767a6a4683c74..e372d5b9f6dc04ce31b39d4499602d54815867f6 100644 (file)
@@ -712,6 +712,7 @@ struct io_async_rw {
        struct iovec                    fast_iov[UIO_FASTIOV];
        const struct iovec              *free_iovec;
        struct iov_iter                 iter;
+       struct iov_iter_state           iter_state;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
 };
@@ -735,7 +736,6 @@ enum {
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
        REQ_F_REISSUE_BIT,
-       REQ_F_DONT_REISSUE_BIT,
        REQ_F_CREDS_BIT,
        REQ_F_REFCOUNT_BIT,
        REQ_F_ARM_LTIMEOUT_BIT,
@@ -782,8 +782,6 @@ enum {
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
        /* caller should reissue async */
        REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
-       /* don't attempt request reissue, see io_rw_reissue() */
-       REQ_F_DONT_REISSUE      = BIT(REQ_F_DONT_REISSUE_BIT),
        /* supports async reads */
        REQ_F_NOWAIT_READ       = BIT(REQ_F_NOWAIT_READ_BIT),
        /* supports async writes */
@@ -2444,13 +2442,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN &&
-                   !(req->flags & REQ_F_DONT_REISSUE)) {
-                       req->iopoll_completed = 0;
-                       io_req_task_queue_reissue(req);
-                       continue;
-               }
-
                __io_cqring_fill_event(ctx, req->user_data, req->result,
                                        io_put_rw_kbuf(req));
                (*nr_events)++;
@@ -2613,8 +2604,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 
        if (!rw)
                return !io_req_prep_async(req);
-       /* may have left rw->iter inconsistent on -EIOCBQUEUED */
-       iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
+       iov_iter_restore(&rw->iter, &rw->iter_state);
        return true;
 }
 
@@ -2714,10 +2704,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (unlikely(res != req->result)) {
-               if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
-                   io_resubmit_prep(req))) {
-                       req_set_fail(req);
-                       req->flags |= REQ_F_DONT_REISSUE;
+               if (res == -EAGAIN && io_rw_should_reissue(req)) {
+                       req->flags |= REQ_F_REISSUE;
+                       return;
                }
        }
 
@@ -2843,7 +2832,8 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
        return __io_file_supports_nowait(req->file, rw);
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     int rw)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
@@ -2865,8 +2855,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(ret))
                return ret;
 
-       /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
-       if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+       /*
+        * If the file is marked O_NONBLOCK, still allow retry for it if it
+        * supports async. Otherwise it's impossible to use O_NONBLOCK files
+        * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+        */
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
                req->flags |= REQ_F_NOWAIT;
 
        ioprio = READ_ONCE(sqe->ioprio);
@@ -2931,7 +2926,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_async_rw *io = req->async_data;
-       bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
        /* add previously done IO, if any */
        if (io && io->bytes_done > 0) {
@@ -2943,19 +2937,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 
        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = kiocb->ki_pos;
-       if (ret >= 0 && check_reissue)
+       if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
                __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
 
-       if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+       if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        io_req_task_queue_reissue(req);
                } else {
+                       unsigned int cflags = io_put_rw_kbuf(req);
+                       struct io_ring_ctx *ctx = req->ctx;
+
                        req_set_fail(req);
-                       __io_req_complete(req, issue_flags, ret,
-                                         io_put_rw_kbuf(req));
+                       if (issue_flags & IO_URING_F_NONBLOCK) {
+                               mutex_lock(&ctx->uring_lock);
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                               mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                       }
                }
        }
 }
@@ -3263,12 +3265,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
                                ret = nr;
                        break;
                }
+               if (!iov_iter_is_bvec(iter)) {
+                       iov_iter_advance(iter, nr);
+               } else {
+                       req->rw.len -= nr;
+                       req->rw.addr += nr;
+               }
                ret += nr;
                if (nr != iovec.iov_len)
                        break;
-               req->rw.len -= nr;
-               req->rw.addr += nr;
-               iov_iter_advance(iter, nr);
        }
 
        return ret;
@@ -3315,12 +3320,17 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
        if (!force && !io_op_defs[req->opcode].needs_async_setup)
                return 0;
        if (!req->async_data) {
+               struct io_async_rw *iorw;
+
                if (io_alloc_async_data(req)) {
                        kfree(iovec);
                        return -ENOMEM;
                }
 
                io_req_map_rw(req, iovec, fast_iov, iter);
+               iorw = req->async_data;
+               /* we've copied and mapped the iter, ensure state is saved */
+               iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        }
        return 0;
 }
@@ -3339,6 +3349,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
+       iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        return 0;
 }
 
@@ -3346,7 +3357,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, READ);
 }
 
 /*
@@ -3442,19 +3453,28 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t io_size, ret, ret2;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               /*
+                * We come here from an earlier attempt; restore our state so
+                * that it matches, in case it no longer does. It's cheap
+                * enough that we don't need to make this conditional.
+                */
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3468,7 +3488,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                return ret ?: -EAGAIN;
        }
 
-       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
@@ -3484,30 +3504,49 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                goto out_free;
-       } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+       } else if (ret <= 0 || ret == req->result || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }
 
+       /*
+        * Don't depend on the iter state matching what was consumed, or being
+        * untouched in case of error. Restore it and we'll advance it
+        * manually if we need to.
+        */
+       iov_iter_restore(iter, state);
+
        ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
        if (ret2)
                return ret2;
 
        iovec = NULL;
        rw = req->async_data;
-       /* now use our persistent iterator, if we aren't already */
-       iter = &rw->iter;
+       /*
+        * Now use our persistent iterator and state, if we aren't already.
+        * We've restored and mapped the iter to match.
+        */
+       if (iter != &rw->iter) {
+               iter = &rw->iter;
+               state = &rw->iter_state;
+       }
 
        do {
-               io_size -= ret;
+               /*
+                * We end up here because of a partial read, either from
+                * above or inside this loop. Advance the iter by the bytes
+                * that were consumed.
+                */
+               iov_iter_advance(iter, ret);
+               if (!iov_iter_count(iter))
+                       break;
                rw->bytes_done += ret;
+               iov_iter_save_state(iter, state);
+
                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
@@ -3525,7 +3564,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                        return 0;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
-       } while (ret > 0 && ret < io_size);
+               iov_iter_restore(iter, state);
+       } while (ret > 0);
 done:
        kiocb_done(kiocb, ret, issue_flags);
 out_free:
@@ -3539,7 +3579,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, WRITE);
 }
 
 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
@@ -3548,19 +3588,24 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t ret, ret2, io_size;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
+       ret2 = 0;
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3577,7 +3622,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
            (req->flags & REQ_F_ISREG))
                goto copy_iov;
 
-       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret))
                goto out_free;
 
@@ -3624,9 +3669,9 @@ done:
                kiocb_done(kiocb, ret2, issue_flags);
        } else {
 copy_iov:
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
+               iov_iter_restore(iter, state);
+               if (ret2 > 0)
+                       iov_iter_advance(iter, ret2);
                ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
                return ret ?: -EAGAIN;
        }
@@ -7515,6 +7560,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        break;
        } while (1);
 
+       if (uts) {
+               struct timespec64 ts;
+
+               if (get_timespec64(&ts, uts))
+                       return -EFAULT;
+               timeout = timespec64_to_jiffies(&ts);
+       }
+
        if (sig) {
 #ifdef CONFIG_COMPAT
                if (in_compat_syscall())
@@ -7528,14 +7581,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
-       if (uts) {
-               struct timespec64 ts;
-
-               if (get_timespec64(&ts, uts))
-                       return -EFAULT;
-               timeout = timespec64_to_jiffies(&ts);
-       }
-
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
        INIT_LIST_HEAD(&iowq.wq.entry);
@@ -8284,11 +8329,27 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 #endif
 }
 
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+                                struct io_rsrc_node *node, void *rsrc)
+{
+       struct io_rsrc_put *prsrc;
+
+       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+       if (!prsrc)
+               return -ENOMEM;
+
+       prsrc->tag = *io_get_tag_slot(data, idx);
+       prsrc->rsrc = rsrc;
+       list_add(&prsrc->list, &node->rsrc_list);
+       return 0;
+}
+
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index)
 {
        struct io_ring_ctx *ctx = req->ctx;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       bool needs_switch = false;
        struct io_fixed_file *file_slot;
        int ret = -EBADF;
 
@@ -8304,9 +8365,22 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
        file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
-       ret = -EBADF;
-       if (file_slot->file_ptr)
-               goto err;
+
+       if (file_slot->file_ptr) {
+               struct file *old_file;
+
+               ret = io_rsrc_node_switch_start(ctx);
+               if (ret)
+                       goto err;
+
+               old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+               ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+                                           ctx->rsrc_node, old_file);
+               if (ret)
+                       goto err;
+               file_slot->file_ptr = 0;
+               needs_switch = true;
+       }
 
        *io_get_tag_slot(ctx->file_data, slot_index) = 0;
        io_fixed_file_set(file_slot, file);
@@ -8318,27 +8392,14 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        ret = 0;
 err:
+       if (needs_switch)
+               io_rsrc_node_switch(ctx, ctx->file_data);
        io_ring_submit_unlock(ctx, !force_nonblock);
        if (ret)
                fput(file);
        return ret;
 }
 
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
-                                struct io_rsrc_node *node, void *rsrc)
-{
-       struct io_rsrc_put *prsrc;
-
-       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
-       if (!prsrc)
-               return -ENOMEM;
-
-       prsrc->tag = *io_get_tag_slot(data, idx);
-       prsrc->rsrc = rsrc;
-       list_add(&prsrc->list, &node->rsrc_list);
-       return 0;
-}
-
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_rsrc_update2 *up,
                                 unsigned nr_args)
@@ -10560,10 +10621,12 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                         * ordering. Fine to drop uring_lock here, we hold
                         * a ref to the ctx.
                         */
+                       refcount_inc(&sqd->refs);
                        mutex_unlock(&ctx->uring_lock);
                        mutex_lock(&sqd->lock);
                        mutex_lock(&ctx->uring_lock);
-                       tctx = sqd->thread->io_uring;
+                       if (sqd->thread)
+                               tctx = sqd->thread->io_uring;
                }
        } else {
                tctx = current->io_uring;
@@ -10577,16 +10640,20 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        if (ret)
                goto err;
 
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
 
        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;
 
        return 0;
 err:
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
        return ret;
 }
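
The io_uring changes in this file replace fragile iov_iter_revert() arithmetic with explicit iov_iter_save_state()/iov_iter_restore() pairs: snapshot the iterator before issuing I/O, rewind to the snapshot before any retry, then advance by exactly the bytes known to have completed. A minimal userspace model of that checkpoint/rollback discipline, with a toy cursor standing in for struct iov_iter (none of these names are the kernel API):

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for struct iov_iter / struct iov_iter_state. */
struct cursor       { size_t pos, len; };
struct cursor_state { size_t pos, len; };

static void save_state(const struct cursor *c, struct cursor_state *s)
{
        s->pos = c->pos;
        s->len = c->len;
}

static void restore(struct cursor *c, const struct cursor_state *s)
{
        c->pos = s->pos;
        c->len = s->len;
}

static void advance(struct cursor *c, size_t n)
{
        c->pos += n;
        c->len -= n;
}

int main(void)
{
        struct cursor iter = { .pos = 0, .len = 4096 };
        struct cursor_state state;

        /* Snapshot before issuing I/O, like io_read() does. */
        save_state(&iter, &state);

        /* Pretend the attempt consumed 1000 bytes, then hit -EAGAIN and
         * left the cursor in an arbitrary position.
         */
        advance(&iter, 1000);

        /* On retry: don't trust the cursor. Rewind to the snapshot, then
         * advance by exactly the bytes known to be completed (here: 0).
         */
        restore(&iter, &state);
        printf("retry from pos=%zu, len=%zu\n", iter.pos, iter.len);
        return 0;
}
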
 
index a6ee23aadd2837829037a61beb22bfa24e99c50c..2a66844b7ff875e3920b103602d917ebc76aa268 100644 (file)
 #include <linux/buffer_head.h>
 #include "qnx4.h"
 
+/*
+ * A qnx4 directory entry is an inode entry or link info
+ * depending on the status field in the last byte. The
+ * first byte is where the name starts either way, and a
+ * zero means it's empty.
+ */
+union qnx4_directory_entry {
+       struct {
+               char de_name;
+               char de_pad[62];
+               char de_status;
+       };
+       struct qnx4_inode_entry inode;
+       struct qnx4_link_info link;
+};
+
 static int qnx4_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *inode = file_inode(file);
        unsigned int offset;
        struct buffer_head *bh;
-       struct qnx4_inode_entry *de;
-       struct qnx4_link_info *le;
        unsigned long blknum;
        int ix, ino;
        int size;
@@ -38,27 +52,30 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
                }
                ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
                for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
+                       union qnx4_directory_entry *de;
+                       const char *name;
+
                        offset = ix * QNX4_DIR_ENTRY_SIZE;
-                       de = (struct qnx4_inode_entry *) (bh->b_data + offset);
-                       if (!de->di_fname[0])
+                       de = (union qnx4_directory_entry *) (bh->b_data + offset);
+
+                       if (!de->de_name)
                                continue;
-                       if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
+                       if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
                                continue;
-                       if (!(de->di_status & QNX4_FILE_LINK))
-                               size = QNX4_SHORT_NAME_MAX;
-                       else
-                               size = QNX4_NAME_MAX;
-                       size = strnlen(de->di_fname, size);
-                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
-                       if (!(de->di_status & QNX4_FILE_LINK))
+                       if (!(de->de_status & QNX4_FILE_LINK)) {
+                               size = sizeof(de->inode.di_fname);
+                               name = de->inode.di_fname;
                                ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
-                       else {
-                               le  = (struct qnx4_link_info*)de;
-                               ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
+                       } else {
+                               size = sizeof(de->link.dl_fname);
+                               name = de->link.dl_fname;
+                               ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
                                        QNX4_INODES_PER_BLOCK +
-                                       le->dl_inode_ndx;
+                                       de->link.dl_inode_ndx;
                        }
-                       if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
+                       size = strnlen(name, size);
+                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
+                       if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) {
                                brelse(bh);
                                return 0;
                        }
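
The union works because both on-disk record types are 64 bytes, the name starts at byte 0 in either, and the status byte is the last byte of the record. A standalone layout check with simplified stand-ins for the two qnx4 structures (the 16- and 48-byte name fields match QNX4_SHORT_NAME_MAX and QNX4_NAME_MAX; the padding here is illustrative, the real definitions live in qnx4_fs.h):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified 64-byte stand-ins for the two on-disk records. */
struct inode_entry {
        char    di_fname[16];   /* QNX4_SHORT_NAME_MAX */
        uint8_t di_pad[47];
        uint8_t di_status;
};

struct link_info {
        char    dl_fname[48];   /* QNX4_NAME_MAX */
        uint8_t dl_pad[15];
        uint8_t dl_status;
};

union dirent {
        struct {
                char    de_name;        /* name starts at byte 0 either way */
                char    de_pad[62];
                uint8_t de_status;      /* record type flags, last byte */
        };
        struct inode_entry inode;       /* QNX4_FILE_LINK clear */
        struct link_info   link;        /* QNX4_FILE_LINK set */
};

_Static_assert(sizeof(union dirent) == 64, "directory record must be 64 bytes");

int main(void)
{
        printf("de_status at %zu, di_status at %zu, dl_status at %zu\n",
               offsetof(union dirent, de_status),
               offsetof(union dirent, inode.di_status),
               offsetof(union dirent, link.dl_status));
        return 0;
}
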
index e93375c710b932504d629bb68c3c08bc3ec24072..cc7338f9e0d1bc90319076f30b8f0f2457681887 100644 (file)
@@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
        port &= IO_SPACE_LIMIT;
        return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
 }
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
-       uintptr_t start = (uintptr_t) PCI_IOBASE;
-       uintptr_t addr = (uintptr_t) p;
-
-       if (addr >= start && addr < start + IO_SPACE_LIMIT)
-               return;
-       iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
 #endif
 
 #ifndef ioport_unmap
@@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
 #ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-       __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
 #endif
-#endif /* CONFIG_GENERIC_IOMAP */
 
 #ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
index 9b3eb6d86200180d1fe6ff40687c807bb6107bc4..08237ae8b840d8baf5ce3028b1ceccaa5ff0199c 100644 (file)
@@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
 }
 #endif
 
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
 #include <asm-generic/pci_iomap.h>
 
 #endif
index c1ab6a6e72b535a0cecdc29a1a55dd6e65650f78..d3eae6cdbacbb81688bc8834c01524ff9f79a047 100644 (file)
@@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
        return hv_vp_index[cpu_number];
 }
 
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
-                                   const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus,
+                                   bool exclude_self)
 {
        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+       int this_cpu = smp_processor_id();
 
        /* valid_bank_mask can represent up to 64 banks */
        if (hv_max_vp_index / 64 >= 64)
@@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
         * Some banks may end up being empty but this is acceptable.
         */
        for_each_cpu(cpu, cpus) {
+               if (exclude_self && cpu == this_cpu)
+                       continue;
                vcpu = hv_cpu_number_to_vp_number(cpu);
                if (vcpu == VP_INVAL)
                        return -1;
@@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
        return nr_bank;
 }
 
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus)
+{
+       return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus)
+{
+       WARN_ON_ONCE(preemptible());
+       return __cpumask_to_vpset(vpset, cpus, true);
+}
+
 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
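
The new cpumask_to_vpset_noself() variant lets Hyper-V IPI code build a vpset that skips the sending CPU. A minimal caller sketch follows; the function name is made up, and real callers live in the Hyper-V IPI path:

/* Illustrative only: cover every online CPU except the current one.
 * The helper needs a non-preemptible context so that smp_processor_id()
 * stays stable, which is what its WARN_ON_ONCE(preemptible()) checks.
 */
static int hv_vpset_all_but_self_sketch(struct hv_vpset *vpset)
{
	int nr_bank;

	preempt_disable();
	nr_bank = cpumask_to_vpset_noself(vpset, cpu_online_mask);
	preempt_enable();

	return nr_bank < 0 ? -EINVAL : nr_bank;
}
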
index df636c6d8e6c3f8344af1b0a198b4d682500937c..5a2f9bf53384b600ba41a40085a1dafbec6e6f1e 100644 (file)
@@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
 extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
                                        unsigned long offset,
                                        unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 /* Create a virtual mapping cookie for a port on a given PCI device.
  * Do not call this directly; it exists to make it easier for architectures
  * to override */
@@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
 {
        return NULL;
 }
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
 #endif
 
 #endif /* __ASM_GENERIC_PCI_IOMAP_H */
index aa50bf2959fef5c0fe4ac6c837117491cc0f9fe2..f2984af2b85bd54383453e5f4e96f645db769ef4 100644 (file)
  * GCC 4.5 and later have a 32-byte section alignment for structures.
  * Except GCC 4.9, which feels the need to align on 64 bytes.
  */
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
-#define STRUCT_ALIGNMENT 64
-#else
 #define STRUCT_ALIGNMENT 32
-#endif
 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
 
 /*
index e1c705fdfa7c531c0aaa8c3148853e2bcc8b83ba..db2e147e069fe52555bd8f8d39c2cd1e28341934 100644 (file)
@@ -752,107 +752,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
  * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
  * per-socket cgroup information except for memcg association.
  *
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overridden to carry prioidx and/or
- * classid.  The two modes are distinguished by whether the lowest bit is
- * set.  Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works.  There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled.  On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked.  While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
  */
 struct sock_cgroup_data {
-       union {
-#ifdef __LITTLE_ENDIAN
-               struct {
-                       u8      is_data : 1;
-                       u8      no_refcnt : 1;
-                       u8      unused : 6;
-                       u8      padding;
-                       u16     prioidx;
-                       u32     classid;
-               } __packed;
-#else
-               struct {
-                       u32     classid;
-                       u16     prioidx;
-                       u8      padding;
-                       u8      unused : 6;
-                       u8      no_refcnt : 1;
-                       u8      is_data : 1;
-               } __packed;
+       struct cgroup   *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       u32             classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+       u16             prioidx; /* v1 */
 #endif
-               u64             val;
-       };
 };
 
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid.  Such races are short-lived and the result isn't critical.
- */
 static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
 {
-       /* fallback to 1 which is always the ID of the root cgroup */
-       return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+       return READ_ONCE(skcd->prioidx);
+#else
+       return 1;
+#endif
 }
 
 static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
 {
-       /* fallback to 0 which is the unconfigured default classid */
-       return (skcd->is_data & 1) ? skcd->classid : 0;
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       return READ_ONCE(skcd->classid);
+#else
+       return 0;
+#endif
 }
 
-/*
- * If invoked concurrently, the updaters may clobber each other.  The
- * caller is responsible for synchronization.
- */
 static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
                                           u16 prioidx)
 {
-       struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
-       if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
-               return;
-
-       if (!(skcd_buf.is_data & 1)) {
-               skcd_buf.val = 0;
-               skcd_buf.is_data = 1;
-       }
-
-       skcd_buf.prioidx = prioidx;
-       WRITE_ONCE(skcd->val, skcd_buf.val);    /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+       WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
 }
 
 static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
                                           u32 classid)
 {
-       struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
-       if (sock_cgroup_classid(&skcd_buf) == classid)
-               return;
-
-       if (!(skcd_buf.is_data & 1)) {
-               skcd_buf.val = 0;
-               skcd_buf.is_data = 1;
-       }
-
-       skcd_buf.classid = classid;
-       WRITE_ONCE(skcd->val, skcd_buf.val);    /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       WRITE_ONCE(skcd->classid, classid);
+#endif
 }
 
 #else  /* CONFIG_SOCK_CGROUP_DATA */
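
With the packed union gone, the accessors reduce to plain READ_ONCE()/WRITE_ONCE() loads and stores, so writers no longer need cgroup_sk_update_lock. A sketch of the resulting pairing for some socket sk (illustrative, not from the patch):

/* Writer, e.g. net_cls configuration: no lock required. */
sock_cgroup_set_classid(&sk->sk_cgrp_data, classid);

/* Reader, e.g. packet classification: may race with the writer and
 * observe either the old or the new value, but never a torn one.
 */
u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
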
index 7bf60454a31361f3a0dd8b24fca91a743c8eaead..75c151413fda8c9ae1436983932ad7b09c3fda23 100644 (file)
@@ -829,33 +829,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
  */
 #ifdef CONFIG_SOCK_CGROUP_DATA
 
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
 void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 {
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-       unsigned long v;
-
-       /*
-        * @skcd->val is 64bit but the following is safe on 32bit too as we
-        * just need the lower ulong to be written and read atomically.
-        */
-       v = READ_ONCE(skcd->val);
-
-       if (v & 3)
-               return &cgrp_dfl_root.cgrp;
-
-       return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
-       return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+       return skcd->cgroup;
 }
 
 #else  /* CONFIG_SOCK_CGROUP_DATA */
index 49b0ac8b6fd323b422c047043126951bd0c68531..3c4de9b6c6e3e07e06e48c27dc01eae012e3391c 100644 (file)
 #define __no_sanitize_coverage
 #endif
 
-/*
- * Not all versions of clang implement the type-generic versions
- * of the builtin overflow checkers. Fortunately, clang implements
- * __has_builtin allowing us to avoid awkward version
- * checks. Unfortunately, we don't know which version of gcc clang
- * pretends to be, so the macro may or may not be defined.
- */
-#if __has_builtin(__builtin_mul_overflow) && \
-    __has_builtin(__builtin_add_overflow) && \
-    __has_builtin(__builtin_sub_overflow)
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
 #if __has_feature(shadow_call_stack)
 # define __noscs       __attribute__((__no_sanitize__("shadow-call-stack")))
 #endif
index 21c36b69eb06c15d4306f8c29ccbc251893095f2..bd2b881c6b63a883a118b0895bb50d3e1ae3cbb5 100644 (file)
 
 #if GCC_VERSION >= 70000
 #define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
+#else
 #define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
 #endif
 
 #if __has_attribute(__no_sanitize_address__)
 #define __no_sanitize_coverage
 #endif
 
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
 /*
  * Turn individual warnings and errors on and off locally, depending
  * on version.
index b67261a1e3e9cf382398befac327f7ec3108b4a3..3d5af56337bdb5ea0c88dca48a45e7524bf86420 100644 (file)
@@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
     (typeof(ptr)) (__ptr + (off)); })
 #endif
 
+#define absolute_pointer(val)  RELOC_HIDE((void *)(val), 0)
+
 #ifndef OPTIMIZER_HIDE_VAR
 /* Make the optimizer believe the variable can be manipulated arbitrarily. */
 #define OPTIMIZER_HIDE_VAR(var)                                                \
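
absolute_pointer() exists so that a constant address can be passed to string/memory helpers without GCC treating it as a tiny or out-of-bounds object and emitting stringop-overread warnings. A minimal sketch; the address and names below are made up:

/* Wrap a fixed platform address so the compiler cannot reason about
 * its "object size" and warn on the memcpy() below.
 */
#define EXAMPLE_ROM_BASE	absolute_pointer(0x80000000)

static void copy_rom_header_sketch(void *dst)
{
	memcpy(dst, EXAMPLE_ROM_BASE, 16);
}
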
index 8f2106e9e5c124285f7474953d84966d91c5e24d..e6ec634039658e714f1c9ad9f2854580d646a318 100644 (file)
  * Provide links to the documentation of each supported compiler, if it exists.
  */
 
-/*
- * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support gcc < 5, we implement __has_attribute
- * by hand.
- */
-#ifndef __has_attribute
-# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__      1
-# define __GCC4_has_attribute___copy__                0
-# define __GCC4_has_attribute___designated_init__     0
-# define __GCC4_has_attribute___error__               1
-# define __GCC4_has_attribute___externally_visible__  1
-# define __GCC4_has_attribute___no_caller_saved_registers__ 0
-# define __GCC4_has_attribute___noclone__             1
-# define __GCC4_has_attribute___no_profile_instrument_function__ 0
-# define __GCC4_has_attribute___nonstring__           0
-# define __GCC4_has_attribute___no_sanitize_address__ 1
-# define __GCC4_has_attribute___no_sanitize_undefined__ 1
-# define __GCC4_has_attribute___no_sanitize_coverage__ 0
-# define __GCC4_has_attribute___fallthrough__         0
-# define __GCC4_has_attribute___warning__             1
-#endif
-
 /*
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
  */
@@ -77,7 +54,6 @@
  * compiler should see some alignment anyway, when the return value is
  * massaged by 'flags = ptr & 3; ptr &= ~3;').
  *
- * Optional: only supported since gcc >= 4.9
  * Optional: not supported by icc
  *
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
index b066024c62e3f4937849c0fa5ac0d7091166160a..34de69b3b8badcdf971cd3f34b805d1b5b23f52d 100644 (file)
@@ -118,6 +118,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
 
 void memblock_free_all(void);
+void memblock_free_ptr(void *ptr, size_t size);
 void reset_node_managed_pages(pg_data_t *pgdat);
 void reset_all_zones_managed_pages(void);
 
index b179f1e3541a29c30487977f5ca45c2acc7291ce..96e113e23d04ef6d5976b246e8527201b5f5f902 100644 (file)
@@ -144,15 +144,6 @@ static inline void mmap_read_unlock(struct mm_struct *mm)
        up_read(&mm->mmap_lock);
 }
 
-static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
-{
-       if (mmap_read_trylock(mm)) {
-               rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
-               return true;
-       }
-       return false;
-}
-
 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 {
        __mmap_lock_trace_released(mm, false);
index 0f12345c21fb58430ea93884c4ad5f4688ad7d18..4669632bd72bcd4f8cdf586243022f8383c43337 100644 (file)
@@ -6,12 +6,9 @@
 #include <linux/limits.h>
 
 /*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
  *
  * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
  * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -54,7 +51,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
        return unlikely(overflow);
 }
 
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
 /*
  * For simplicity and code hygiene, the fallback code below insists on
  * a, b and *d having the same type (similar to the min() and max()
@@ -90,134 +86,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
        __builtin_mul_overflow(__a, __b, __d);  \
 }))
 
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a + __b;                       \
-       *__d < __a;                             \
-})
-#define __unsigned_sub_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a - __b;                       \
-       __a < __b;                              \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({            \
-       typeof(a) __a = (a);                            \
-       typeof(b) __b = (b);                            \
-       typeof(d) __d = (d);                            \
-       (void) (&__a == &__b);                          \
-       (void) (&__a == __d);                           \
-       *__d = __a * __b;                               \
-       __builtin_constant_p(__b) ?                     \
-         __b > 0 && __a > type_max(typeof(__a)) / __b : \
-         __a > 0 && __b > type_max(typeof(__b)) / __a;  \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a + (u64)__b;             \
-       (((~(__a ^ __b)) & (*__d ^ __a))        \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a - (u64)__b;             \
-       ((((__a ^ __b)) & (*__d ^ __a))         \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({                              \
-       typeof(a) __a = (a);                                            \
-       typeof(b) __b = (b);                                            \
-       typeof(d) __d = (d);                                            \
-       typeof(a) __tmax = type_max(typeof(a));                         \
-       typeof(a) __tmin = type_min(typeof(a));                         \
-       (void) (&__a == &__b);                                          \
-       (void) (&__a == __d);                                           \
-       *__d = (u64)__a * (u64)__b;                                     \
-       (__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||        \
-       (__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
-       (__b == (typeof(__b))-1 && __a == __tmin);                      \
-})
-
-
-#define check_add_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_add_overflow(a, b, d),                 \
-                       __unsigned_add_overflow(a, b, d)))
-
-#define check_sub_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_sub_overflow(a, b, d),                 \
-                       __unsigned_sub_overflow(a, b, d)))
-
-#define check_mul_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_mul_overflow(a, b, d),                 \
-                       __unsigned_mul_overflow(a, b, d)))
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
 /** check_shl_overflow() - Calculate a left-shifted value and check overflow
  *
  * @a: Value to be shifted
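
With the fallback gone, check_add_overflow() and friends always expand to the compiler builtins. The calling convention is unchanged: the result lands in *d and the macro returns true on overflow, which is how kmalloc_array()-style size computations use it. A sketch:

/* Illustrative: refuse a size computation that wraps rather than
 * allocating a too-small buffer; 'bytes' is only valid when
 * check_mul_overflow() returns false.
 */
static void *alloc_array_sketch(size_t nmemb, size_t elem_size)
{
	size_t bytes;

	if (check_mul_overflow(nmemb, elem_size, &bytes))
		return NULL;

	return kmalloc(bytes, GFP_KERNEL);
}
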
index e12b524426b0286be06751bb3ae891a5e55b055e..39039ce8ac4c1774a8c1aca1d0d7bc5eb6e7485f 100644 (file)
@@ -1471,6 +1471,7 @@ struct task_struct {
                                        mce_whole_page : 1,
                                        __mce_reserved : 62;
        struct callback_head            mce_kill_me;
+       int                             mce_count;
 #endif
 
 #ifdef CONFIG_KRETPROBES
index 6bdb0db3e8258ad2745705a9b046eb1c93e05840..841e2f0f5240ba9e210bb9a3fc1cbedc2162b2a8 100644 (file)
@@ -1940,7 +1940,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
        WRITE_ONCE(newsk->prev, prev);
        WRITE_ONCE(next->prev, newsk);
        WRITE_ONCE(prev->next, newsk);
-       list->qlen++;
+       WRITE_ONCE(list->qlen, list->qlen + 1);
 }
 
 static inline void __skb_queue_splice(const struct sk_buff_head *list,
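
The WRITE_ONCE() on qlen pairs with lockless readers that sample the queue length without holding the list lock, as the kernel's skb_queue_len_lockless() does. Reader side, sketched:

/* Racy by design: the returned length is a snapshot and may be stale
 * immediately; pairs with the WRITE_ONCE() in __skb_insert().
 */
static inline __u32 queue_len_snapshot(const struct sk_buff_head *list)
{
	return READ_ONCE(list->qlen);
}
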
index 5265024e8b9094cae8b211928e030980f7ec7d12..207101a9c5c326958eaae5b4be2208f0b6e2028e 100644 (file)
@@ -27,6 +27,12 @@ enum iter_type {
        ITER_DISCARD,
 };
 
+struct iov_iter_state {
+       size_t iov_offset;
+       size_t count;
+       unsigned long nr_segs;
+};
+
 struct iov_iter {
        u8 iter_type;
        bool data_source;
@@ -47,7 +53,6 @@ struct iov_iter {
                };
                loff_t xarray_start;
        };
-       size_t truncated;
 };
 
 static inline enum iter_type iov_iter_type(const struct iov_iter *i)
@@ -55,6 +60,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i)
        return i->iter_type;
 }
 
+static inline void iov_iter_save_state(struct iov_iter *iter,
+                                      struct iov_iter_state *state)
+{
+       state->iov_offset = iter->iov_offset;
+       state->count = iter->count;
+       state->nr_segs = iter->nr_segs;
+}
+
 static inline bool iter_is_iovec(const struct iov_iter *i)
 {
        return iov_iter_type(i) == ITER_IOVEC;
@@ -233,6 +246,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
 
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
 
@@ -255,10 +269,8 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
          * conversion in assignment is by definition greater than all
         * values of size_t, including old i->count.
         */
-       if (i->count > count) {
-               i->truncated += i->count - count;
+       if (i->count > count)
                i->count = count;
-       }
 }
 
 /*
@@ -267,7 +279,6 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
  */
 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 {
-       i->truncated -= count - i->count;
        i->count = count;
 }
 
index f9a17145255a3606d434db0e0b2ef6ec0bf76e51..258867eff2309204d6ba409f6f84f7164ef11c94 100644 (file)
@@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
        return dp->type == DSA_PORT_TYPE_USER;
 }
 
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+       return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
 {
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
index 59ef35154e3d812b931294ad29e92c78b91d8595..b270a07b285eccf0938a3620b583a0bcb58d622c 100644 (file)
@@ -317,13 +317,19 @@ enum {
        IORING_REGISTER_IOWQ_AFF                = 17,
        IORING_UNREGISTER_IOWQ_AFF              = 18,
 
-       /* set/get max number of workers */
+       /* set/get max number of io-wq workers */
        IORING_REGISTER_IOWQ_MAX_WORKERS        = 19,
 
        /* this goes last */
        IORING_REGISTER_LAST
 };
 
+/* io-wq worker categories */
+enum {
+       IO_WQ_BOUND,
+       IO_WQ_UNBOUND,
+};
+
 /* deprecated, see struct io_uring_rsrc_update */
 struct io_uring_files_update {
        __u32 offset;
index 5c9a48df90e1b499f88299e6410f606b879072ff..3f721693444122243c8992d0892cf5f24d68d093 100644 (file)
@@ -924,7 +924,7 @@ static void __init print_unknown_bootoptions(void)
                end += sprintf(end, " %s", *p);
 
        pr_notice("Unknown command line parameters:%s\n", unknown_options);
-       memblock_free(__pa(unknown_options), len);
+       memblock_free_ptr(unknown_options, len);
 }
 
 asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
index f833238df1ce2e08bb71907db47386c88b655a2b..6693daf4fe1124b3a69a345c42f30cf9767a739b 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2238,7 +2238,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
                return -EINVAL;
 
        if (nsops > SEMOPM_FAST) {
-               sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL_ACCOUNT);
+               sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
                if (sops == NULL)
                        return -ENOMEM;
        }
index ca3cd9aaa6ced0e65bad5617a805f9ca4ad9a715..7b4afb7d96db18306006f51b7805b1150df0789c 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2016 Facebook
  */
index e546b18d27daeda77548d00ad9f56af24a55d58f..a4b040793f443cc6a4ffa38b12721dd27cd9c690 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2016 Facebook
  */
index e8eefdf8cf3ecd2555c42634af408249fd1e06f1..09a3fd97d329ea2b3ecdad1f1c5e85dbff0478ca 100644 (file)
@@ -179,7 +179,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
         * with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
-           !mmap_read_trylock_non_owner(current->mm)) {
+           !mmap_read_trylock(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
@@ -204,9 +204,15 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
        }
 
        if (!work) {
-               mmap_read_unlock_non_owner(current->mm);
+               mmap_read_unlock(current->mm);
        } else {
                work->mm = current->mm;
+
+               /* The lock will be released once we're out of interrupt
+                * context. Tell lockdep that we've released it now so
+                * it doesn't complain that we forgot to release it.
+                */
+               rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
                irq_work_queue(&work->irq_work);
        }
 }
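
The rwsem_release() above is a lockdep-only annotation: the mmap_lock stays read-held, but lockdep ownership is handed to the irq_work, whose handler later drops the lock with the non-owner unlock. The shape of the hand-off, sketched with the names used in stack_map.c (the _sketch helpers are illustrative):

static void collect_and_defer_sketch(struct stack_map_irq_work *work)
{
	if (!mmap_read_trylock(current->mm))
		return;

	/* ... walk VMAs, resolve build IDs ... */

	work->mm = current->mm;
	rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
	irq_work_queue(&work->irq_work);
}

/* The irq_work handler then drops the lock from a safe context: */
static void do_up_read_sketch(struct irq_work *entry)
{
	struct stack_map_irq_work *work =
		container_of(entry, struct stack_map_irq_work, irq_work);

	mmap_read_unlock_non_owner(work->mm);
}
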
index 047ac4b4703b08a72902f423a0a629803f2358cc..e76b559179054b93191b8959ddcad9ad01535e8c 100644 (file)
@@ -9912,6 +9912,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
        nr_linfo = attr->line_info_cnt;
        if (!nr_linfo)
                return 0;
+       if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
+               return -EINVAL;
 
        rec_size = attr->line_info_rec_size;
        if (rec_size < MIN_BPF_LINEINFO_SIZE ||
index 881ce1470bebad74af7f37c8ae6b6579dd433cb8..8afa8690d288cc4daf023a2ef158cba7574c39da 100644 (file)
@@ -6572,74 +6572,44 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
  */
 #ifdef CONFIG_SOCK_CGROUP_DATA
 
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-
-DEFINE_SPINLOCK(cgroup_sk_update_lock);
-static bool cgroup_sk_alloc_disabled __read_mostly;
-
-void cgroup_sk_alloc_disable(void)
-{
-       if (cgroup_sk_alloc_disabled)
-               return;
-       pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
-       cgroup_sk_alloc_disabled = true;
-}
-
-#else
-
-#define cgroup_sk_alloc_disabled       false
-
-#endif
-
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-       if (cgroup_sk_alloc_disabled) {
-               skcd->no_refcnt = 1;
-               return;
-       }
-
        /* Don't associate the sock with unrelated interrupted task's cgroup. */
        if (in_interrupt())
                return;
 
        rcu_read_lock();
-
        while (true) {
                struct css_set *cset;
 
                cset = task_css_set(current);
                if (likely(cgroup_tryget(cset->dfl_cgrp))) {
-                       skcd->val = (unsigned long)cset->dfl_cgrp;
+                       skcd->cgroup = cset->dfl_cgrp;
                        cgroup_bpf_get(cset->dfl_cgrp);
                        break;
                }
                cpu_relax();
        }
-
        rcu_read_unlock();
 }
 
 void cgroup_sk_clone(struct sock_cgroup_data *skcd)
 {
-       if (skcd->val) {
-               if (skcd->no_refcnt)
-                       return;
-               /*
-                * We might be cloning a socket which is left in an empty
-                * cgroup and the cgroup might have already been rmdir'd.
-                * Don't use cgroup_get_live().
-                */
-               cgroup_get(sock_cgroup_ptr(skcd));
-               cgroup_bpf_get(sock_cgroup_ptr(skcd));
-       }
+       struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+
+       /*
+        * We might be cloning a socket which is left in an empty
+        * cgroup and the cgroup might have already been rmdir'd.
+        * Don't use cgroup_get_live().
+        */
+       cgroup_get(cgrp);
+       cgroup_bpf_get(cgrp);
 }
 
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
        struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
-       if (skcd->no_refcnt)
-               return;
        cgroup_bpf_put(cgrp);
        cgroup_put(cgrp);
 }
index 6c90c69e5311afa9347b4f0df90412b544ed8280..95445bd6eb7258d9d6a9fd26acc79748574fa12d 100644 (file)
@@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
                pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        } else if (rc == -EEXIST) {
-               pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+               err_printk(entry->dev, entry,
+                       "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
        }
 }
 
index 7ee5284bff585d33f05fd029d65e5d3002cd1540..06fec5547e7c790a58ab02183d31228cb94d5f09 100644 (file)
@@ -206,7 +206,8 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 /**
  * dma_map_sg_attrs - Map the given buffer for DMA
  * @dev:       The device for which to perform the DMA operation
- * @sg:        The sg_table object describing the buffer
+ * @sg:                The sg_table object describing the buffer
+ * @nents:     Number of entries to map
  * @dir:       DMA direction
  * @attrs:     Optional DMA attributes for the map operation
  *
index 744e8726c5b2fa226ef4514f04d480fc9015d52a..0c000cb01eeb159bb60b06a9ccf7e539dcdf0344 100644 (file)
@@ -10193,7 +10193,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
                return;
 
        if (ifh->nr_file_filters) {
-               mm = get_task_mm(event->ctx->task);
+               mm = get_task_mm(task);
                if (!mm)
                        goto restart;
 
index 4ba15088e6405bfe7ec16d956e0cba9ce9f72a09..88191f6e252c34260252e18a4cc7ee8fdbdbda59 100644 (file)
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide the ACQUIRE and RELEASE
+ * semantics rwbase_rt requires. The atomic ops should thus provide _acquire()
+ * and _release() variants (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */
 
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
         * set.
         */
        for (r = atomic_read(&rwb->readers); r < 0;) {
+               /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
                if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
                        return 1;
        }
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
        /*
         * rwb->readers can only hit 0 when a writer is waiting for the
         * active readers to leave the critical section.
+        *
+        * dec_and_test() is fully ordered, provides RELEASE.
         */
        if (unlikely(atomic_dec_and_test(&rwb->readers)))
                __rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
        struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-       atomic_add(READER_BIAS - bias, &rwb->readers);
+       /*
+        * A _release() is needed in case a reader is in the fast path; it pairs
+        * with atomic_try_cmpxchg() in rwbase_read_trylock() and provides RELEASE.
+        */
+       (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_rtmutex_unlock(rtm);
 }
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
        __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+       /* Can do without CAS because we're serialized by wait_lock. */
+       lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+       /*
+        * An _acquire is needed in case a reader is in the fast path; it pairs
+        * with rwbase_read_unlock() and provides ACQUIRE.
+        */
+       if (!atomic_read_acquire(&rwb->readers)) {
+               atomic_set(&rwb->readers, WRITER_BIAS);
+               return true;
+       }
+
+       return false;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
                                     unsigned int state)
 {
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       /*
-        * set_current_state() for rw_semaphore
-        * current_save_and_set_rtlock_wait_state() for rwlock
-        */
-       rwbase_set_and_save_current_state(state);
+       if (__rwbase_write_trylock(rwb))
+               goto out_unlock;
 
-       /* Block until all readers have left the critical section. */
-       for (; atomic_read(&rwb->readers);) {
+       rwbase_set_and_save_current_state(state);
+       for (;;) {
                /* Optimized out for rwlocks */
                if (rwbase_signal_pending_state(state, current)) {
-                       __set_current_state(TASK_RUNNING);
+                       rwbase_restore_current_state();
                        __rwbase_write_unlock(rwb, 0, flags);
                        return -EINTR;
                }
+
+               if (__rwbase_write_trylock(rwb))
+                       break;
+
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+               rwbase_schedule();
+               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 
-               /*
-                * Schedule and wait for the readers to leave the critical
-                * section. The last reader leaving it wakes the waiter.
-                */
-               if (atomic_read(&rwb->readers) != 0)
-                       rwbase_schedule();
                set_current_state(state);
-               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        }
-
-       atomic_set(&rwb->readers, WRITER_BIAS);
        rwbase_restore_current_state();
+
+out_unlock:
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        return 0;
 }
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       if (!atomic_read(&rwb->readers)) {
-               atomic_set(&rwb->readers, WRITER_BIAS);
+       if (__rwbase_write_trylock(rwb)) {
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                return 1;
        }
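
Taken together, the fast-path orderings introduced here pair up as follows (a reading aid, not code from the patch):

/*
 * reader lock:    atomic_try_cmpxchg()        ACQUIRE on success
 * writer unlock:  atomic_add_return_release() RELEASE
 *
 * reader unlock:  atomic_dec_and_test()       fully ordered (RELEASE)
 * writer trylock: atomic_read_acquire()       ACQUIRE
 */

So the writer only claims WRITER_BIAS after an ACQUIRE read observes zero readers, guaranteeing it sees all stores from read-side critical sections that have already been released.
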
index 825277e1e742ddfaacd6cc99f4ed7a4156d88778..a8d0a58deebc708b08574bee0e29bbbe5ff961b1 100644 (file)
@@ -1166,9 +1166,9 @@ void __init setup_log_buf(int early)
        return;
 
 err_free_descs:
-       memblock_free(__pa(new_descs), new_descs_size);
+       memblock_free_ptr(new_descs, new_descs_size);
 err_free_log_buf:
-       memblock_free(__pa(new_log_buf), new_log_buf_len);
+       memblock_free_ptr(new_log_buf, new_log_buf_len);
 }
 
 static bool __read_mostly ignore_loglevel;
index ed4a31e34098defa61c88823c9e566dbea422417..d566f601780f078cb44343473f943f84cc7a5634 100644 (file)
@@ -295,7 +295,7 @@ config DEBUG_INFO_DWARF4
 
 config DEBUG_INFO_DWARF5
        bool "Generate DWARF Version 5 debuginfo"
-       depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+       depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
        depends on !DEBUG_INFO_BTF
        help
          Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
index f8419cff11471c94874a5e20f72ef32abae7a5d6..5ae248b2937389363c9519954a14f86877def4c7 100644 (file)
@@ -792,7 +792,7 @@ void __init xbc_destroy_all(void)
        xbc_data = NULL;
        xbc_data_size = 0;
        xbc_node_num = 0;
-       memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
+       memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
        xbc_nodes = NULL;
        brace_index = 0;
 }
index f2d50d69a6c364c06b9e7fa4f37527224dd712c6..755c10c5138cdb51d5da166ee2900565dfdfde38 100644 (file)
@@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len,
        return 0;
 }
 EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ *     iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+       if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+                        !iov_iter_is_kvec(i)))
+               return;
+       i->iov_offset = state->iov_offset;
+       i->count = state->count;
+       /*
+        * For the *vec iters, nr_segs + iov is constant - if we increment
+        * the vec, then we also decrement the nr_segs count. Hence we don't
+        * need to track both of these; just one is enough, and we can deduce
+        * the other from it. ITER_KVEC and ITER_IOVEC are the same struct
+        * size, so we can just adjust the iov pointer as they are unionized.
+        * ITER_BVEC _may_ be the same size on some archs, but on others it is
+        * not. Be safe and handle it separately.
+        */
+       BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+       if (iov_iter_is_bvec(i))
+               i->bvec -= state->nr_segs - i->nr_segs;
+       else
+               i->iov -= state->nr_segs - i->nr_segs;
+       i->nr_segs = state->nr_segs;
+}
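
The intended pattern is snapshot-then-rollback: save the iterator before an operation that may consume it, and restore on a retryable failure, which is how io_uring reissues requests. A sketch with made-up helpers:

/* do_read()/do_read_blocking() are illustrative stand-ins. */
struct iov_iter_state state;
ssize_t ret;

iov_iter_save_state(iter, &state);
ret = do_read(file, iter);		/* may advance iter */
if (ret == -EAGAIN) {
	iov_iter_restore(iter, &state);	/* back to the snapshot */
	ret = do_read_blocking(file, iter);
}
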
index 2d3eb1cb73b8f5b9ee3307e8700d64379cdea19d..ce39ce9f3526ea2eee7891b62302369b076e8974 100644 (file)
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
        return pci_iomap_wc_range(dev, bar, 0, maxlen);
 }
 EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT),
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+       uintptr_t start = (uintptr_t) PCI_IOBASE;
+       uintptr_t addr = (uintptr_t) p;
+
+       if (addr >= start && addr < start + IO_SPACE_LIMIT)
+               return;
+       iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
 #endif /* CONFIG_PCI */
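
Driver-facing behavior is unchanged: pci_iomap() and pci_iounmap() stay symmetric whether the BAR is I/O- or memory-mapped. A minimal sketch, with the BAR number and register offset purely illustrative:

static int probe_sketch(struct pci_dev *pdev)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* whole BAR 0 */

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);
	pci_iounmap(pdev, regs);	/* handles ioport and MMIO cookies */
	return 0;
}
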
index 025338128cd92415ba57ce989aa8b9ebba888bf7..a5716fdec1aac0ac9bf6f2305d5369887e671e39 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -651,10 +651,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
         * from &migrate_nodes. This will verify that future list.h changes
         * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
         */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40903
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
-#endif
 
        if (stable_node->head == &migrate_nodes)
                list_del(&stable_node->list);
index 0ab5a749bfa69e07f124e9d378f8468e459053de..184dcd2e5d9987a41315ebcf53d1dfa127e59913 100644 (file)
@@ -472,7 +472,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
-               memblock_free(__pa(old_array), old_alloc_size);
+               memblock_free_ptr(old_array, old_alloc_size);
 
        /*
         * Reserve the new array if that comes from the memblock.  Otherwise, we
@@ -795,6 +795,20 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
        return memblock_remove_range(&memblock.memory, base, size);
 }
 
+/**
+ * memblock_free_ptr - free boot memory allocation
+ * @ptr: starting address of the boot memory allocation
+ * @size: size of the boot memory block in bytes
+ *
+ * Free a boot memory block previously allocated by the memblock_alloc_xx() API.
+ * The freed memory will not be released to the buddy allocator.
+ */
+void __init_memblock memblock_free_ptr(void *ptr, size_t size)
+{
+       if (ptr)
+               memblock_free(__pa(ptr), size);
+}
+
 /**
  * memblock_free - free boot memory block
  * @base: phys starting address of the  boot memory block
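
The conversions elsewhere in this merge all follow the same mechanical pattern; call sites that used to translate the pointer themselves now pass it directly:

/* before */
memblock_free(__pa(buf), size);

/* after: the helper does the __pa() translation and skips NULL */
memblock_free_ptr(buf, size);
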
index 37b67194c0dfe412d51b92d9b5f8513e5b2db34a..414dc5671c45edb447342b934683b0eaf1695b39 100644 (file)
@@ -53,20 +53,6 @@ struct chnl_net {
        enum caif_states state;
 };
 
-static void robust_list_del(struct list_head *delete_node)
-{
-       struct list_head *list_node;
-       struct list_head *n;
-       ASSERT_RTNL();
-       list_for_each_safe(list_node, n, &chnl_net_list) {
-               if (list_node == delete_node) {
-                       list_del(list_node);
-                       return;
-               }
-       }
-       WARN_ON(1);
-}
-
 static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
 {
        struct sk_buff *skb;
@@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev)
        ASSERT_RTNL();
        priv = netdev_priv(dev);
        strncpy(priv->name, dev->name, sizeof(priv->name));
+       INIT_LIST_HEAD(&priv->list_field);
        return 0;
 }
 
@@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev)
        struct chnl_net *priv;
        ASSERT_RTNL();
        priv = netdev_priv(dev);
-       robust_list_del(&priv->list_field);
+       list_del_init(&priv->list_field);
 }
 
 static const struct net_device_ops netdev_ops = {
@@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void)
        rtnl_lock();
        list_for_each_safe(list_node, _tmp, &chnl_net_list) {
                dev = list_entry(list_node, struct chnl_net, list_field);
-               list_del(list_node);
+               list_del_init(list_node);
                delete_device(dev);
        }
        rtnl_unlock();
index b49c57d35a88e62915fe1534e082f7a912197bbe..1a6a86693b745984dbc3fe0c3b39d30042a47186 100644 (file)
@@ -71,11 +71,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
        struct update_classid_context *ctx = (void *)v;
        struct socket *sock = sock_from_file(file);
 
-       if (sock) {
-               spin_lock(&cgroup_sk_update_lock);
+       if (sock)
                sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
-               spin_unlock(&cgroup_sk_update_lock);
-       }
        if (--ctx->batch == 0) {
                ctx->batch = UPDATE_CLASSID_BATCH;
                return n + 1;
@@ -121,8 +118,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
        struct css_task_iter it;
        struct task_struct *p;
 
-       cgroup_sk_alloc_disable();
-
        cs->classid = (u32)value;
 
        css_task_iter_start(css, 0, &it);
index 99a431c56f230b3655ffec56b7e4e2a7f24871a6..8456dfbe2eb40b371901c1e753acf8513262ac41 100644 (file)
@@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
        if (!dev)
                return -ENODEV;
 
-       cgroup_sk_alloc_disable();
-
        rtnl_lock();
 
        ret = netprio_set_prio(of_css(of), dev, prio);
@@ -221,12 +219,10 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
 static int update_netprio(const void *v, struct file *file, unsigned n)
 {
        struct socket *sock = sock_from_file(file);
-       if (sock) {
-               spin_lock(&cgroup_sk_update_lock);
+
+       if (sock)
                sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
                                        (unsigned long)v);
-               spin_unlock(&cgroup_sk_update_lock);
-       }
        return 0;
 }
 
@@ -235,8 +231,6 @@ static void net_prio_attach(struct cgroup_taskset *tset)
        struct task_struct *p;
        struct cgroup_subsys_state *css;
 
-       cgroup_sk_alloc_disable();
-
        cgroup_taskset_for_each(p, css, tset) {
                void *v = (void *)(unsigned long)css->id;
 
index c5c74a34d139d99fb58dc5cebe4d6a8d72a214ed..91e7a2202697143fddd1e81e2c528ea9bbc4ae4e 100644 (file)
@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
                newdp->dccps_role           = DCCP_ROLE_SERVER;
                newdp->dccps_hc_rx_ackvec   = NULL;
                newdp->dccps_service_list   = NULL;
+               newdp->dccps_hc_rx_ccid     = NULL;
+               newdp->dccps_hc_tx_ccid     = NULL;
                newdp->dccps_service        = dreq->dreq_service;
                newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
                newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
index 1dc45e40f961c319bf3954642b997b61a22c1265..41f36ad8b0ec674f39ce4904763dc618ebe233d1 100644 (file)
@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
        return queue_work(dsa_owq, work);
 }
 
+void dsa_flush_workqueue(void)
+{
+       flush_workqueue(dsa_owq);
+}
+
 int dsa_devlink_param_get(struct devlink *dl, u32 id,
                          struct devlink_param_gset_ctx *ctx)
 {
index 1b2b25d7bd025c13102c6629dbfe3c131ba50db8..eef13cd20f19f40a5ec0eb37a01ce3cc0c7bfaec 100644 (file)
@@ -897,6 +897,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
        ds->setup = false;
 }
 
+/* First tear down the non-shared, then the shared ports. This ensures that
+ * all work items scheduled by our switchdev handlers for user ports have
+ * completed before we destroy the refcounting kept on the shared ports.
+ */
+static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
+                       dsa_port_teardown(dp);
+
+       dsa_flush_workqueue();
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
+                       dsa_port_teardown(dp);
+}
+
+static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               dsa_switch_teardown(dp->ds);
+}
+
 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -923,26 +950,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
        return 0;
 
 teardown:
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
+       dsa_tree_teardown_ports(dst);
 
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
+       dsa_tree_teardown_switches(dst);
 
        return err;
 }
 
-static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
-{
-       struct dsa_port *dp;
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
-}
-
 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -1052,6 +1066,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_master(dst);
 
+       dsa_tree_teardown_ports(dst);
+
        dsa_tree_teardown_switches(dst);
 
        dsa_tree_teardown_cpu_ports(dst);
index 33ab7d7af9eb4dc4e18ab99988459da1bae36ec7..a5c9bc7b66c6eb37c7552b55a9d56302a71266d2 100644 (file)
@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
 
 bool dsa_schedule_work(struct work_struct *work);
+void dsa_flush_workqueue(void);
 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
 
 static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
index 662ff531d4e29874f9d64e6b91d96a1e7e0e959c..a2bf2d8ac65b7db8bc27ebbe1114f94a8da94149 100644 (file)
@@ -1854,13 +1854,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
                 * use the switch internal MDIO bus instead
                 */
                ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
-               if (ret) {
-                       netdev_err(slave_dev,
-                                  "failed to connect to port %d: %d\n",
-                                  dp->index, ret);
-                       phylink_destroy(dp->pl);
-                       return ret;
-               }
+       }
+       if (ret) {
+               netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
+                          ERR_PTR(ret));
+               phylink_destroy(dp->pl);
        }
 
        return ret;
index 3f7bd7ae7d7af8628b06c124fd8795cbf30e4ce0..141e85e6422b1f567b89d554024ee92aa5fa2d2f 100644 (file)
@@ -1346,7 +1346,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
                if (tp->undo_marker && tp->undo_retrans > 0 &&
                    after(end_seq, tp->undo_marker))
-                       tp->undo_retrans--;
+                       tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
                if ((sacked & TCPCB_SACKED_ACKED) &&
                    before(start_seq, state->reord))
                                state->reord = start_seq;
index 0d122edc368dd045effa1288246e3ebe7c65496b..b91003538d87a03855df9bd35b751017d37fe031 100644 (file)
@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
 {
        int err;
 
-       udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
+       udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
        if (!udp_tunnel_nic_workqueue)
                return -ENOMEM;
 
index 1bec5b22f80d5dc5766d0a667e783fb16c6d0ba9..0371d2c1414555f56024321f1fc8510c477ea680 100644 (file)
@@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
        int err = -ENOMEM;
        int allow_create = 1;
        int replace_required = 0;
-       int sernum = fib6_new_sernum(info->nl_net);
 
        if (info->nlh) {
                if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
@@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
        if (!err) {
                if (rt->nh)
                        list_add(&rt->nh_list, &rt->nh->f6i_list);
-               __fib6_update_sernum_upto_root(rt, sernum);
+               __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
                fib6_start_gc(info->nl_net, rt);
        }
 
index 53486b162f01c176ec86c7bee4b1be5c87a7aa49..93271a2632b8ef10437a52ec931b66b631331a51 100644 (file)
@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
        }
 
        if (tunnel->version == L2TP_HDR_VER_3 &&
-           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
+               l2tp_session_dec_refcount(session);
                goto invalid;
+       }
 
        l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
        l2tp_session_dec_refcount(session);
index 5265525011ad2a2945bbb4090c8b06cf154d0511..5ca186d53cb0f14eb6b14da352ced8eab1891de7 100644 (file)
@@ -1083,8 +1083,10 @@ static void __net_exit mctp_routes_net_exit(struct net *net)
 {
        struct mctp_route *rt;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(rt, &net->mctp.routes, list)
                mctp_route_release(rt);
+       rcu_read_unlock();
 }
 
 static struct pernet_operations mctp_net_ops = {
index 543365f58e9735338b16d5e8f4c824fbaec92c9d..2a2bc64f75cfd86af81aed0cc9841503775f96d4 100644 (file)
@@ -46,6 +46,8 @@
  *                                     Copyright (C) 2011, <lokec@ccs.neu.edu>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/ethtool.h>
 #include <linux/types.h>
 #include <linux/mm.h>
index a0a27d87f6312d95942f93846852eebb448e066a..ad570c2450be8bf2106710d3ec40ad88e21d6b17 100644 (file)
@@ -2423,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                            u32 dport, struct sk_buff_head *xmitq)
 {
-       unsigned long time_limit = jiffies + 2;
+       unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
        struct sk_buff *skb;
        unsigned int lim;
        atomic_t *dcnt;
index eb47b9de23809ac9216aae46c2fb1eae4543c890..92345c9bb60cc3b469e7cf50effe122b81c7bb89 100644 (file)
@@ -3073,7 +3073,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 
                other = unix_peer(sk);
                if (other && unix_peer(other) != sk &&
-                   unix_recvq_full(other) &&
+                   unix_recvq_full_lockless(other) &&
                    unix_dgram_peer_wake_me(sk, other))
                        writable = 0;
 
index 4cce8fd0779c7c52edc5eea076297e31b8a31dbd..51fc23e2e9e50ca2ca6d3fb9ccd1e4e6d1b8f283 100644 (file)
@@ -29,7 +29,12 @@ CLANG_FLAGS  += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
 else
 CLANG_FLAGS    += -fintegrated-as
 endif
+# By default, clang only warns when it encounters an unknown warning flag or
+# certain optimization flags it knows it has not implemented.
+# Make it behave more like gcc by erroring when these flags are encountered
+# so they can be implemented or wrapped in cc-option.
 CLANG_FLAGS    += -Werror=unknown-warning-option
+CLANG_FLAGS    += -Werror=ignored-optimization-argument
 KBUILD_CFLAGS  += $(CLANG_FLAGS)
 KBUILD_AFLAGS  += $(CLANG_FLAGS)
 export CLANG_FLAGS
index eef56d629799a0e5e5d91196dd2b6ce07785a985..48585c4d04ade8488c27b6e0501d433219b66141 100644 (file)
@@ -13,7 +13,7 @@
 # Stage 2 is handled by this file and does the following
 # 1) Find all modules listed in modules.order
 # 2) modpost is then used to
-# 3)  create one <module>.mod.c file pr. module
+# 3)  create one <module>.mod.c file per module
 # 4)  create one Module.symvers file with CRC for all exported symbols
 
 # Step 3 is used to place certain information in the module's ELF
index b9b0f15e588026952e4b3f9dd8f9b77cfa9dac67..217d21abc86e8d4e04e17ef8fdc7108b6656066f 100755 (executable)
@@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
 REGEX_KCONFIG_DEF = re.compile(DEF)
 REGEX_KCONFIG_EXPR = re.compile(EXPR)
 REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
 REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
 REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
 REGEX_QUOTES = re.compile("(\"(.*?)\")")
@@ -102,6 +101,9 @@ def parse_options():
                      "continue.")
 
     if args.commit:
+        if args.commit.startswith('HEAD'):
+            sys.exit("The --commit option can't use the HEAD ref")
+
         args.find = False
 
     if args.ignore:
@@ -432,7 +434,6 @@ def parse_kconfig_file(kfile):
     lines = []
     defined = []
     references = []
-    skip = False
 
     if not os.path.exists(kfile):
         return defined, references
@@ -448,12 +449,6 @@ def parse_kconfig_file(kfile):
         if REGEX_KCONFIG_DEF.match(line):
             symbol_def = REGEX_KCONFIG_DEF.findall(line)
             defined.append(symbol_def[0])
-            skip = False
-        elif REGEX_KCONFIG_HELP.match(line):
-            skip = True
-        elif skip:
-            # ignore content of help messages
-            pass
         elif REGEX_KCONFIG_STMT.match(line):
             line = REGEX_QUOTES.sub("", line)
             symbols = get_symbols_in_line(line)
index 0033eedce003eefdb017d965fdc833901c53eb0a..1d1bde1fd45eb76b4639f8c720619f6712e25dcd 100755 (executable)
@@ -13,6 +13,7 @@ import logging
 import os
 import re
 import subprocess
+import sys
 
 _DEFAULT_OUTPUT = 'compile_commands.json'
 _DEFAULT_LOG_LEVEL = 'WARNING'
index 319f92104f5652f0c00a8ffba8259a10a8dce121..4edc708baa6356c49c8cabbb3de41ba2b0fb7457 100755 (executable)
@@ -17,13 +17,7 @@ binutils)
        echo 2.23.0
        ;;
 gcc)
-       # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
-       # https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
-       if [ "$SRCARCH" = arm64 ]; then
-               echo 5.1.0
-       else
-               echo 4.9.0
-       fi
+       echo 5.1.0
        ;;
 icc)
        # temporary
index 7862f217d85d37d234d4679c32ebe7e988a0580b..f2e506f7d57f92e59f933643c197e31bfe889dcd 100644 (file)
@@ -4,9 +4,8 @@
 
 #include <stdlib.h>
 
-#define __pa(addr)     (addr)
 #define SMP_CACHE_BYTES        0
 #define memblock_alloc(size, align)    malloc(size)
-#define memblock_free(paddr, size)     free(paddr)
+#define memblock_free_ptr(paddr, size) free(paddr)
 
 #endif
index 95c072b70d0e832f70a4ac07470bcd0d1ed46deb..8816f06fc6c7623ede69136c32ab79ccac862aee 100644 (file)
@@ -16,9 +16,9 @@
 # define __fallthrough __attribute__ ((fallthrough))
 #endif
 
-#if GCC_VERSION >= 40300
+#if __has_attribute(__error__)
 # define __compiletime_error(message) __attribute__((error(message)))
-#endif /* GCC_VERSION >= 40300 */
+#endif
 
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
@@ -38,7 +38,3 @@
 #endif
 #define __printf(a, b) __attribute__((format(printf, a, b)))
 #define __scanf(a, b)  __attribute__((format(scanf, a, b)))
-
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
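
These compiler-gcc.h hunks replace version gates with direct feature probes. A self-contained sketch of the probe, including a fallback for compilers that predate __has_attribute:

#ifndef __has_attribute
# define __has_attribute(x) 0           /* older compilers: assume absent */
#endif

#if __has_attribute(__error__)
# define __compiletime_error(msg) __attribute__((__error__(msg)))
#else
# define __compiletime_error(msg)
#endif
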
index 8712ff70995f42e4357eddb8bd9601bc42b246c0..dcb0c1bf686605ea68ab4701340d537b111a746b 100644 (file)
@@ -5,12 +5,9 @@
 #include <linux/compiler.h>
 
 /*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
  *
  * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
  * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -36,8 +33,6 @@
 #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
 #define type_min(T) ((T)((T)-type_max(T)-(T)1))
 
-
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
 /*
  * For simplicity and code hygiene, the fallback code below insists on
  * a, b and *d having the same type (similar to the min() and max()
        __builtin_mul_overflow(__a, __b, __d);  \
 })
 
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a + __b;                       \
-       *__d < __a;                             \
-})
-#define __unsigned_sub_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a - __b;                       \
-       __a < __b;                              \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({            \
-       typeof(a) __a = (a);                            \
-       typeof(b) __b = (b);                            \
-       typeof(d) __d = (d);                            \
-       (void) (&__a == &__b);                          \
-       (void) (&__a == __d);                           \
-       *__d = __a * __b;                               \
-       __builtin_constant_p(__b) ?                     \
-         __b > 0 && __a > type_max(typeof(__a)) / __b : \
-         __a > 0 && __b > type_max(typeof(__b)) / __a;  \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a + (u64)__b;             \
-       (((~(__a ^ __b)) & (*__d ^ __a))        \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a - (u64)__b;             \
-       ((((__a ^ __b)) & (*__d ^ __a))         \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({                              \
-       typeof(a) __a = (a);                                            \
-       typeof(b) __b = (b);                                            \
-       typeof(d) __d = (d);                                            \
-       typeof(a) __tmax = type_max(typeof(a));                         \
-       typeof(a) __tmin = type_min(typeof(a));                         \
-       (void) (&__a == &__b);                                          \
-       (void) (&__a == __d);                                           \
-       *__d = (u64)__a * (u64)__b;                                     \
-       (__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||        \
-       (__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
-       (__b == (typeof(__b))-1 && __a == __tmin);                      \
-})
-
-
-#define check_add_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_add_overflow(a, b, d),                 \
-                       __unsigned_add_overflow(a, b, d))
-
-#define check_sub_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_sub_overflow(a, b, d),                 \
-                       __unsigned_sub_overflow(a, b, d))
-
-#define check_mul_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_mul_overflow(a, b, d),                 \
-                       __unsigned_mul_overflow(a, b, d))
-
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
 /**
  * array_size() - Calculate size of 2-dimensional array.
  *
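
With the minimum gcc raised to 5.1 (see the min-tool-version.sh hunk earlier), every supported compiler provides the overflow builtins, so the open-coded fallbacks above could be deleted. A runnable userspace sketch of what the surviving path does:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        int sum;

        /* stores the wrapped result in sum and returns true on overflow,
         * with defined behavior for signed and unsigned operands alike */
        if (__builtin_add_overflow(INT_MAX, 1, &sum))
                printf("overflowed, wrapped value = %d\n", sum);
        return 0;
}
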
index d8886720e83d8dfe4c6c50eef746fcf867224852..8441e3e1aaac3ece838985c076738f99a30f1087 100644 (file)
@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
        free(evsel);
 }
 
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
 #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                int cpu, thread;
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
-                               FD(evsel, cpu, thread) = -1;
+                               int *fd = FD(evsel, cpu, thread);
+
+                               if (fd)
+                                       *fd = -1;
                        }
                }
        }
@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
 {
        struct perf_evsel *leader = evsel->leader;
-       int fd;
+       int *fd;
 
        if (evsel == leader) {
                *group_fd = -1;
@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
                return -ENOTCONN;
 
        fd = FD(leader, cpu, thread);
-       if (fd == -1)
+       if (fd == NULL || *fd == -1)
                return -EBADF;
 
-       *group_fd = fd;
+       *group_fd = *fd;
 
        return 0;
 }
@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
                for (thread = 0; thread < threads->nr; thread++) {
-                       int fd, group_fd;
+                       int fd, group_fd, *evsel_fd;
+
+                       evsel_fd = FD(evsel, cpu, thread);
+                       if (evsel_fd == NULL)
+                               return -EINVAL;
 
                        err = get_group_fd(evsel, cpu, thread, &group_fd);
                        if (err < 0)
@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                        if (fd < 0)
                                return -errno;
 
-                       FD(evsel, cpu, thread) = fd;
+                       *evsel_fd = fd;
                }
        }
 
@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
-               if (FD(evsel, cpu, thread) >= 0)
-                       close(FD(evsel, cpu, thread));
-               FD(evsel, cpu, thread) = -1;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd && *fd >= 0) {
+                       close(*fd);
+                       *fd = -1;
+               }
        }
 }
 
@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
-                       perf_mmap__munmap(map);
+                       perf_mmap__munmap(MMAP(evsel, cpu, thread));
                }
        }
 
@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
+                       struct perf_mmap *map;
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
+                       map = MMAP(evsel, cpu, thread);
                        perf_mmap__init(map, NULL, false, NULL);
 
-                       ret = perf_mmap__mmap(map, &mp, fd, cpu);
+                       ret = perf_mmap__mmap(map, &mp, *fd, cpu);
                        if (ret) {
                                perf_evsel__munmap(evsel);
                                return ret;
@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
 void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
 {
-       if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+       int *fd = FD(evsel, cpu, thread);
+
+       if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
                return NULL;
 
        return MMAP(evsel, cpu, thread)->base;
@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                     struct perf_counts_values *count)
 {
        size_t size = perf_evsel__read_size(evsel);
+       int *fd = FD(evsel, cpu, thread);
 
        memset(count, 0, sizeof(*count));
 
-       if (FD(evsel, cpu, thread) < 0)
+       if (fd == NULL || *fd < 0)
                return -EINVAL;
 
        if (MMAP(evsel, cpu, thread) &&
            !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
                return 0;
 
-       if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+       if (readn(*fd, count->values, size) <= 0)
                return -errno;
 
        return 0;
@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-               int fd = FD(evsel, cpu, thread),
-                   err = ioctl(fd, ioc, arg);
+               int err;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd == NULL || *fd < 0)
+                       return -1;
+
+               err = ioctl(*fd, ioc, arg);
 
                if (err)
                        return err;
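
The libperf rework changes FD() from yielding a value to yielding the slot's address, so callers can tell a missing slot (NULL) from a closed fd (-1). A generic sketch of that accessor shape, with hypothetical names:

#include <stddef.h>

struct fd_table {
        int *cells;
        int  nx, ny;
};

static int *fd_slot(struct fd_table *t, int x, int y)
{
        if (x < 0 || y < 0 || x >= t->nx || y >= t->ny)
                return NULL;                    /* no such slot */
        return &t->cells[x * t->ny + y];        /* may legitimately hold -1 */
}
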
index 0e824f7d8b19cd7b7b4dddcf200ae14f569cde94..6211d0b84b7a63edb843429ac1c2e6f8390f3887 100644 (file)
@@ -368,16 +368,6 @@ static inline int output_type(unsigned int type)
        return OUTPUT_TYPE_OTHER;
 }
 
-static inline unsigned int attr_type(unsigned int type)
-{
-       switch (type) {
-       case OUTPUT_TYPE_SYNTH:
-               return PERF_TYPE_SYNTH;
-       default:
-               return type;
-       }
-}
-
 static bool output_set_by_user(void)
 {
        int j;
@@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
                output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
 }
 
+static struct evsel *find_first_output_type(struct evlist *evlist,
+                                           unsigned int type)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (output_type(evsel->core.attr.type) == (int)type)
+                       return evsel;
+       }
+       return NULL;
+}
+
 /*
  * verify all user requested events exist and the samples
  * have the expected data
@@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
        struct evsel *evsel;
 
        for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
-               evsel = perf_session__find_first_evtype(session, attr_type(j));
+               evsel = find_first_output_type(session->evlist, j);
 
                /*
                 * even if fields is set to 0 (ie., show nothing) event must
index 781afe42e90e0455be687c577a84665d9ee36cbc..fa5bd5c20e96b011320d0b0dfdf99c095c98c4d2 100644 (file)
@@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
 }
 
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down)
+                           unsigned int row, int diff, bool arrow_down)
 {
-       unsigned int end_row;
+       int end_row;
 
-       if (row >= browser->top_idx)
-               end_row = row - browser->top_idx;
-       else
+       if (diff <= 0)
                return;
 
        SLsmg_set_char_set(1);
 
        if (arrow_down) {
+               if (row + diff <= browser->top_idx)
+                       return;
+
+               end_row = row + diff - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
-               SLsmg_write_char(SLSMG_ULCORN_CHAR);
-               ui_browser__gotorc(browser, end_row, column);
-               SLsmg_draw_hline(2);
-               ui_browser__gotorc(browser, end_row + 1, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
+
+               while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_draw_vline(1);
+               }
+
+               end_row = (int)(row - browser->top_idx);
+               if (end_row >= 0) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_write_char(SLSMG_ULCORN_CHAR);
+                       ui_browser__gotorc(browser, end_row, column);
+                       SLsmg_draw_hline(2);
+               }
        } else {
+               if (row < browser->top_idx)
+                       return;
+
+               end_row = row - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
                ui_browser__gotorc(browser, end_row, column);
index 3678eb88f119cf5f09db2708c1ff74254fe85962..510ce45540501972942c0c3a34f6429ca97de690 100644 (file)
@@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph);
 void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
                              u64 start, u64 end);
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down);
+                           unsigned int row, int diff, bool arrow_down);
 void __ui_browser__show_title(struct ui_browser *browser, const char *title);
 void ui_browser__show_title(struct ui_browser *browser, const char *title);
 int ui_browser__show(struct ui_browser *browser, const char *title,
index ef4da4295bf7f774635b72b0f8cb602246446f84..e81c2493efdf9eb1884a7471821876e370387ecc 100644 (file)
@@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
                ab->selection = al;
 }
 
-static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
 {
        struct disasm_line *pos = list_prev_entry(cursor, al.node);
        const char *name;
+       int diff = 1;
+
+       while (pos && pos->al.offset == -1) {
+               pos = list_prev_entry(pos, al.node);
+               if (!ab->opts->hide_src_code)
+                       diff++;
+       }
 
        if (!pos)
-               return false;
+               return 0;
 
        if (ins__is_lock(&pos->ins))
                name = pos->ops.locked.ins.name;
@@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
                name = pos->ins.name;
 
        if (!name || !cursor->ins.name)
-               return false;
+               return 0;
 
-       return ins__is_fused(ab->arch, name, cursor->ins.name);
+       if (ins__is_fused(ab->arch, name, cursor->ins.name))
+               return diff;
+       return 0;
 }
 
 static void annotate_browser__draw_current_jump(struct ui_browser *browser)
@@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        struct annotation *notes = symbol__annotation(sym);
        u8 pcnt_width = annotation__pcnt_width(notes);
        int width;
+       int diff = 0;
 
        /* PLT symbols contain external offsets */
        if (strstr(sym->name, "@plt"))
@@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
                                 pcnt_width + 2 + notes->widths.addr + width,
                                 from, to);
 
-       if (is_fused(ab, cursor)) {
+       diff = is_fused(ab, cursor);
+       if (diff > 0) {
                ui_browser__mark_fused(browser,
                                       pcnt_width + 3 + notes->widths.addr + width,
-                                      from - 1,
-                                      to > from);
+                                      from - diff, diff, to > from);
        }
 }
 
index 683f6d63560ed2794a1b7d0293be0cbf87536f8e..1a7112a87736ab5899b1f1d77f1b2044e1356cf0 100644 (file)
 struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
 {
        struct btf *btf;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        int err = btf__get_from_id(id, &btf);
+#pragma GCC diagnostic pop
 
        return err ? ERR_PTR(err) : btf;
 }
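
The btf hunk wraps a deliberately retained call to a deprecated function in diagnostic pragmas. The scoping pattern, shown with a hypothetical callee:

int deprecated_call(void) __attribute__((deprecated));

int use_old_api(void)
{
        int ret;

        /* push saves the diagnostic state, "ignored" silences exactly this
         * warning class, pop restores warnings for the rest of the file */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        ret = deprecated_call();
#pragma GCC diagnostic pop
        return ret;
}
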
index da19be7da284c250e5cff7b84e87a47de5ec7b3e..44e40bad0e336ab7d45c35065d4e1c37346da543 100644 (file)
@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
 
        al.filtered = 0;
        al.sym = NULL;
+       al.srcline = NULL;
        if (!cpumode) {
                thread__find_cpumode_addr_location(thread, ip, &al);
        } else {
index 033051717ba58da3c2a27a182884fb59c4282a60..f3daa44a82660a704888705cdec15234688d0c5b 100644 (file)
 #include <unistd.h>
 #include <ftw.h>
 
-
 #include "cgroup_helpers.h"
 
 /*
  * To avoid relying on the system setup, when setup_cgroup_env is called
- * we create a new mount namespace, and cgroup namespace. The cgroup2
- * root is mounted at CGROUP_MOUNT_PATH
- *
- * Unfortunately, most people don't have cgroupv2 enabled at this point in time.
- * It's easier to create our own mount namespace and manage it ourselves.
+ * we create a new mount namespace, and cgroup namespace. The cgroupv2
+ * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't
+ * have cgroupv2 enabled at this point in time. It's easier to create our
+ * own mount namespace and manage it ourselves. We assume /mnt exists.
  *
- * We assume /mnt exists.
+ * Related cgroupv1 helpers are named *classid*(), since we only use the
+ * net_cls controller for tagging net_cls.classid. We assume the default
+ * mount under /sys/fs/cgroup/net_cls, which should be the case for the
+ * vast majority of users.
  */
 
 #define WALK_FD_LIMIT                  16
+
 #define CGROUP_MOUNT_PATH              "/mnt"
+#define CGROUP_MOUNT_DFLT              "/sys/fs/cgroup"
+#define NETCLS_MOUNT_PATH              CGROUP_MOUNT_DFLT "/net_cls"
 #define CGROUP_WORK_DIR                        "/cgroup-test-work-dir"
+
 #define format_cgroup_path(buf, path) \
        snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \
                 CGROUP_WORK_DIR, path)
 
+#define format_classid_path(buf)                               \
+       snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH,   \
+                CGROUP_WORK_DIR)
+
 /**
  * enable_all_controllers() - Enable all available cgroup v2 controllers
  *
@@ -139,8 +148,7 @@ static int nftwfunc(const char *filename, const struct stat *statptr,
        return 0;
 }
 
-
-static int join_cgroup_from_top(char *cgroup_path)
+static int join_cgroup_from_top(const char *cgroup_path)
 {
        char cgroup_procs_path[PATH_MAX + 1];
        pid_t pid = getpid();
@@ -313,3 +321,114 @@ int cgroup_setup_and_join(const char *path) {
        }
        return cg_fd;
 }
+
+/**
+ * setup_classid_environment() - Setup the cgroupv1 net_cls environment
+ *
+ * After calling this function, cleanup_classid_environment should be called
+ * once testing is complete.
+ *
+ * This function will print an error to stderr and return 1 if it is unable
+ * to setup the cgroup environment. If setup is successful, 0 is returned.
+ */
+int setup_classid_environment(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+
+       if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) &&
+           errno != EBUSY) {
+               log_err("mount cgroup base");
+               return 1;
+       }
+
+       if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) {
+               log_err("mkdir cgroup net_cls");
+               return 1;
+       }
+
+       if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
+           errno != EBUSY) {
+               log_err("mount cgroup net_cls");
+               return 1;
+       }
+
+       cleanup_classid_environment();
+
+       if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
+               log_err("mkdir cgroup work dir");
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * set_classid() - Set a cgroupv1 net_cls classid
+ * @id: the numeric classid
+ *
+ * Writes the passed classid into the cgroup work dir's net_cls.classid
+ * file in order to later on trigger socket tagging.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1. If there
+ * is a failure, it prints the error to stderr.
+ */
+int set_classid(unsigned int id)
+{
+       char cgroup_workdir[PATH_MAX - 42];
+       char cgroup_classid_path[PATH_MAX + 1];
+       int fd, rc = 0;
+
+       format_classid_path(cgroup_workdir);
+       snprintf(cgroup_classid_path, sizeof(cgroup_classid_path),
+                "%s/net_cls.classid", cgroup_workdir);
+
+       fd = open(cgroup_classid_path, O_WRONLY);
+       if (fd < 0) {
+               log_err("Opening cgroup classid: %s", cgroup_classid_path);
+               return 1;
+       }
+
+       if (dprintf(fd, "%u\n", id) < 0) {
+               log_err("Setting cgroup classid");
+               rc = 1;
+       }
+
+       close(fd);
+       return rc;
+}
+
+/**
+ * join_classid() - Join a cgroupv1 net_cls classid
+ *
+ * This function expects the cgroup work dir to be already created, as we
+ * join it here. This causes the process sockets to be tagged with the given
+ * net_cls classid.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_classid(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+       return join_cgroup_from_top(cgroup_workdir);
+}
+
+/**
+ * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment
+ *
+ * At call time, it moves the calling process to the root cgroup, and then
+ * runs the deletion process.
+ *
+ * On failure, it will print an error to stderr, and try to continue.
+ */
+void cleanup_classid_environment(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+       join_cgroup_from_top(NETCLS_MOUNT_PATH);
+       nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
index 5fe3d88e4f0d271d77ad1dcec724147620d59c21..629da3854b3e7a56fa54cfbf93a2ad086ab35eae 100644 (file)
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __CGROUP_HELPERS_H
 #define __CGROUP_HELPERS_H
+
 #include <errno.h>
 #include <string.h>
 
@@ -8,12 +9,21 @@
 #define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
        __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
 
-
+/* cgroupv2 related */
 int cgroup_setup_and_join(const char *path);
 int create_and_get_cgroup(const char *path);
+unsigned long long get_cgroup_id(const char *path);
+
 int join_cgroup(const char *path);
+
 int setup_cgroup_environment(void);
 void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(const char *path);
 
-#endif
+/* cgroupv1 related */
+int set_classid(unsigned int id);
+int join_classid(void);
+
+int setup_classid_environment(void);
+void cleanup_classid_environment(void);
+
+#endif /* __CGROUP_HELPERS_H */
index 7e9f6375757a9bfb23dfb7deb572ae306c52037c..6db1af8fdee788407fe55576cf20c57347159c7c 100644 (file)
@@ -208,11 +208,26 @@ error_close:
 
 static int connect_fd_to_addr(int fd,
                              const struct sockaddr_storage *addr,
-                             socklen_t addrlen)
+                             socklen_t addrlen, const bool must_fail)
 {
-       if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
-               log_err("Failed to connect to server");
-               return -1;
+       int ret;
+
+       errno = 0;
+       ret = connect(fd, (const struct sockaddr *)addr, addrlen);
+       if (must_fail) {
+               if (!ret) {
+                       log_err("Unexpected success to connect to server");
+                       return -1;
+               }
+               if (errno != EPERM) {
+                       log_err("Unexpected error from connect to server");
+                       return -1;
+               }
+       } else {
+               if (ret) {
+                       log_err("Failed to connect to server");
+                       return -1;
+               }
        }
 
        return 0;
@@ -257,7 +272,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
                       strlen(opts->cc) + 1))
                goto error_close;
 
-       if (connect_fd_to_addr(fd, &addr, addrlen))
+       if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
                goto error_close;
 
        return fd;
@@ -289,7 +304,7 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
                return -1;
        }
 
-       if (connect_fd_to_addr(client_fd, &addr, len))
+       if (connect_fd_to_addr(client_fd, &addr, len, false))
                return -1;
 
        return 0;
index da7e132657d594663a38001f008777dea078c1f6..d198181a56487cbbc765ee26af871721af6e0187 100644 (file)
@@ -20,6 +20,7 @@ typedef __u16 __sum16;
 struct network_helper_opts {
        const char *cc;
        int timeout_ms;
+       bool must_fail;
 };
 
 /* ipv4 test vector */
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
new file mode 100644 (file)
index 0000000..ab3b9bc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "connect4_dropper.skel.h"
+
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+static int run_test(int cgroup_fd, int server_fd, bool classid)
+{
+       struct network_helper_opts opts = {
+               .must_fail = true,
+       };
+       struct connect4_dropper *skel;
+       int fd, err = 0;
+
+       skel = connect4_dropper__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_open"))
+               return -1;
+
+       skel->links.connect_v4_dropper =
+               bpf_program__attach_cgroup(skel->progs.connect_v4_dropper,
+                                          cgroup_fd);
+       if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) {
+               err = -1;
+               goto out;
+       }
+
+       if (classid && !ASSERT_OK(join_classid(), "join_classid")) {
+               err = -1;
+               goto out;
+       }
+
+       fd = connect_to_fd_opts(server_fd, &opts);
+       if (fd < 0)
+               err = -1;
+       else
+               close(fd);
+out:
+       connect4_dropper__destroy(skel);
+       return err;
+}
+
+void test_cgroup_v1v2(void)
+{
+       struct network_helper_opts opts = {};
+       int server_fd, client_fd, cgroup_fd;
+       static const int port = 60123;
+
+       /* Step 1: Check base connectivity works without any BPF. */
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+       if (!ASSERT_GE(server_fd, 0, "server_fd"))
+               return;
+       client_fd = connect_to_fd_opts(server_fd, &opts);
+       if (!ASSERT_GE(client_fd, 0, "client_fd")) {
+               close(server_fd);
+               return;
+       }
+       close(client_fd);
+       close(server_fd);
+
+       /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */
+       cgroup_fd = test__join_cgroup("/connect_dropper");
+       if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+               return;
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+       if (!ASSERT_GE(server_fd, 0, "server_fd")) {
+               close(cgroup_fd);
+               return;
+       }
+       ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
+       setup_classid_environment();
+       set_classid(42);
+       ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
+       cleanup_classid_environment();
+       close(server_fd);
+       close(cgroup_fd);
+}
index 53f0e0fa1a5370b8fbfe284197284c1f3b60b49b..37c20b5ffa70f40291c1a9f73f7cf11b8ff815f7 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include <test_progs.h>
-#include <linux/ptrace.h>
 #include "test_task_pt_regs.skel.h"
 
 void test_task_pt_regs(void)
diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c
new file mode 100644 (file)
index 0000000..b565d99
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define VERDICT_REJECT 0
+#define VERDICT_PROCEED        1
+
+SEC("cgroup/connect4")
+int connect_v4_dropper(struct bpf_sock_addr *ctx)
+{
+       if (ctx->type != SOCK_STREAM)
+               return VERDICT_PROCEED;
+       if (ctx->user_port == bpf_htons(60123))
+               return VERDICT_REJECT;
+       return VERDICT_PROCEED;
+}
+
+char _license[] SEC("license") = "GPL";
index 6c059f1cfa1bb27b84b7162f93786d2c4a9334d1..e6cb09259408f2fea7929a6fbb1b313c38e69894 100644 (file)
@@ -1,12 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct pt_regs current_regs = {};
-struct pt_regs ctx_regs = {};
+#define PT_REGS_SIZE sizeof(struct pt_regs)
+
+/*
+ * The kernel struct pt_regs isn't exported in its entirety to userspace.
+ * Pass it as an array to task_pt_regs.c
+ */
+char current_regs[PT_REGS_SIZE] = {};
+char ctx_regs[PT_REGS_SIZE] = {};
 int uprobe_res = 0;
 
 SEC("uprobe/trigger_func")
@@ -17,8 +22,10 @@ int handle_uprobe(struct pt_regs *ctx)
 
        current = bpf_get_current_task_btf();
        regs = (struct pt_regs *) bpf_task_pt_regs(current);
-       __builtin_memcpy(&current_regs, regs, sizeof(*regs));
-       __builtin_memcpy(&ctx_regs, ctx, sizeof(*ctx));
+       if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
+               return 0;
+       if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
+               return 0;
 
        /* Prove that uprobe was run */
        uprobe_res = 1;
index e1bf55dabdf6569b9da744beeaedf7a57f617e17..162c41e9bcae876be1a503dba0e3d27d3d4b2beb 100644 (file)
@@ -746,7 +746,7 @@ int read_write_nci_cmd(int nfc_sock, int virtual_fd, const __u8 *cmd, __u32 cmd_
                       const __u8 *rsp, __u32 rsp_len)
 {
        char buf[256];
-       unsigned int len;
+       int len;
 
        send(nfc_sock, &cmd[3], cmd_len - 3, 0);
        len = read(virtual_fd, buf, cmd_len);
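
The nfc selftest fix makes len signed because read() returns ssize_t; stored unsigned, the -1 error value becomes a huge positive length. A runnable illustration:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[16];
        int len = read(-1, buf, sizeof(buf));   /* bad fd: read() returns -1 */

        if (len < 0)                            /* only visible when signed */
                perror("read");
        return 0;
}
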
index 4254ddc3f70b560a85b64edcd9512eb7fb4bf621..1ef9e4159bba8e83b796f02b37c2b547d03db0f9 100755 (executable)
@@ -45,7 +45,7 @@ altnames_test()
        check_err $? "Got unexpected long alternative name from link show JSON"
 
        ip link property del $DUMMY_DEV altname $SHORT_NAME
-       check_err $? "Failed to add short alternative name"
+       check_err $? "Failed to delete short alternative name"
 
        ip -j -p link show $SHORT_NAME &>/dev/null
        check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
index bd1ca25febe4c71543a646a901c0470118307063..aed632d29fff5273c69210a85bdaece582d91e9b 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
 #include <asm/unistd.h>
 
        .text
@@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
 1:
        li      r3, -1
        blr
+
+
+.macro scv level
+       .long (0x44000001 | (\level) << 5)
+.endm
+
+FUNC_START(getppid_scv_tm_active)
+       PUSH_BASIC_STACK(0)
+       tbegin.
+       beq 1f
+       li      r0, __NR_getppid
+       scv     0
+       tend.
+       POP_BASIC_STACK(0)
+       blr
+1:
+       li      r3, -1
+       POP_BASIC_STACK(0)
+       blr
+
+FUNC_START(getppid_scv_tm_suspended)
+       PUSH_BASIC_STACK(0)
+       tbegin.
+       beq 1f
+       li      r0, __NR_getppid
+       tsuspend.
+       scv     0
+       tresume.
+       tend.
+       POP_BASIC_STACK(0)
+       blr
+1:
+       li      r3, -1
+       POP_BASIC_STACK(0)
+       blr
index 467a6b3134b2a36341c078b92d66ddf5fe3b9189..b763354c2eb4768eb3fe2f95b1da73c4a796e613 100644 (file)
 #include "utils.h"
 #include "tm.h"
 
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV               0x00100000 /* scv syscall */
+#endif
+
 extern int getppid_tm_active(void);
 extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
 
 unsigned retries = 0;
 
 #define TEST_DURATION 10 /* seconds */
 
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
 {
        int i;
        pid_t pid;
 
        for (i = 0; i < TM_RETRIES; i++) {
-               if (suspend)
-                       pid = getppid_tm_suspended();
-               else
-                       pid = getppid_tm_active();
+               if (suspend) {
+                       if (scv)
+                               pid = getppid_scv_tm_suspended();
+                       else
+                               pid = getppid_tm_suspended();
+               } else {
+                       if (scv)
+                               pid = getppid_scv_tm_active();
+                       else
+                               pid = getppid_tm_active();
+               }
 
                if (pid >= 0)
                        return pid;
@@ -82,15 +95,24 @@ int tm_syscall(void)
                 * Test a syscall within a suspended transaction and verify
                 * that it succeeds.
                 */
-               FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+               FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
 
                /*
                 * Test a syscall within an active transaction and verify that
                 * it fails with the correct failure code.
                 */
-               FAIL_IF(getppid_tm(false) != -1);  /* Should fail... */
+               FAIL_IF(getppid_tm(false, false) != -1);  /* Should fail... */
                FAIL_IF(!failure_is_persistent()); /* ...persistently... */
                FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+
+               /* Now do it all again with scv if it is available. */
+               if (have_hwcap2(PPC_FEATURE2_SCV)) {
+                       FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+                       FAIL_IF(getppid_tm(true, false) != -1);  /* Should fail... */
+                       FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+                       FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+               }
+
                gettimeofday(&now, 0);
        }