Backmerge tag 'v5.14-rc3' into drm-next
authorDave Airlie <airlied@redhat.com>
Sun, 25 Jul 2021 23:27:59 +0000 (09:27 +1000)
committerDave Airlie <airlied@redhat.com>
Sun, 25 Jul 2021 23:27:59 +0000 (09:27 +1000)
Linux 5.14-rc3

Daniel said we should pull the nouveau fix from fixes in here, probably
a good plan.

Signed-off-by: Dave Airlie <airlied@redhat.com>
307 files changed:
Documentation/arm64/tagged-address-abi.rst
Documentation/devicetree/bindings/net/imx-dwmac.txt [deleted file]
Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
Documentation/networking/af_xdp.rst
Documentation/networking/ip-sysctl.rst
Documentation/trace/histogram.rst
MAINTAINERS
Makefile
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/ipq8074.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/msm8998.dtsi
arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
arch/arm64/boot/dts/qcom/qcs404.dtsi
arch/arm64/boot/dts/qcom/sc7180.dtsi
arch/arm64/boot/dts/qcom/sdm845.dtsi
arch/arm64/boot/dts/qcom/sm8150.dtsi
arch/arm64/kernel/smccc-call.S
arch/arm64/mm/mmu.c
arch/m68k/Kconfig.machine
arch/nds32/mm/mmap.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_nested.c
arch/powerpc/kvm/book3s_hv_p9_entry.c
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/nohash/8xx.c
arch/powerpc/platforms/pasemi/idle.c
arch/riscv/include/asm/efi.h
arch/riscv/kernel/stacktrace.c
arch/riscv/lib/uaccess.S
arch/riscv/mm/init.c
arch/s390/boot/text_dma.S
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/ftrace.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/mcount.S
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/vdso32/Makefile
arch/s390/net/bpf_jit_comp.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/jump_label.c
arch/x86/mm/pgtable.c
drivers/acpi/Kconfig
drivers/acpi/utils.c
drivers/base/auxiliary.c
drivers/base/core.c
drivers/block/rbd.c
drivers/bus/mhi/core/main.c
drivers/bus/mhi/pci_generic.c
drivers/firmware/efi/dev-path-parser.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/mokvar-table.c
drivers/firmware/efi/tpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/hv/channel_mgmt.c
drivers/i2c/busses/i2c-mpc.c
drivers/media/pci/intel/ipu3/cio2-bridge.c
drivers/media/pci/ngene/ngene-core.c
drivers/media/pci/ngene/ngene.h
drivers/misc/eeprom/at24.c
drivers/mmc/core/block.c
drivers/mmc/core/host.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mt7530.h
drivers/net/dsa/mv88e6xxx/Kconfig
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/octeontx2/af/Makefile
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/Kconfig
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/xscale/ptp_ixp46x.c
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/trace.h
drivers/regulator/bd9576-regulator.c
drivers/regulator/hi6421-regulator.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/mtk-dvfsrc-regulator.c
drivers/regulator/rtmv20-regulator.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/scsi_transport_iscsi.c
drivers/spi/spi-atmel.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-cadence.c
drivers/spi/spi-imx.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-stm32.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/core_intr.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/udc/tegra-xudc.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/max3421-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci-renesas.c
drivers/usb/host/xhci-pci.c
drivers/usb/phy/phy.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/stusb160x.c
drivers/usb/typec/tipd/core.c
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/write.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/delayed-ref.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/tree-log.c
fs/btrfs/zoned.c
fs/ceph/mds_client.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/dfs_cache.h
fs/cifs/fs_context.c
fs/cifs/smb2ops.c
fs/fs-writeback.c
fs/hugetlbfs/inode.c
fs/io-wq.c
fs/io_uring.c
fs/seq_file.c
fs/userfaultfd.c
include/acpi/acpi_bus.h
include/drm/drm_ioctl.h
include/linux/blkdev.h
include/linux/highmem.h
include/linux/memblock.h
include/linux/mfd/rt5033-private.h
include/linux/pgtable.h
include/net/tcp.h
include/sound/soc.h
include/trace/events/afs.h
include/trace/events/net.h
include/trace/events/qdisc.h
kernel/bpf/verifier.c
kernel/dma/ops_helpers.c
kernel/smpboot.c
kernel/time/posix-cpu-timers.c
kernel/time/timer.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_synth.c
kernel/trace/trace_synth.h
kernel/tracepoint.c
mm/backing-dev.c
mm/kfence/core.c
mm/kfence/kfence_test.c
mm/memblock.c
mm/memory.c
mm/mmap_lock.c
mm/page_alloc.c
mm/secretmem.c
net/bpf/test_run.c
net/bridge/br_fdb.c
net/caif/caif_socket.c
net/core/dev.c
net/core/skbuff.c
net/core/skmsg.c
net/decnet/af_decnet.c
net/dsa/slave.c
net/dsa/tag_ksz.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv4/udp_bpf.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/udp.c
net/netrom/nr_timer.c
net/sched/act_skbmod.c
net/sched/cls_api.c
net/sched/cls_tcindex.c
net/sctp/auth.c
net/sctp/output.c
net/sctp/socket.c
sound/core/pcm_native.c
sound/hda/intel-dsp-config.c
sound/isa/sb/sb16_csp.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp-da7219-max98357a.c
sound/soc/codecs/Kconfig
sound/soc/codecs/rt5631.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/tlv320aic31xx.h
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/wcd938x.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/boards/sof_sdw_max98373.c
sound/soc/soc-pcm.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/tegra/tegra_pcm.c
sound/soc/ti/j721e-evm.c
sound/usb/mixer.c
sound/usb/quirks.c
tools/bpf/bpftool/common.c
tools/testing/selftests/net/nettest.c
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/vm/userfaultfd.c

index 459e6b66ff68c2ca0674794d3d42f1aa1d5b0013..0c9120ec58ae62ce1ea462be1c080d1476775152 100644 (file)
@@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
 
 1. User addresses not accessed by the kernel but used for address space
    management (e.g. ``mprotect()``, ``madvise()``). The use of valid
-   tagged pointers in this context is allowed with the exception of
-   ``brk()``, ``mmap()`` and the ``new_address`` argument to
-   ``mremap()`` as these have the potential to alias with existing
-   user addresses.
-
-   NOTE: This behaviour changed in v5.6 and so some earlier kernels may
-   incorrectly accept valid tagged pointers for the ``brk()``,
-   ``mmap()`` and ``mremap()`` system calls.
+   tagged pointers in this context is allowed with these exceptions:
+
+   - ``brk()``, ``mmap()`` and the ``new_address`` argument to
+     ``mremap()`` as these have the potential to alias with existing
+      user addresses.
+
+     NOTE: This behaviour changed in v5.6 and so some earlier kernels may
+     incorrectly accept valid tagged pointers for the ``brk()``,
+     ``mmap()`` and ``mremap()`` system calls.
+
+   - The ``range.start``, ``start`` and ``dst`` arguments to the
+     ``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
+     ``userfaultfd()``, as fault addresses subsequently obtained by reading
+     the file descriptor will be untagged, which may otherwise confuse
+     tag-unaware programs.
+
+     NOTE: This behaviour changed in v5.14 and so some earlier kernels may
+     incorrectly accept valid tagged pointers for this system call.
 
 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
    relaxation is disabled by default and the application thread needs to
diff --git a/Documentation/devicetree/bindings/net/imx-dwmac.txt b/Documentation/devicetree/bindings/net/imx-dwmac.txt
deleted file mode 100644 (file)
index 921d522..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-IMX8 glue layer controller, NXP imx8 families support Synopsys MAC 5.10a IP.
-
-This file documents platform glue layer for IMX.
-Please see stmmac.txt for the other unchanged properties.
-
-The device node has following properties.
-
-Required properties:
-- compatible:  Should be "nxp,imx8mp-dwmac-eqos" to select glue layer
-              and "snps,dwmac-5.10a" to select IP version.
-- clocks: Must contain a phandle for each entry in clock-names.
-- clock-names: Should be "stmmaceth" for the host clock.
-              Should be "pclk" for the MAC apb clock.
-              Should be "ptp_ref" for the MAC timer clock.
-              Should be "tx" for the MAC RGMII TX clock:
-              Should be "mem" for EQOS MEM clock.
-               - "mem" clock is required for imx8dxl platform.
-               - "mem" clock is not required for imx8mp platform.
-- interrupt-names: Should contain a list of interrupt names corresponding to
-                  the interrupts in the interrupts property, if available.
-                  Should be "macirq" for the main MAC IRQ
-                  Should be "eth_wake_irq" for the IT which wake up system
-- intf_mode: Should be phandle/offset pair. The phandle to the syscon node which
-            encompases the GPR register, and the offset of the GPR register.
-               - required for imx8mp platform.
-               - is optional for imx8dxl platform.
-
-Optional properties:
-- intf_mode: is optional for imx8dxl platform.
-- snps,rmii_refclk_ext: to select RMII reference clock from external.
-
-Example:
-       eqos: ethernet@30bf0000 {
-               compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
-               reg = <0x30bf0000 0x10000>;
-               interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-names = "eth_wake_irq", "macirq";
-               clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
-                        <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
-                        <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
-                        <&clk IMX8MP_CLK_ENET_QOS>;
-               clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
-               assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
-                                 <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
-                                 <&clk IMX8MP_CLK_ENET_QOS>;
-               assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
-                                        <&clk IMX8MP_SYS_PLL2_100M>,
-                                        <&clk IMX8MP_SYS_PLL2_125M>;
-               assigned-clock-rates = <0>, <100000000>, <125000000>;
-               nvmem-cells = <&eth_mac0>;
-               nvmem-cell-names = "mac-address";
-               nvmem_macaddr_swap;
-               intf_mode = <&gpr 0x4>;
-               status = "disabled";
-       };
diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
new file mode 100644 (file)
index 0000000..5629b2e
--- /dev/null
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/nxp,dwmac-imx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8 DWMAC glue layer Device Tree Bindings
+
+maintainers:
+  - Joakim Zhang <qiangqing.zhang@nxp.com>
+
+# We need a select here so we don't match all nodes with 'snps,dwmac'
+select:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - nxp,imx8mp-dwmac-eqos
+          - nxp,imx8dxl-dwmac-eqos
+  required:
+    - compatible
+
+allOf:
+  - $ref: "snps,dwmac.yaml#"
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - nxp,imx8mp-dwmac-eqos
+              - nxp,imx8dxl-dwmac-eqos
+          - const: snps,dwmac-5.10a
+
+  clocks:
+    minItems: 3
+    maxItems: 5
+    items:
+      - description: MAC host clock
+      - description: MAC apb clock
+      - description: MAC timer clock
+      - description: MAC RGMII TX clock
+      - description: EQOS MEM clock
+
+  clock-names:
+    minItems: 3
+    maxItems: 5
+    contains:
+      enum:
+        - stmmaceth
+        - pclk
+        - ptp_ref
+        - tx
+        - mem
+
+  intf_mode:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      Should be phandle/offset pair. The phandle to the syscon node which
+      encompases the GPR register, and the offset of the GPR register.
+
+  snps,rmii_refclk_ext:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      To select RMII reference clock from external.
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/clock/imx8mp-clock.h>
+
+    eqos: ethernet@30bf0000 {
+            compatible = "nxp,imx8mp-dwmac-eqos","snps,dwmac-5.10a";
+            reg = <0x30bf0000 0x10000>;
+            interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                         <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+            interrupt-names = "macirq", "eth_wake_irq";
+            clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
+                     <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
+                     <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
+                     <&clk IMX8MP_CLK_ENET_QOS>;
+            clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
+            phy-mode = "rgmii";
+            status = "disabled";
+    };
index d7652596a09b37863e3a3485811defb849ed30b2..42689b7d03a2ff84c35d08891fc060e06e6100a0 100644 (file)
@@ -28,6 +28,7 @@ select:
           - snps,dwmac-4.00
           - snps,dwmac-4.10a
           - snps,dwmac-4.20a
+          - snps,dwmac-5.10a
           - snps,dwxgmac
           - snps,dwxgmac-2.10
 
@@ -82,6 +83,7 @@ properties:
         - snps,dwmac-4.00
         - snps,dwmac-4.10a
         - snps,dwmac-4.20a
+        - snps,dwmac-5.10a
         - snps,dwxgmac
         - snps,dwxgmac-2.10
 
@@ -375,6 +377,7 @@ allOf:
               - snps,dwmac-4.00
               - snps,dwmac-4.10a
               - snps,dwmac-4.20a
+              - snps,dwmac-5.10a
               - snps,dwxgmac
               - snps,dwxgmac-2.10
               - st,spear600-gmac
index 8850c01bd47060c2aad7cefa74d37fa303a286a8..9b131c6facbc0032b4a994b9f0f374397380c179 100644 (file)
@@ -57,12 +57,14 @@ properties:
     maxItems: 1
 
   power-domains:
+    deprecated: true
     description:
       Power domain to use for enable control. This binding is only
       available if the compatible is chosen to regulator-fixed-domain.
     maxItems: 1
 
   required-opps:
+    deprecated: true
     description:
       Performance state to use for enable control. This binding is only
       available if the compatible is chosen to regulator-fixed-domain. The
index ee936d1aa724c13adc48d0d91487004c2e792da8..c2930d65728ed23c6eb90bb1dc604c3ea924b006 100644 (file)
@@ -114,7 +114,7 @@ properties:
 
   ports:
     $ref: /schemas/graph.yaml#/properties/ports
-    properties:
+    patternProperties:
       port(@[0-9a-f]+)?:
         $ref: audio-graph-port.yaml#
         unevaluatedProperties: false
index 42576880aa4a1b1c7b15b67a706e328bc95e7efc..60b217b436be668fe705e53ee5da35119eeb2995 100644 (file)
@@ -243,8 +243,8 @@ Configuration Flags and Socket Options
 These are the various configuration flags that can be used to control
 and monitor the behavior of AF_XDP sockets.
 
-XDP_COPY and XDP_ZERO_COPY bind flags
--------------------------------------
+XDP_COPY and XDP_ZEROCOPY bind flags
+------------------------------------
 
 When you bind to a socket, the kernel will first try to use zero-copy
 copy. If zero-copy is not supported, it will fall back on using copy
@@ -252,7 +252,7 @@ mode, i.e. copying all packets out to user space. But if you would
 like to force a certain mode, you can use the following flags. If you
 pass the XDP_COPY flag to the bind call, the kernel will force the
 socket into copy mode. If it cannot use copy mode, the bind call will
-fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
+fail with an error. Conversely, the XDP_ZEROCOPY flag will force the
 socket into zero-copy mode or fail.
 
 XDP_SHARED_UMEM bind flag
index b3fa522e4cd9d48e582705df0b3b35c603144967..316c7dfa9693a368d54735e38c2312518437db50 100644 (file)
@@ -826,7 +826,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
        initial value when the blackhole issue goes away.
        0 to disable the blackhole detection.
 
-       By default, it is set to 1hr.
+       By default, it is set to 0 (feature is disabled).
 
 tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs
        The list consists of a primary key and an optional backup key. The
index b71e09f745c3dae4511c36beb8ccf290f5f01c3e..f99be8062bc82861b1043bfddc31f91588838e02 100644 (file)
@@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
                                 with the event, in nanoseconds.  May be
                                modified by .usecs to have timestamps
                                interpreted as microseconds.
-    cpu                    int  the cpu on which the event occurred.
+    common_cpu             int  the cpu on which the event occurred.
     ====================== ==== =======================================
 
 Extended error information
index e7ae7bbbff56711fd5fbdf1345db56414458d484..2519793d31a18aaa215e0ea49f2b6f0045b45453 100644 (file)
@@ -445,7 +445,7 @@ F:  drivers/platform/x86/wmi.c
 F:     include/uapi/linux/wmi.h
 
 ACRN HYPERVISOR SERVICE MODULE
-M:     Shuo Liu <shuo.a.liu@intel.com>
+M:     Fei Li <fei1.li@intel.com>
 L:     acrn-dev@lists.projectacrn.org (subscribers-only)
 S:     Supported
 W:     https://projectacrn.org
@@ -11771,6 +11771,7 @@ F:      drivers/char/hw_random/mtk-rng.c
 MEDIATEK SWITCH DRIVER
 M:     Sean Wang <sean.wang@mediatek.com>
 M:     Landen Chao <Landen.Chao@mediatek.com>
+M:     DENG Qingfang <dqfext@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/dsa/mt7530.*
@@ -19135,7 +19136,7 @@ M:      Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
-F:     drivers/phy/hisilicon/phy-kirin970-usb3.c
+F:     drivers/phy/hisilicon/phy-hi3670-usb3.c
 
 USB ISP116X DRIVER
 M:     Olav Kongas <ok@artecdesign.ee>
@@ -19813,6 +19814,14 @@ L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/ptp/ptp_vmw.c
 
+VMWARE VMCI DRIVER
+M:     Jorgen Hansen <jhansen@vmware.com>
+M:     Vishnu Dasa <vdasa@vmware.com>
+L:     linux-kernel@vger.kernel.org
+L:     pv-drivers@vmware.com (private)
+S:     Maintained
+F:     drivers/misc/vmw_vmci/
+
 VMWARE VMMOUSE SUBDRIVER
 M:     "VMware Graphics" <linux-graphics-maintainer@vmware.com>
 M:     "VMware, Inc." <pv-drivers@vmware.com>
index e4f5895badb5eb94348a2c96831a6f9d73f0def8..6b555f64df0682bb995bf3684c35715cdeb40ebb 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
index 9f7c7f587d38bcfb963745bf632253808bbd7b1f..ca38d0d6c3c4acfd6455d24f202a144c8d951dee 100644 (file)
                        eqos: ethernet@30bf0000 {
                                compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
                                reg = <0x30bf0000 0x10000>;
-                               interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
-                                            <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
-                               interrupt-names = "eth_wake_irq", "macirq";
+                               interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-names = "macirq", "eth_wake_irq";
                                clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
                                         <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
                                         <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
index 068692350e000bafa45926d19dc66f99850bd0e8..51e17094d7b18e3f327c51f78e2f0c8dedae6017 100644 (file)
        status = "okay";
        extcon = <&usb2_id>;
 
-       usb@7600000 {
+       dwc3@7600000 {
                extcon = <&usb2_id>;
                dr_mode = "otg";
                maximum-speed = "high-speed";
        status = "okay";
        extcon = <&usb3_id>;
 
-       usb@6a00000 {
+       dwc3@6a00000 {
                extcon = <&usb3_id>;
                dr_mode = "otg";
        };
index 95d6cb8cd4c0c1779cda74856660b84f6c808b4e..f39bc10cc5bd738e6548728d08c7ed6b7d760b65 100644 (file)
                        resets = <&gcc GCC_USB0_BCR>;
                        status = "disabled";
 
-                       dwc_0: usb@8a00000 {
+                       dwc_0: dwc3@8a00000 {
                                compatible = "snps,dwc3";
                                reg = <0x8a00000 0xcd00>;
                                interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
                        resets = <&gcc GCC_USB1_BCR>;
                        status = "disabled";
 
-                       dwc_1: usb@8c00000 {
+                       dwc_1: dwc3@8c00000 {
                                compatible = "snps,dwc3";
                                reg = <0x8c00000 0xcd00>;
                                interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
index 0e1bc4669d7e10f2d9a8b06e5468995f662bbb20..78c55ca10ba9b119fb8a2e89a2fe49dfb815f2c2 100644 (file)
                        power-domains = <&gcc USB30_GDSC>;
                        status = "disabled";
 
-                       usb@6a00000 {
+                       dwc3@6a00000 {
                                compatible = "snps,dwc3";
                                reg = <0x06a00000 0xcc00>;
                                interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
                        qcom,select-utmi-as-pipe-clk;
                        status = "disabled";
 
-                       usb@7600000 {
+                       dwc3@7600000 {
                                compatible = "snps,dwc3";
                                reg = <0x07600000 0xcc00>;
                                interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
index 6f294f9c0cdf15208b08e2d64c4cdc8a686189d2..e9d3ce29937c4e01189cde089f98892f9b09dc8a 100644 (file)
 
                        resets = <&gcc GCC_USB_30_BCR>;
 
-                       usb3_dwc3: usb@a800000 {
+                       usb3_dwc3: dwc3@a800000 {
                                compatible = "snps,dwc3";
                                reg = <0x0a800000 0xcd00>;
                                interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
index f8a55307b8554adb2812f5f7eaae0d2248ba67b6..a80c578484ba3e8fbfe587031403f03d818a6d46 100644 (file)
 &usb3 {
        status = "okay";
 
-       usb@7580000 {
+       dwc3@7580000 {
                dr_mode = "host";
        };
 };
index 9c4be020d5689aa9363a9d914dcc84c56da93645..339790ba585de9545ec15deeb0e6b276cc3a176b 100644 (file)
                        assigned-clock-rates = <19200000>, <200000000>;
                        status = "disabled";
 
-                       usb@7580000 {
+                       dwc3@7580000 {
                                compatible = "snps,dwc3";
                                reg = <0x07580000 0xcd00>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                        assigned-clock-rates = <19200000>, <133333333>;
                        status = "disabled";
 
-                       usb@78c0000 {
+                       dwc3@78c0000 {
                                compatible = "snps,dwc3";
                                reg = <0x078c0000 0xcc00>;
                                interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
index a5d58eb928960b496f9c27b3a92690de3fcd156d..a9a052f8c63c8610d34c3b33b2b728b30dbf21f0 100644 (file)
                                        <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_1_dwc3: usb@a600000 {
+                       usb_1_dwc3: dwc3@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xe000>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
index 1796ae8372be24c269dc94ff34cf749b1416be62..0a86fe71a66d19d4d0e422fef1153527122e99b7 100644 (file)
                                        <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_1_dwc3: usb@a600000 {
+                       usb_1_dwc3: dwc3@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
                                        <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_2_dwc3: usb@a800000 {
+                       usb_2_dwc3: dwc3@a800000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a800000 0 0xcd00>;
                                interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
index 612dda0fef43852623240b2d8598b274efbe9f51..eef9d79157e981a4d8fd06501f63d5d1ae622639 100644 (file)
 
                        resets = <&gcc GCC_USB30_PRIM_BCR>;
 
-                       usb_1_dwc3: usb@a600000 {
+                       usb_1_dwc3: dwc3@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
index d3d37f932b97a586ac8f5c7a8e5c91c3bb3890c0..487381164ff6b6e23abd5ec9fe26bb883cae0782 100644 (file)
@@ -32,20 +32,23 @@ SYM_FUNC_END(__arm_smccc_sve_check)
 EXPORT_SYMBOL(__arm_smccc_sve_check)
 
        .macro SMCCC instr
+       stp     x29, x30, [sp, #-16]!
+       mov     x29, sp
 alternative_if ARM64_SVE
        bl      __arm_smccc_sve_check
 alternative_else_nop_endif
        \instr  #0
-       ldr     x4, [sp]
+       ldr     x4, [sp, #16]
        stp     x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
        stp     x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
-       ldr     x4, [sp, #8]
+       ldr     x4, [sp, #24]
        cbz     x4, 1f /* no quirk structure */
        ldr     x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
        cmp     x9, #ARM_SMCCC_QUIRK_QCOM_A6
        b.ne    1f
        str     x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
-1:     ret
+1:     ldp     x29, x30, [sp], #16
+       ret
        .endm
 
 /*
index d74586508448885a0cac561fa688f07ed4c078e4..9ff0de1b2b93c9d91468776be0132f543135086e 100644 (file)
@@ -1339,7 +1339,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
        return dt_virt;
 }
 
-#if CONFIG_PGTABLE_LEVELS > 3
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
@@ -1354,16 +1353,6 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
        return 1;
 }
 
-int pud_clear_huge(pud_t *pudp)
-{
-       if (!pud_sect(READ_ONCE(*pudp)))
-               return 0;
-       pud_clear(pudp);
-       return 1;
-}
-#endif
-
-#if CONFIG_PGTABLE_LEVELS > 2
 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
@@ -1378,6 +1367,14 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
        return 1;
 }
 
+int pud_clear_huge(pud_t *pudp)
+{
+       if (!pud_sect(READ_ONCE(*pudp)))
+               return 0;
+       pud_clear(pudp);
+       return 1;
+}
+
 int pmd_clear_huge(pmd_t *pmdp)
 {
        if (!pmd_sect(READ_ONCE(*pmdp)))
@@ -1385,7 +1382,6 @@ int pmd_clear_huge(pmd_t *pmdp)
        pmd_clear(pmdp);
        return 1;
 }
-#endif
 
 int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
 {
index d964c1f2739952299125602b048315a5a937a0ab..6a07a68178856380ef4a2cb937bb2a91f4ddc3bd 100644 (file)
@@ -33,6 +33,7 @@ config MAC
        depends on MMU
        select MMU_MOTOROLA if MMU
        select HAVE_ARCH_NVRAM_OPS
+       select HAVE_PATA_PLATFORM
        select LEGACY_TIMER_TICK
        help
          This option enables support for the Apple Macintosh series of
index c206b31ce07ac4e239b05a0c8e00cc309666f88e..1bdf5e7d1b4384fe7401ef43ac3b69832ff69f3b 100644 (file)
@@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
index 1d1fcc290fca4aa53a49a10115911da45b5c19e5..085fb8ecbf6884f91a2a0473ce223cf8595bccfe 100644 (file)
@@ -2697,8 +2697,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
                HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
                if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
                        vcpu->arch.hfscr |= HFSCR_TM;
+#endif
        }
        if (cpu_has_feature(CPU_FTR_TM_COMP))
                vcpu->arch.hfscr |= HFSCR_TM;
index 8543ad538b0c30e461f987bd47add58f241c378a..898f942eb198347234e0c2f403b50a5ff6b6bafe 100644 (file)
@@ -302,6 +302,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
        if (vcpu->kvm->arch.l1_ptcr == 0)
                return H_NOT_AVAILABLE;
 
+       if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+               return H_BAD_MODE;
+
        /* copy parameters in */
        hv_ptr = kvmppc_get_gpr(vcpu, 4);
        regs_ptr = kvmppc_get_gpr(vcpu, 5);
@@ -322,6 +325,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
        if (l2_hv.vcpu_token >= NR_CPUS)
                return H_PARAMETER;
 
+       /*
+        * L1 must have set up a suspended state to enter the L2 in a
+        * transactional state, and only in that case. These have to be
+        * filtered out here to prevent causing a TM Bad Thing in the
+        * host HRFID. We could synthesize a TM Bad Thing back to the L1
+        * here but there doesn't seem like much point.
+        */
+       if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
+               if (!MSR_TM_ACTIVE(l2_regs.msr))
+                       return H_BAD_MODE;
+       } else {
+               if (l2_regs.msr & MSR_TS_MASK)
+                       return H_BAD_MODE;
+               if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
+                       return H_BAD_MODE;
+       }
+
        /* translate lpid */
        l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
        if (!l2)
index 83f592eadcd2d8046ddd83c1dc42a405f43c4661..961b3d70483ca1e768dd70075b1b505e0bee31cb 100644 (file)
@@ -317,6 +317,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
         */
        mtspr(SPRN_HDEC, hdec);
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tm_return_to_guest:
+#endif
        mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
        mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
        mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
@@ -415,11 +418,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
                 * is in real suspend mode and is trying to transition to
                 * transactional mode.
                 */
-               if (local_paca->kvm_hstate.fake_suspend &&
+               if (!local_paca->kvm_hstate.fake_suspend &&
                                (vcpu->arch.shregs.msr & MSR_TS_S)) {
                        if (kvmhv_p9_tm_emulation_early(vcpu)) {
-                               /* Prevent it being handled again. */
-                               trap = 0;
+                               /*
+                                * Go straight back into the guest with the
+                                * new NIP/MSR as set by TM emulation.
+                                */
+                               mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+                               mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
+
+                               /*
+                                * tm_return_to_guest re-loads SRR0/1, DAR,
+                                * DSISR after RI is cleared, in case they had
+                                * been clobbered by a MCE.
+                                */
+                               __mtmsrd(0, 1); /* clear RI */
+                               goto tm_return_to_guest;
                        }
                }
 #endif
@@ -499,6 +514,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
         * If we are in real mode, only switch MMU on after the MMU is
         * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
         */
+       if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+           vcpu->arch.shregs.msr & MSR_TS_MASK)
+               msr |= MSR_TS_S;
+
        __mtmsrd(msr, 0);
 
        end_timing(vcpu);
index c5e677508d3b2318cacb1b8b55edf12387b6c778..0f847f1e5ddd0ba6590642548fe799f6335ca015 100644 (file)
@@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         * value so we can restore it on the way out.
         */
        orig_rets = args.rets;
+       if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
+               /*
+                * Don't overflow our args array: ensure there is room for
+                * at least rets[0] (even if the call specifies 0 nret).
+                *
+                * Each handler must then check for the correct nargs and nret
+                * values, but they may always return failure in rets[0].
+                */
+               rc = -EINVAL;
+               goto fail;
+       }
        args.rets = &args.args[be32_to_cpu(args.nargs)];
 
        mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
@@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 fail:
        /*
         * We only get here if the guest has called RTAS with a bogus
-        * args pointer. That means we can't get to the args, and so we
-        * can't fail the RTAS call. So fail right out to userspace,
-        * which should kill the guest.
+        * args pointer or nargs/nret values that would overflow the
+        * array. That means we can't get to the args, and so we can't
+        * fail the RTAS call. So fail right out to userspace, which
+        * should kill the guest.
+        *
+        * SLOF should actually pass the hcall return value from the
+        * rtas handler call in r3, so enter_rtas could be modified to
+        * return a failure indication in r3 and we could return such
+        * errors to the guest rather than failing to host userspace.
+        * However old guests that don't test for failure could then
+        * continue silently after errors, so for now we won't do this.
         */
        return rc;
 }
index be33b5321a766318e9033fc5bf3a09f20e9eda0d..b4e6f70b97b940e14faa5f7e27a68cee792613e5 100644 (file)
@@ -2048,9 +2048,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
-               vcpu_load(vcpu);
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
+               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                vcpu_put(vcpu);
                break;
@@ -2074,9 +2074,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
-               vcpu_load(vcpu);
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
+               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                vcpu_put(vcpu);
                break;
index 60780e08911817a0983ce9ccf9f8f11d6e879538..0df9fe29dd567f0596bf7b1f37d39f0562736031 100644 (file)
@@ -240,3 +240,13 @@ void __init setup_kuap(bool disabled)
        mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 #endif
+
+int pud_clear_huge(pud_t *pud)
+{
+        return 0;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+        return 0;
+}
index 9b88e3cded7d2dc07d165444688071d9c51a3b95..534b0317fc152b6fc742210958e93be6d9571d5e 100644 (file)
@@ -42,6 +42,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
        switch (regs->msr & SRR1_WAKEMASK) {
        case SRR1_WAKEDEC:
                set_dec(1);
+               break;
        case SRR1_WAKEEE:
                /*
                 * Handle these when interrupts get re-enabled and we take
index 7a8f0d45b13abe1be0658c3f531f95cf0f3aa60a..49b398fe99f1b2fa1697542e7dabb7b7da693a1c 100644 (file)
@@ -28,10 +28,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
 #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
 
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
 static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
 {
-       return image_addr + SZ_256M;
+       return ULONG_MAX;
 }
 
 #define alloc_screen_info(x...)                (&screen_info)
index ff467b98c3e3364bcbfefeff1f681a3c4c2d8c0e..ac7593607fa6662a53a93aa9512dd7c2d4288f41 100644 (file)
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
 {
        unsigned long pc = 0;
 
-       if (likely(task && task != current && !task_is_running(task)))
+       if (likely(task && task != current && !task_is_running(task))) {
+               if (!try_get_task_stack(task))
+                       return 0;
                walk_stackframe(task, NULL, save_wchan, &pc);
+               put_task_stack(task);
+       }
        return pc;
 }
 
index bceb0629e440eb63e604b6a09f5338ce1b2d2e11..63bc691cff91b275100a8339f1cc387555e79ae0 100644 (file)
@@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
         * t0 - end of uncopied dst
         */
        add     t0, a0, a2
-       bgtu    a0, t0, 5f
 
        /*
         * Use byte copy only if too small.
+        * SZREG holds 4 for RV32 and 8 for RV64
         */
-       li      a3, 8*SZREG /* size must be larger than size in word_copy */
+       li      a3, 9*SZREG /* size must be larger than size in word_copy */
        bltu    a2, a3, .Lbyte_copy_tail
 
        /*
-        * Copy first bytes until dst is align to word boundary.
+        * Copy first bytes until dst is aligned to word boundary.
         * a0 - start of dst
         * t1 - start of aligned dst
         */
        addi    t1, a0, SZREG-1
        andi    t1, t1, ~(SZREG-1)
        /* dst is already aligned, skip */
-       beq     a0, t1, .Lskip_first_bytes
+       beq     a0, t1, .Lskip_align_dst
 1:
        /* a5 - one byte for copying data */
        fixup lb      a5, 0(a1), 10f
@@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
        addi    a0, a0, 1       /* dst */
        bltu    a0, t1, 1b      /* t1 - start of aligned dst */
 
-.Lskip_first_bytes:
+.Lskip_align_dst:
        /*
         * Now dst is aligned.
         * Use shift-copy if src is misaligned.
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
         *
         * a0 - start of aligned dst
         * a1 - start of aligned src
-        * a3 - a1 & mask:(SZREG-1)
         * t0 - end of aligned dst
         */
-       addi    t0, t0, -(8*SZREG-1) /* not to over run */
+       addi    t0, t0, -(8*SZREG) /* not to over run */
 2:
        fixup REG_L   a4,        0(a1), 10f
        fixup REG_L   a5,    SZREG(a1), 10f
@@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
        addi    a1, a1, 8*SZREG
        bltu    a0, t0, 2b
 
-       addi    t0, t0, 8*SZREG-1 /* revert to original value */
+       addi    t0, t0, 8*SZREG /* revert to original value */
        j       .Lbyte_copy_tail
 
 .Lshift_copy:
@@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
         * For misaligned copy we still perform aligned word copy, but
         * we need to use the value fetched from the previous iteration and
         * do some shifts.
-        * This is safe because reading less than a word size.
+        * This is safe because reading is less than a word size.
         *
         * a0 - start of aligned dst
         * a1 - start of src
@@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user)
         */
        /* calculating aligned word boundary for dst */
        andi    t1, t0, ~(SZREG-1)
-       /* Converting unaligned src to aligned arc */
+       /* Converting unaligned src to aligned src */
        andi    a1, a1, ~(SZREG-1)
 
        /*
@@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user)
         * t3 - prev shift
         * t4 - current shift
         */
-       slli    t3, a3, LGREG
+       slli    t3, a3, 3 /* converting bytes in a3 to bits */
        li      a5, SZREG*8
        sub     t4, a5, t3
 
-       /* Load the first word to combine with seceond word */
+       /* Load the first word to combine with second word */
        fixup REG_L   a5, 0(a1), 10f
 
 3:
@@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user)
         * a1 - start of remaining src
         * t0 - end of remaining dst
         */
-       bgeu    a0, t0, 5f
+       bgeu    a0, t0, .Lout_copy_user  /* check if end of copy */
 4:
        fixup lb      a5, 0(a1), 10f
        addi    a1, a1, 1       /* src */
@@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user)
        addi    a0, a0, 1       /* dst */
        bltu    a0, t0, 4b      /* t0 - end of dst */
 
-5:
+.Lout_copy_user:
        /* Disable access to user memory */
        csrc CSR_STATUS, t6
        li      a0, 0
index 269fc648ef3d84b24c99587aacf335dc36e58e23..a14bf3910eec958c656ae29ab6c343af6dd0d28a 100644 (file)
@@ -127,10 +127,17 @@ void __init mem_init(void)
 }
 
 /*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel,
+ * whereas for 64-bit kernel, the end of the virtual address space is occupied
+ * by the modules/BPF/kernel mappings which reduces the available size of the
+ * linear mapping.
+ * Limit the memory size via mem.
  */
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
 static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
 
 static int __init early_mem(char *p)
 {
@@ -152,7 +159,7 @@ static void __init setup_bootmem(void)
 {
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
-       phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+       phys_addr_t __maybe_unused max_mapped_addr;
        phys_addr_t dram_end;
 
 #ifdef CONFIG_XIP_KERNEL
@@ -175,14 +182,21 @@ static void __init setup_bootmem(void)
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
        dram_end = memblock_end_of_DRAM();
+
+#ifndef CONFIG_64BIT
        /*
         * memblock allocator is not aware of the fact that last 4K bytes of
         * the addressable memory can not be mapped because of IS_ERR_VALUE
         * macro. Make sure that last 4k bytes are not usable by memblock
-        * if end of dram is equal to maximum addressable memory.
+        * if end of dram is equal to maximum addressable memory.  For 64-bit
+        * kernel, this problem can't happen here as the end of the virtual
+        * address space is occupied by the kernel mapping then this check must
+        * be done in create_kernel_page_table.
         */
+       max_mapped_addr = __pa(~(ulong)0);
        if (max_mapped_addr == (dram_end - 1))
                memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
 
        min_low_pfn = PFN_UP(memblock_start_of_DRAM());
        max_low_pfn = max_pfn = PFN_DOWN(dram_end);
@@ -570,6 +584,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
        BUG_ON((kernel_map.phys_addr % map_size) != 0);
 
+#ifdef CONFIG_64BIT
+       /*
+        * The last 4K bytes of the addressable memory can not be mapped because
+        * of IS_ERR_VALUE macro.
+        */
+       BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
+
        pt_ops.alloc_pte = alloc_pte_early;
        pt_ops.get_pte_virt = get_pte_virt_early;
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -709,6 +731,8 @@ static void __init setup_vm_final(void)
                if (start <= __pa(PAGE_OFFSET) &&
                    __pa(PAGE_OFFSET) < end)
                        start = __pa(PAGE_OFFSET);
+               if (end >= __pa(PAGE_OFFSET) + memory_limit)
+                       end = __pa(PAGE_OFFSET) + memory_limit;
 
                map_size = best_map_size(start, end - start);
                for (pa = start; pa < end; pa += map_size) {
index f7c77cd518f2ba5fa2f9d21fac2d8a655988735b..5ff5fee028016158500ebf2ef65d48d283746e32 100644 (file)
@@ -9,16 +9,6 @@
 #include <asm/errno.h>
 #include <asm/sigp.h>
 
-#ifdef CC_USING_EXPOLINE
-       .pushsection .dma.text.__s390_indirect_jump_r14,"axG"
-__dma__s390_indirect_jump_r14:
-       larl    %r1,0f
-       ex      0,0(%r1)
-       j       .
-0:     br      %r14
-       .popsection
-#endif
-
        .section .dma.text,"ax"
 /*
  * Simplified version of expoline thunk. The normal thunks can not be used here,
@@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14:
  * affects a few functions that are not performance-relevant.
  */
        .macro BR_EX_DMA_r14
-#ifdef CC_USING_EXPOLINE
-       jg      __dma__s390_indirect_jump_r14
-#else
-       br      %r14
-#endif
+       larl    %r1,0f
+       ex      0,0(%r1)
+       j       .
+0:     br      %r14
        .endm
 
 /*
index 86afcc6b56bf745331f0669ce99a0d1c5ed2c477..7de253f766e81df6f3d3a0d10fd1c2c07bf8a6bf 100644 (file)
@@ -5,7 +5,12 @@ CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -28,14 +33,13 @@ CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
@@ -76,6 +80,7 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BLK_INLINE_ENCRYPTION=y
 CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
@@ -95,6 +100,7 @@ CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_SYSFS=y
 CONFIG_CMA_AREAS=7
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
@@ -158,6 +164,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
 CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
 CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -280,6 +287,7 @@ CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -384,12 +392,11 @@ CONFIG_VSOCKETS=m
 CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
-CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_PCI=y
-CONFIG_PCI_IOV=y
 # CONFIG_PCIEASPM is not set
 CONFIG_PCI_DEBUG=y
+CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_DEVTMPFS=y
@@ -436,7 +443,7 @@ CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_MD_CLUSTER=m
 CONFIG_BCACHE=m
-CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM=y
 CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -453,6 +460,7 @@ CONFIG_DM_MULTIPATH_ST=m
 CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_MULTIPATH_IOA=m
 CONFIG_DM_DELAY=m
+CONFIG_DM_INIT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
@@ -495,6 +503,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -551,7 +560,6 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_LEGACY_PTY_COUNT=0
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
 CONFIG_PPS=m
@@ -574,7 +582,6 @@ CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
@@ -619,6 +626,7 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -654,7 +662,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_STATS2=y
 CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
@@ -682,6 +689,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
@@ -696,6 +704,7 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_SM2=m
 CONFIG_CRYPTO_CURVE25519=m
@@ -843,7 +852,6 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
-CONFIG_TEST_LIST_SORT=y
 CONFIG_TEST_MIN_HEAP=y
 CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -853,3 +861,4 @@ CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_LIVEPATCH=m
index 71b49ea5b0583ea9e25d00cae53bee2ca824d83b..b671642967ba61b09cd89eb7347f9d662dfca8b4 100644 (file)
@@ -4,6 +4,11 @@ CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -26,14 +31,13 @@ CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
@@ -70,6 +74,7 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BLK_INLINE_ENCRYPTION=y
 CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
@@ -87,6 +92,7 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
+CONFIG_CMA_SYSFS=y
 CONFIG_CMA_AREAS=7
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
@@ -149,6 +155,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
 CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
 CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -271,6 +278,7 @@ CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -374,11 +382,10 @@ CONFIG_VSOCKETS=m
 CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
-CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_PCI=y
-CONFIG_PCI_IOV=y
 # CONFIG_PCIEASPM is not set
+CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_UEVENT_HELPER=y
@@ -427,7 +434,7 @@ CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_MD_CLUSTER=m
 CONFIG_BCACHE=m
-CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM=y
 CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -444,6 +451,7 @@ CONFIG_DM_MULTIPATH_ST=m
 CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_MULTIPATH_IOA=m
 CONFIG_DM_DELAY=m
+CONFIG_DM_INIT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
@@ -487,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -543,7 +552,6 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_LEGACY_PTY_COUNT=0
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
 # CONFIG_PTP_1588_CLOCK is not set
@@ -566,7 +574,6 @@ CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
@@ -607,6 +614,7 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -642,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_STATS2=y
 CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
@@ -669,6 +676,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
@@ -684,6 +692,7 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_SM2=m
 CONFIG_CRYPTO_CURVE25519=m
@@ -754,6 +763,7 @@ CONFIG_CRC8=m
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
@@ -781,3 +791,4 @@ CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_TEST_LIVEPATCH=m
index 76123a4b26ab06f221c209734b1b97b38cc1e46a..d576aaab27c919f388575d18cab3306821ea3365 100644 (file)
@@ -29,9 +29,9 @@ CONFIG_PARTITION_ADVANCED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
-# CONFIG_BOUNCE is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
+# CONFIG_PCPU_DEV_REFCNT is not set
 # CONFIG_ETHTOOL_NETLINK is not set
 CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_RAM=y
@@ -51,7 +51,6 @@ CONFIG_ZFCP=y
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
 # CONFIG_HW_RANDOM_S390 is not set
-CONFIG_RAW_DRIVER=y
 # CONFIG_HMC_DRV is not set
 # CONFIG_S390_TAPE is not set
 # CONFIG_VMCP is not set
index 695c61989f97c7c06becbb383bb553d89f195ca5..345cbe982a8bfa6c995185fa082647ab95fecc55 100644 (file)
@@ -19,6 +19,7 @@ void ftrace_caller(void);
 
 extern char ftrace_graph_caller_end;
 extern unsigned long ftrace_plt;
+extern void *ftrace_func;
 
 struct dyn_arch_ftrace { };
 
index c6ddeb5029b4960e028a91eeef329e3e208b6ce9..2d8f595d9196163abc0276e14adadd3d741c19c3 100644 (file)
@@ -40,6 +40,7 @@
  * trampoline (ftrace_plt), which clobbers also r1.
  */
 
+void *ftrace_func __read_mostly = ftrace_stub;
 unsigned long ftrace_plt;
 
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
@@ -85,6 +86,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
+       ftrace_func = func;
        return 0;
 }
 
index faf64c2f90f52e35b6b204534a750844461b3200..6b13797143a72acc0b476cf87e566752b321f826 100644 (file)
@@ -59,13 +59,13 @@ ENTRY(ftrace_caller)
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        aghik   %r2,%r0,-MCOUNT_INSN_SIZE
        lgrl    %r4,function_trace_op
-       lgrl    %r1,ftrace_trace_function
+       lgrl    %r1,ftrace_func
 #else
        lgr     %r2,%r0
        aghi    %r2,-MCOUNT_INSN_SIZE
        larl    %r4,function_trace_op
        lg      %r4,0(%r4)
-       larl    %r1,ftrace_trace_function
+       larl    %r1,ftrace_func
        lg      %r1,0(%r1)
 #endif
        lgr     %r3,%r14
index 975a00c8c564b32bba6b14bd6a91243b100f92bd..d7dc36ec0a60e987a84d17481ff923cc36fc5226 100644 (file)
@@ -745,7 +745,7 @@ static int __init cpumf_pmu_init(void)
        if (!cf_dbg) {
                pr_err("Registration of s390dbf(cpum_cf) failed\n");
                return -ENOMEM;
-       };
+       }
        debug_register_view(cf_dbg, &debug_sprintf_view);
 
        cpumf_pmu.attr_groups = cpumf_cf_event_group();
index b2349a3f4fa305bb412d012a714050c3b2a957fd..3457dcf103965339a5e41662b2a20c35746b05ff 100644 (file)
@@ -29,6 +29,7 @@ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
 
 obj-y += vdso32_wrapper.o
+targets += vdso32.lds
 CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
 
 # Disable gcov profiling, ubsan and kasan for VDSO code
index 63cae0476bb497e336d0945a8bc1f422895fb054..2ae419f5115a5af1cd6f2af488547191dd9f66f9 100644 (file)
@@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 {
        u32 r1 = reg2hex[b1];
 
-       if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
+       if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
                jit->seen_reg[r1] = 1;
 }
 
index cc8f1773deca74bef60d3f3975e74dd565c8b965..c890d67a64ad0e9f8c4b2c060e3eda68a1c33169 100644 (file)
@@ -237,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
        for_each_present_cpu(i) {
                if (i == 0)
                        continue;
-               ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
+               ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
                BUG_ON(ret);
        }
 
index 674906fad43b11fc09ee3b1f96feb0182332d71f..68f091ba84434bb2ef075bcedebb4ef0c4f59e6b 100644 (file)
@@ -79,9 +79,10 @@ __jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
        return (struct jump_label_patch){.code = code, .size = size};
 }
 
-static inline void __jump_label_transform(struct jump_entry *entry,
-                                         enum jump_label_type type,
-                                         int init)
+static __always_inline void
+__jump_label_transform(struct jump_entry *entry,
+                      enum jump_label_type type,
+                      int init)
 {
        const struct jump_label_patch jlp = __jump_label_patch(entry, type);
 
index 3364fe62b9037430b29df58837d0d9ca8defbc28..3481b35cb4ec7e05ba76848e325d189adda24964 100644 (file)
@@ -682,7 +682,6 @@ int p4d_clear_huge(p4d_t *p4d)
 }
 #endif
 
-#if CONFIG_PGTABLE_LEVELS > 3
 /**
  * pud_set_huge - setup kernel PUD mapping
  *
@@ -721,23 +720,6 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
        return 1;
 }
 
-/**
- * pud_clear_huge - clear kernel PUD mapping when it is set
- *
- * Returns 1 on success and 0 on failure (no PUD map is found).
- */
-int pud_clear_huge(pud_t *pud)
-{
-       if (pud_large(*pud)) {
-               pud_clear(pud);
-               return 1;
-       }
-
-       return 0;
-}
-#endif
-
-#if CONFIG_PGTABLE_LEVELS > 2
 /**
  * pmd_set_huge - setup kernel PMD mapping
  *
@@ -768,6 +750,21 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
        return 1;
 }
 
+/**
+ * pud_clear_huge - clear kernel PUD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PUD map is found).
+ */
+int pud_clear_huge(pud_t *pud)
+{
+       if (pud_large(*pud)) {
+               pud_clear(pud);
+               return 1;
+       }
+
+       return 0;
+}
+
 /**
  * pmd_clear_huge - clear kernel PMD mapping when it is set
  *
@@ -782,7 +779,6 @@ int pmd_clear_huge(pmd_t *pmd)
 
        return 0;
 }
-#endif
 
 #ifdef CONFIG_X86_64
 /**
index 9d872ea477a6c143b82bc34b0029047eaccf8f62..8f9940f40baa89b8d88d1b875a00db4d0cb388b2 100644 (file)
@@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE
 config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
        bool "Override ACPI tables from built-in initrd"
        depends on ACPI_TABLE_UPGRADE
-       depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION=""
+       depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE
        help
          This option provides functionality to override arbitrary ACPI tables
          from built-in uncompressed initrd.
index e7ddd281afff7f6ed9964c52e042e8d8393cf89a..d5cedffeeff915b7b571ae08a9298c26a3772015 100644 (file)
@@ -860,11 +860,9 @@ EXPORT_SYMBOL(acpi_dev_present);
  * Return the next match of ACPI device if another matching device was present
  * at the moment of invocation, or NULL otherwise.
  *
- * FIXME: The function does not tolerate the sudden disappearance of @adev, e.g.
- * in the case of a hotplug event. That said, the caller should ensure that
- * this will never happen.
- *
  * The caller is responsible for invoking acpi_dev_put() on the returned device.
+ * On the other hand the function invokes  acpi_dev_put() on the given @adev
+ * assuming that its reference counter had been increased beforehand.
  *
  * See additional information in acpi_dev_present() as well.
  */
@@ -880,6 +878,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
        match.hrv = hrv;
 
        dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
+       acpi_dev_put(adev);
        return dev ? to_acpi_device(dev) : NULL;
 }
 EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
index adc199dfba3cb3ff3e03ea3c98ae4645799fa442..6a30264ab2ba12d1369eed7fc88fa7e6767e0642 100644 (file)
@@ -231,6 +231,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
 int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
                                struct module *owner, const char *modname)
 {
+       int ret;
+
        if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
                return -EINVAL;
 
@@ -246,7 +248,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
        auxdrv->driver.bus = &auxiliary_bus_type;
        auxdrv->driver.mod_name = modname;
 
-       return driver_register(&auxdrv->driver);
+       ret = driver_register(&auxdrv->driver);
+       if (ret)
+               kfree(auxdrv->driver.name);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
 
index cadcade658253c3ca050c2906537f0ad714ab0ea..f6360490a4a30de4c6454ac7cd1253ee551f9e58 100644 (file)
@@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev,
                return;
        }
 
-       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
-       sysfs_remove_link(&con->kobj, buf);
+       if (device_is_registered(con)) {
+               snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+               sysfs_remove_link(&con->kobj, buf);
+       }
        snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
        kfree(buf);
index 531d390902dd65b7871adb7a25e99da80ebe732a..90b947c96402266af2a345cbd9eb6326df19f1f5 100644 (file)
@@ -4100,8 +4100,6 @@ again:
 
 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
 {
-       bool need_wait;
-
        dout("%s rbd_dev %p\n", __func__, rbd_dev);
        lockdep_assert_held_write(&rbd_dev->lock_rwsem);
 
@@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
         */
        rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
        rbd_assert(!completion_done(&rbd_dev->releasing_wait));
-       need_wait = !list_empty(&rbd_dev->running_list);
-       downgrade_write(&rbd_dev->lock_rwsem);
-       if (need_wait)
-               wait_for_completion(&rbd_dev->releasing_wait);
-       up_read(&rbd_dev->lock_rwsem);
+       if (list_empty(&rbd_dev->running_list))
+               return true;
+
+       up_write(&rbd_dev->lock_rwsem);
+       wait_for_completion(&rbd_dev->releasing_wait);
 
        down_write(&rbd_dev->lock_rwsem);
        if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
@@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
        if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
                down_write(&rbd_dev->lock_rwsem);
                if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
-                       /*
-                        * we already know that the remote client is
-                        * the owner
-                        */
-                       up_write(&rbd_dev->lock_rwsem);
-                       return;
+                       dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
+                            __func__, rbd_dev, cid.gid, cid.handle);
+               } else {
+                       rbd_set_owner_cid(rbd_dev, &cid);
                }
-
-               rbd_set_owner_cid(rbd_dev, &cid);
                downgrade_write(&rbd_dev->lock_rwsem);
        } else {
                down_read(&rbd_dev->lock_rwsem);
@@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
        if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
                down_write(&rbd_dev->lock_rwsem);
                if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
-                       dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
+                       dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
                             __func__, rbd_dev, cid.gid, cid.handle,
                             rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
-                       up_write(&rbd_dev->lock_rwsem);
-                       return;
+               } else {
+                       rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
                }
-
-               rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
                downgrade_write(&rbd_dev->lock_rwsem);
        } else {
                down_read(&rbd_dev->lock_rwsem);
@@ -4951,6 +4943,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
                disk->minors = RBD_MINORS_PER_MAJOR;
        }
        disk->fops = &rbd_bd_ops;
+       disk->private_data = rbd_dev;
 
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
index 22acde118bc352560f2d329a074b71959770ae32..fc9196f11cb7dbdc68756651944c42a8eded8764 100644 (file)
@@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
        cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 
        chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
-       mhi_chan = &mhi_cntrl->mhi_chan[chan];
-       write_lock_bh(&mhi_chan->lock);
-       mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
-       complete(&mhi_chan->completion);
-       write_unlock_bh(&mhi_chan->lock);
+
+       if (chan < mhi_cntrl->max_chan &&
+           mhi_cntrl->mhi_chan[chan].configured) {
+               mhi_chan = &mhi_cntrl->mhi_chan[chan];
+               write_lock_bh(&mhi_chan->lock);
+               mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+               complete(&mhi_chan->completion);
+               write_unlock_bh(&mhi_chan->lock);
+       } else {
+               dev_err(&mhi_cntrl->mhi_dev->dev,
+                       "Completion packet for invalid channel ID: %d\n", chan);
+       }
 
        mhi_del_ring_element(mhi_cntrl, mhi_ring);
 }
index ca3bc40427f8578e5d1a3a9a2ddc10e3c06a18b1..4dd1077354af0c540e753d9c8ea722ccdce6e191 100644 (file)
@@ -32,6 +32,8 @@
  * @edl: emergency download mode firmware path (if any)
  * @bar_num: PCI base address register to use for MHI MMIO register space
  * @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
+ *                of inband wake support (such as sdx24)
  */
 struct mhi_pci_dev_info {
        const struct mhi_controller_config *config;
@@ -40,6 +42,7 @@ struct mhi_pci_dev_info {
        const char *edl;
        unsigned int bar_num;
        unsigned int dma_data_width;
+       bool sideband_wake;
 };
 
 #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -72,6 +75,22 @@ struct mhi_pci_dev_info {
                .doorbell_mode_switch = false,          \
        }
 
+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
+       {                                               \
+               .num = ch_num,                          \
+               .name = ch_name,                        \
+               .num_elements = el_count,               \
+               .event_ring = ev_ring,                  \
+               .dir = DMA_FROM_DEVICE,                 \
+               .ee_mask = BIT(MHI_EE_AMSS),            \
+               .pollcfg = 0,                           \
+               .doorbell = MHI_DB_BRST_DISABLE,        \
+               .lpm_notify = false,                    \
+               .offload_channel = false,               \
+               .doorbell_mode_switch = false,          \
+               .auto_queue = true,                     \
+       }
+
 #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
        {                                       \
                .num_elements = el_count,       \
@@ -210,7 +229,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
        MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
        MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
        MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
-       MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+       MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
        MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
        MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
        MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
@@ -242,7 +261,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
        .edl = "qcom/sdx65m/edl.mbn",
        .config = &modem_qcom_v1_mhiv_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = false,
 };
 
 static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
@@ -251,7 +271,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
        .edl = "qcom/sdx55m/edl.mbn",
        .config = &modem_qcom_v1_mhiv_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = false,
 };
 
 static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
@@ -259,7 +280,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
        .edl = "qcom/prog_firehose_sdx24.mbn",
        .config = &modem_qcom_v1_mhiv_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = true,
 };
 
 static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
@@ -301,7 +323,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
        .edl = "qcom/prog_firehose_sdx24.mbn",
        .config = &modem_quectel_em1xx_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = true,
 };
 
 static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
@@ -339,7 +362,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
        .edl = "qcom/sdx55m/edl.mbn",
        .config = &modem_foxconn_sdx55_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = false,
 };
 
 static const struct pci_device_id mhi_pci_id_table[] = {
@@ -640,9 +664,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mhi_cntrl->status_cb = mhi_pci_status_cb;
        mhi_cntrl->runtime_get = mhi_pci_runtime_get;
        mhi_cntrl->runtime_put = mhi_pci_runtime_put;
-       mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
-       mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
-       mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+
+       if (info->sideband_wake) {
+               mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+               mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+               mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+       }
 
        err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
        if (err)
index 10d4457417a4ff6a3e42b6d2c39309acd8dae630..eb9c65f978419951329b9e02431c975bcceee052 100644 (file)
@@ -34,7 +34,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
                        break;
                if (!adev->pnp.unique_id && node->acpi.uid == 0)
                        break;
-               acpi_dev_put(adev);
        }
        if (!adev)
                return -ENODEV;
index 4b7ee3fa9224ff809a89b88bbae5cea765d9dbeb..847f33ffc4aedee73d07980e56859530626143e3 100644 (file)
@@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void)
 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
 {
        struct resource *res, *parent;
+       int ret;
 
        res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
@@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
 
        /* we expect a conflict with a 'System RAM' region */
        parent = request_resource_conflict(&iomem_resource, res);
-       return parent ? request_resource(parent, res) : 0;
+       ret = parent ? request_resource(parent, res) : 0;
+
+       /*
+        * Given that efi_mem_reserve_iomem() can be called at any
+        * time, only call memblock_reserve() if the architecture
+        * keeps the infrastructure around.
+        */
+       if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
+               memblock_reserve(addr, size);
+
+       return ret;
 }
 
 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
index aa8da0a4982941958faef955582c505815a952e5..ae87dded989db2e9ebbc4bb0c0c478f928fa257b 100644 (file)
@@ -630,8 +630,8 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
  * @image:     EFI loaded image protocol
  * @load_addr: pointer to loaded initrd
  * @load_size: size of loaded initrd
- * @soft_limit:        preferred size of allocated memory for loading the initrd
- * @hard_limit:        minimum size of allocated memory
+ * @soft_limit:        preferred address for loading the initrd
+ * @hard_limit:        upper limit address for loading the initrd
  *
  * Return:     status code
  */
index d8bc013406861c87f294f1becfb8b19de9579a83..38722d2009e2060f28523a12918509684f995712 100644 (file)
@@ -180,7 +180,10 @@ void __init efi_mokvar_table_init(void)
                pr_err("EFI MOKvar config table is not valid\n");
                return;
        }
-       efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
+       if (md.type == EFI_BOOT_SERVICES_DATA)
+               efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
        efi_mokvar_table_size = map_size_needed;
 }
 
index c1955d320fecd6548cf99e9692562bac10f3998f..8f665678e9e398760910bb54bfbdb544251a433b 100644 (file)
@@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void)
        tbl_size = sizeof(*log_tbl) + log_tbl->size;
        memblock_reserve(efi.tpm_log, tbl_size);
 
-       if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
-           log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
-               pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
+       if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
+               pr_info("TPM Final Events table not present\n");
+               goto out;
+       } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+               pr_warn(FW_BUG "TPM Final Events table invalid\n");
                goto out;
        }
 
index c0316eaba547c6577330ffefeea3c08161c7cc1d..8ac6eb9f1fdb8af08b4f510da6d748f150b6e7d9 100644 (file)
@@ -619,6 +619,13 @@ struct amdgpu_video_codec_info {
        u32 max_level;
 };
 
+#define codec_info_build(type, width, height, level) \
+                        .codec_type = type,\
+                        .max_width = width,\
+                        .max_height = height,\
+                        .max_pixels_per_frame = height * width,\
+                        .max_level = level,
+
 struct amdgpu_video_codecs {
        const u32 codec_count;
        const struct amdgpu_video_codec_info *codec_array;
index a959624d576b1006f943ef76cb4e5b158d11ff4a..2bd13fc2541a79db05edd46f5c9f2529504e4a8c 100644 (file)
@@ -1190,6 +1190,10 @@ static const struct pci_device_id pciidlist[] = {
        /* Van Gogh */
        {0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},
 
+       /* Yellow Carp */
+       {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+       {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+
        /* Navy_Flounder */
        {0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
        {0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
index a25160797b61af0f0ea1d5a0f25fc6ca15dbe2cc..611fd10c34102cc9bf5d7346e29aad8b534e5095 100644 (file)
@@ -255,6 +255,15 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;
 
+       /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
+        * for debugger access to invisible VRAM. Should have used MAP_SHARED
+        * instead. Clearing VM_MAYWRITE prevents the mapping from ever
+        * becoming writable and makes is_cow_mapping(vm_flags) false.
+        */
+       if (is_cow_mapping(vma->vm_flags) &&
+           !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+               vma->vm_flags &= ~VM_MAYWRITE;
+
        return drm_gem_ttm_mmap(obj, vma);
 }
 
index f5e9c022960bbfe4e0623e131a727145bbb289e2..a64b2c706090ea6abe0d38c2cf6d00e7e66a7fd9 100644 (file)
@@ -3300,6 +3300,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
 };
@@ -3379,6 +3380,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
@@ -3445,6 +3447,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG,  0x00000020, 0x00000020)
index 94a2c0742ee5e82ec392a3fb6c494e3312b84d2c..94d029dbf30da5c802493ccde07c36972ca5fc84 100644 (file)
 #include "smuio_v11_0.h"
 #include "smuio_v11_0_6.h"
 
-#define codec_info_build(type, width, height, level) \
-                        .codec_type = type,\
-                        .max_width = width,\
-                        .max_height = height,\
-                        .max_pixels_per_frame = height * width,\
-                        .max_level = level,
-
 static const struct amd_ip_funcs nv_common_ip_funcs;
 
 /* Navi */
 static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
 };
 
 static const struct amdgpu_video_codecs nv_video_codecs_encode =
@@ -101,55 +82,13 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode =
 /* Navi1x */
 static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs nv_video_codecs_decode =
@@ -161,62 +100,14 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
 /* Sienna Cichlid */
 static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs sc_video_codecs_decode =
@@ -228,80 +119,20 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode =
 /* SRIOV Sienna Cichlid, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
 };
 
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
@@ -333,6 +164,19 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = {
        .codec_array = NULL,
 };
 
+/* Yellow Carp*/
+static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+};
+
+static const struct amdgpu_video_codecs yc_video_codecs_decode = {
+       .codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
+       .codec_array = yc_video_codecs_decode_array,
+};
+
 static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
                                 const struct amdgpu_video_codecs **codecs)
 {
@@ -353,12 +197,17 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
-       case CHIP_YELLOW_CARP:
                if (encode)
                        *codecs = &nv_video_codecs_encode;
                else
                        *codecs = &sc_video_codecs_decode;
                return 0;
+       case CHIP_YELLOW_CARP:
+               if (encode)
+                       *codecs = &nv_video_codecs_encode;
+               else
+                       *codecs = &yc_video_codecs_decode;
+               return 0;
        case CHIP_BEIGE_GOBY:
                if (encode)
                        *codecs = &bg_video_codecs_encode;
@@ -1387,7 +1236,10 @@ static int nv_common_early_init(void *handle)
                        AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG;
-               adev->external_rev_id = adev->rev_id + 0x01;
+               if (adev->pdev->device == 0x1681)
+                       adev->external_rev_id = adev->rev_id + 0x19;
+               else
+                       adev->external_rev_id = adev->rev_id + 0x01;
                break;
        default:
                /* FIXME: not supported yet */
index b02436401d46f452ec1083ec75a5d9b8f70384db..b7d350be805020885d1f7bea3d1f9d5e39fbc2f0 100644 (file)
 /* Vega, Raven, Arcturus */
 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
 };
 
 static const struct amdgpu_video_codecs vega_video_codecs_encode =
@@ -113,48 +101,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
 /* Vega */
 static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -166,55 +118,13 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
 /* Raven */
 static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs rv_video_codecs_decode =
@@ -226,55 +136,13 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
 /* Renoir, Arcturus */
 static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs rn_video_codecs_decode =
index 513676a6f52bc2751376b31bf690d6564baa1ff1..af7004b770aeb9ae3c6265ad2272b04e9bf14983 100644 (file)
@@ -190,6 +190,10 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
                        &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
                        &num_levels);
 
+       /* SOCCLK */
+       dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
+                                       &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
+                                       &num_levels);
        // DPREFCLK ???
 
        /* DISPCLK */
index 7b7d884d58be097610d1dc6fa83fc1be12dc5bc1..4a4894e9d9c9a132ed4793995e9e0ee252520ea0 100644 (file)
 
 #include "dc_dmub_srv.h"
 
+#include "yellow_carp_offset.h"
+
+#define regCLK1_CLK_PLL_REQ                    0x0237
+#define regCLK1_CLK_PLL_REQ_BASE_IDX           0
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT    0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT   0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT   0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK      0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK     0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK     0xFFFF0000L
+
+#define REG(reg_name) \
+       (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
 #define TO_CLK_MGR_DCN31(clk_mgr)\
        container_of(clk_mgr, struct clk_mgr_dcn31, base)
 
@@ -124,10 +139,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
         * also if safe to lower is false, we just go in the higher state
         */
        if (safe_to_lower) {
-               if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW &&
-                               new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+               if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+                               new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
                        dcn31_smu_set_Z9_support(clk_mgr, true);
-                       clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+                       clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
                }
 
                if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
@@ -148,10 +163,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
                        }
                }
        } else {
-               if (new_clocks->z9_support == DCN_Z9_SUPPORT_DISALLOW &&
-                               new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+               if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
+                               new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
                        dcn31_smu_set_Z9_support(clk_mgr, false);
-                       clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+                       clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
                }
 
                if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
@@ -229,7 +244,32 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
 {
-       return 0;
+       /* get FbMult value */
+       struct fixed31_32 pll_req;
+       unsigned int fbmult_frac_val = 0;
+       unsigned int fbmult_int_val = 0;
+
+       /*
+        * Register value of fbmult is in 8.16 format, we are converting to 31.32
+        * to leverage the fix point operations available in driver
+        */
+
+       REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
+       REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+       pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+       /*
+        * since fractional part is only 16 bit in register definition but is 32 bit
+        * in our fix point definiton, need to shift left by 16 to obtain correct value
+        */
+       pll_req.value |= fbmult_frac_val << 16;
+
+       /* multiply by REFCLK period */
+       pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+       /* integer part is now VCO frequency in kHz */
+       return dc_fixpt_floor(pll_req);
 }
 
 static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -246,7 +286,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
        clk_mgr->clks.p_state_change_support = true;
        clk_mgr->clks.prev_p_state_change_support = true;
        clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
-       clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN;
+       clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
 }
 
 static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
@@ -260,7 +300,7 @@ static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
                return false;
        else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
                return false;
-       else if (a->z9_support != b->z9_support)
+       else if (a->zstate_support != b->zstate_support)
                return false;
        else if (a->dtbclk_en != b->dtbclk_en)
                return false;
@@ -592,6 +632,7 @@ void dcn31_clk_mgr_construct(
        clk_mgr->base.dprefclk_ss_percentage = 0;
        clk_mgr->base.dprefclk_ss_divider = 1000;
        clk_mgr->base.ss_on_dprefclk = false;
+       clk_mgr->base.dfs_ref_freq_khz = 48000;
 
        clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
                                clk_mgr->base.base.ctx,
index cc21cf75eafd4348ad9b2db6faec77a9fd35944a..f8f100535526df86985c4fb4d4ae1857e68e812a 100644 (file)
 #define __DCN31_CLK_MGR_H__
 #include "clk_mgr_internal.h"
 
-//CLK1_CLK_PLL_REQ
-#ifndef CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT                                                                   0x0
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT                                                                  0xc
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT                                                                  0x10
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int_MASK                                                                     0x000001FFL
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv_MASK                                                                    0x0000F000L
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac_MASK                                                                    0xFFFF0000L
-//CLK1_CLK0_DFS_CNTL
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER__SHIFT                                                               0x0
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER_MASK                                                                 0x0000007FL
-/*DPREF clock related*/
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-
-//CLK3_0_CLK3_CLK_PLL_REQ
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int__SHIFT                                                            0x0
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv__SHIFT                                                           0xc
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac__SHIFT                                                           0x10
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int_MASK                                                              0x000001FFL
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv_MASK                                                             0x0000F000L
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac_MASK                                                             0xFFFF0000L
-
-#define mmCLK0_CLK3_DFS_CNTL                            0x16C60
-#define mmCLK00_CLK0_CLK3_DFS_CNTL                      0x16C60
-#define mmCLK01_CLK0_CLK3_DFS_CNTL                      0x16E60
-#define mmCLK02_CLK0_CLK3_DFS_CNTL                      0x17060
-#define mmCLK03_CLK0_CLK3_DFS_CNTL                      0x17260
-
-#define mmCLK0_CLK_PLL_REQ                              0x16C10
-#define mmCLK00_CLK0_CLK_PLL_REQ                        0x16C10
-#define mmCLK01_CLK0_CLK_PLL_REQ                        0x16E10
-#define mmCLK02_CLK0_CLK_PLL_REQ                        0x17010
-#define mmCLK03_CLK0_CLK_PLL_REQ                        0x17210
-
-#define mmCLK1_CLK_PLL_REQ                              0x1B00D
-#define mmCLK10_CLK1_CLK_PLL_REQ                        0x1B00D
-#define mmCLK11_CLK1_CLK_PLL_REQ                        0x1B20D
-#define mmCLK12_CLK1_CLK_PLL_REQ                        0x1B40D
-#define mmCLK13_CLK1_CLK_PLL_REQ                        0x1B60D
-
-#define mmCLK2_CLK_PLL_REQ                              0x17E0D
-
-/*AMCLK*/
-#define mmCLK11_CLK1_CLK0_DFS_CNTL                      0x1B23F
-#define mmCLK11_CLK1_CLK_PLL_REQ                        0x1B20D
-#endif
-
 struct dcn31_watermarks;
 
 struct dcn31_smu_watermark_set {
index 6da226bf11d59612e6b83732a94febcbb614ef70..9fb8c46dc606981aa4711a0b51862cc0e1389a1b 100644 (file)
@@ -1820,8 +1820,7 @@ bool perform_link_training_with_retries(
                                         */
                                        panel_mode = DP_PANEL_MODE_DEFAULT;
                                }
-                       } else
-                               panel_mode = DP_PANEL_MODE_DEFAULT;
+                       }
                }
 #endif
 
@@ -4650,7 +4649,10 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
                }
        }
 
-       if (link->dpcd_caps.panel_mode_edp) {
+       if (link->dpcd_caps.panel_mode_edp &&
+               (link->connector_signal == SIGNAL_TYPE_EDP ||
+                (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+                 link->is_internal_display))) {
                return DP_PANEL_MODE_EDP;
        }
 
@@ -4914,9 +4916,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
 {
        uint32_t default_backlight;
 
-       if (link &&
-               (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
-               link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+       if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
                if (!dc_link_read_default_bl_aux(link, &default_backlight))
                        default_backlight = 150000;
                // if < 5 nits or > 5000, it might be wrong readback
index a6a67244a322e93628c7a3eb15e40f64ebef7263..1596f6b7fed7c14475949ee0bebf0be2606249f7 100644 (file)
@@ -1062,7 +1062,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
         * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
         * did not show such problems, so this seems to be the exception.
         */
-       if (plane_state->ctx->dce_version != DCE_VERSION_11_0)
+       if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
        else
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
index 45640f1c26c4bf79d6ea8419e0a55acda7c67591..8dcea8ff5c5ad766dc35773910910d7ee41adbe9 100644 (file)
@@ -354,10 +354,10 @@ enum dcn_pwr_state {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-enum dcn_z9_support_state {
-       DCN_Z9_SUPPORT_UNKNOWN,
-       DCN_Z9_SUPPORT_ALLOW,
-       DCN_Z9_SUPPORT_DISALLOW,
+enum dcn_zstate_support_state {
+       DCN_ZSTATE_SUPPORT_UNKNOWN,
+       DCN_ZSTATE_SUPPORT_ALLOW,
+       DCN_ZSTATE_SUPPORT_DISALLOW,
 };
 #endif
 /*
@@ -378,7 +378,7 @@ struct dc_clocks {
        int dramclk_khz;
        bool p_state_change_support;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       enum dcn_z9_support_state z9_support;
+       enum dcn_zstate_support_state zstate_support;
        bool dtbclk_en;
 #endif
        enum dcn_pwr_state pwr_state;
index df6539e4c73014d569e2b48ff60064a1d3d1eae8..0464a8f3db3c0b587ec0b141dd4c4c3722f26b4a 100644 (file)
@@ -636,6 +636,7 @@ struct dce_hwseq_registers {
        uint32_t ODM_MEM_PWR_CTRL3;
        uint32_t DMU_MEM_PWR_CNTL;
        uint32_t MMHUBBUB_MEM_PWR_CNTL;
+       uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
 };
  /* set field name */
 #define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1110,7 +1111,8 @@ struct dce_hwseq_registers {
        type DOMAIN_POWER_FORCEON;\
        type DOMAIN_POWER_GATE;\
        type DOMAIN_PGFSM_PWR_STATUS;\
-       type HPO_HDMISTREAMCLK_G_GATE_DIS;
+       type HPO_HDMISTREAMCLK_G_GATE_DIS;\
+       type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
 
 struct dce_hwseq_shift {
        HWSEQ_REG_FIELD_LIST(uint8_t)
index 673b93f4fea519f3117286518a27abd6fd8a2b86..cb9767ddf93d3863190e414e4df57aaa96100cbb 100644 (file)
@@ -217,6 +217,8 @@ static void dpp1_dscl_set_lb(
        const struct line_buffer_params *lb_params,
        enum lb_memory_config mem_size_config)
 {
+       uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */
+
        /* LB */
        if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
                /* DSCL caps: pixel data processed in fixed format */
@@ -239,9 +241,12 @@ static void dpp1_dscl_set_lb(
                        LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
        }
 
+       if (dpp->base.caps->max_lb_partitions == 31)
+               max_partitions = 31;
+
        REG_SET_2(LB_MEMORY_CTRL, 0,
                MEMORY_CONFIG, mem_size_config,
-               LB_MAX_PARTITIONS, 63);
+               LB_MAX_PARTITIONS, max_partitions);
 }
 
 static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
index 1b05a37b674d0a6416aec8cd34b864be012c9c62..b173fa3653b555adf0fb72c9ccfef39ad7423b30 100644 (file)
@@ -2093,8 +2093,10 @@ int dcn20_populate_dml_pipes_from_context(
                                - timing->v_border_bottom;
                pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
                pipes[pipe_cnt].pipe.dest.vtotal = v_total;
-               pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
-               pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
+               pipes[pipe_cnt].pipe.dest.hactive =
+                       timing->h_addressable + timing->h_border_left + timing->h_border_right;
+               pipes[pipe_cnt].pipe.dest.vactive =
+                       timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
                pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
                pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
                if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
@@ -3079,6 +3081,37 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
        return false;
 }
 
+static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+       int plane_count;
+       int i;
+
+       plane_count = 0;
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (context->res_ctx.pipe_ctx[i].plane_state)
+                       plane_count++;
+       }
+
+       /*
+        * Zstate is allowed in following scenarios:
+        *      1. Single eDP with PSR enabled
+        *      2. 0 planes (No memory requests)
+        *      3. Single eDP without PSR but > 5ms stutter period
+        */
+       if (plane_count == 0)
+               return DCN_ZSTATE_SUPPORT_ALLOW;
+       else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+               struct dc_link *link = context->streams[0]->sink->link;
+
+               if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
+                               || context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+                       return DCN_ZSTATE_SUPPORT_ALLOW;
+               else
+                       return DCN_ZSTATE_SUPPORT_DISALLOW;
+       } else
+               return DCN_ZSTATE_SUPPORT_DISALLOW;
+}
+
 void dcn20_calculate_dlg_params(
                struct dc *dc, struct dc_state *context,
                display_e2e_pipe_params_st *pipes,
@@ -3086,7 +3119,6 @@ void dcn20_calculate_dlg_params(
                int vlevel)
 {
        int i, pipe_idx;
-       int plane_count;
 
        /* Writeback MCIF_WB arbitration parameters */
        dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3102,17 +3134,7 @@ void dcn20_calculate_dlg_params(
                                                        != dm_dram_clock_change_unsupported;
        context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 
-       context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
-                       DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
-
-       plane_count = 0;
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               if (context->res_ctx.pipe_ctx[i].plane_state)
-                       plane_count++;
-       }
-
-       if (plane_count == 0)
-               context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
+       context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
 
        context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
 
index 2140b75540cfe187202833e55f591034ea38545d..23a52d47e61c4bdad7b89dd5401d161eb78a8b45 100644 (file)
@@ -383,13 +383,6 @@ bool dpp3_get_optimal_number_of_taps(
        int min_taps_y, min_taps_c;
        enum lb_memory_config lb_config;
 
-       /* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
-       if (scl_data->viewport.width  != scl_data->h_active &&
-               scl_data->viewport.height != scl_data->v_active &&
-               dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-               scl_data->format == PIXEL_FORMAT_FP16)
-               return false;
-
        if (scl_data->viewport.width > scl_data->h_active &&
                dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
                scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
@@ -1440,15 +1433,6 @@ bool dpp3_construct(
        dpp->tf_shift = tf_shift;
        dpp->tf_mask = tf_mask;
 
-       dpp->lb_pixel_depth_supported =
-               LB_PIXEL_DEPTH_18BPP |
-               LB_PIXEL_DEPTH_24BPP |
-               LB_PIXEL_DEPTH_30BPP |
-               LB_PIXEL_DEPTH_36BPP;
-
-       dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
-       dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
-
        return true;
 }
 
index 3fa86cd090a087efd4d5421947c465794ccfd66e..ac644ae6b9f26adba0c7103bc443148757498be7 100644 (file)
        SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
        SRI(CURSOR_CONTROL, CURSOR0_, id),\
        SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+       SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
        SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
 
 #define DPP_REG_LIST_DCN30(id)\
        SRI(CM_SHAPER_LUT_DATA, CM, id),\
        SRI(CM_MEM_PWR_CTRL2, CM, id), \
        SRI(CM_MEM_PWR_STATUS2, CM, id), \
-       SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
-       SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \
        SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\
        SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\
        SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\
index 16a75ba0ca824dcac51d264e3945f11e41b88904..7d3ff5d4440235e2f32e219eac69eea17c09e27c 100644 (file)
@@ -1398,11 +1398,18 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
                        dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
                        dcn3_02_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
                        dcn3_02_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
-                       dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
+                       /* Populate from bw_params for DTBCLK, SOCCLK */
+                       if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
+                       else
+                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = bw_params->clk_table.entries[i].dtbclk_mhz;
+                       if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+                               dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
+                       else
+                               dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
                        /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
-                       /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+                       /* FCLK, PHYCLK_D18, DSCCLK */
                        dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
-                       dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
                        dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
                }
                /* re-init DML with updated bb */
index 34b89464ae0224da0f83abaf6f7834e002b88039..833ab13fa83401e9fc5933cdcc775185a2e166ba 100644 (file)
@@ -1326,11 +1326,18 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
                        dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
                        dcn3_03_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
                        dcn3_03_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
-                       dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[0].dtbclk_mhz;
+                       /* Populate from bw_params for DTBCLK, SOCCLK */
+                       if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+                               dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
+                       else
+                               dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+                       if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+                               dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
+                       else
+                               dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
                        /* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */
-                       /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+                       /* FCLK, PHYCLK_D18, DSCCLK */
                        dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
-                       dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[0].socclk_mhz;
                        dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
                }
                /* re-init DML with updated bb */
index 836864a5a5dc0bafad2ebec8004cba84d61ad44b..6ac6faf0c533babff7fc13421f059815affe2d75 100644 (file)
@@ -47,6 +47,7 @@
 #include "dce/dmub_outbox.h"
 #include "dc_link_dp.h"
 #include "inc/link_dpcd.h"
+#include "dcn10/dcn10_hw_sequencer.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -594,3 +595,20 @@ bool dcn31_is_abm_supported(struct dc *dc,
        }
        return false;
 }
+
+static void apply_riommu_invalidation_wa(struct dc *dc)
+{
+       struct dce_hwseq *hws = dc->hwseq;
+
+       if (!hws->wa.early_riommu_invalidation)
+               return;
+
+       REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
+}
+
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
+{
+       dcn10_init_pipes(dc, context);
+       apply_riommu_invalidation_wa(dc);
+
+}
index ff72f0fdd5befea9aaec36c2a709c1142c7c84b6..40dfebe78fdd1284c33784aecd666aaba94360e9 100644 (file)
@@ -52,5 +52,6 @@ void dcn31_reset_hw_ctx_wrap(
                struct dc_state *context);
 bool dcn31_is_abm_supported(struct dc *dc,
                struct dc_state *context, struct dc_stream_state *stream);
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
 
 #endif /* __DC_HWSS_DCN31_H__ */
index e3048f8827d2b5c988d7448bc6270419df8a777e..aaf2dbd095fe1db82d00486fbd4c6af1401423b5 100644 (file)
@@ -93,7 +93,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .set_flip_control_gsl = dcn20_set_flip_control_gsl,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
        .calc_vupdate_position = dcn10_calc_vupdate_position,
-       .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
        .set_backlight_level = dcn21_set_backlight_level,
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
@@ -104,7 +103,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
 };
 
 static const struct hwseq_private_funcs dcn31_private_funcs = {
-       .init_pipes = dcn10_init_pipes,
+       .init_pipes = dcn31_init_pipes,
        .update_plane_addr = dcn20_update_plane_addr,
        .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
        .update_mpcc = dcn20_update_mpcc,
index c67bc9544f5d9d69ca782adde42c8b6dde79dbdd..38c010afade150b0648e8b7960ff5778501296df 100644 (file)
@@ -220,6 +220,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
        .sr_exit_z8_time_us = 402.0,
        .sr_enter_plus_exit_z8_time_us = 520.0,
        .writeback_latency_us = 12.0,
+       .dram_channel_width_bytes = 4,
        .round_trip_ping_latency_dcfclk_cycles = 106,
        .urgent_latency_pixel_data_only_us = 4.0,
        .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -741,6 +742,7 @@ static const struct dccg_mask dccg_mask = {
 
 #define HWSEQ_DCN31_REG_LIST()\
        SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+       SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
        SR(DIO_MEM_PWR_CTRL), \
        SR(ODM_MEM_PWR_CTRL3), \
        SR(DMU_MEM_PWR_CNTL), \
@@ -801,6 +803,7 @@ static const struct dce_hwseq_registers hwseq_reg = {
 #define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
        HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+       HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
        HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
        HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
        HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
@@ -1299,6 +1302,7 @@ static struct dce_hwseq *dcn31_hwseq_create(
                hws->regs = &hwseq_reg;
                hws->shifts = &hwseq_shift;
                hws->masks = &hwseq_mask;
+               hws->wa.early_riommu_invalidation = true;
        }
        return hws;
 }
index c26e742e81377384fe8a1533083871f3e28e59e6..d25a7d38d21feed634a3a0fab81fe40272fe404a 100644 (file)
@@ -4889,7 +4889,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                }
                        } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
                                        && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
-                                               || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
+                                               || mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode));
 
                        if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
                                mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
index 2a0db2b03047e3cdc28acc589813fa1e67b7febb..9ac9d5e8df8b9117c5d1b7f122c68fa2d29fb743 100644 (file)
@@ -289,6 +289,9 @@ struct dpp_caps {
        /* DSCL processing pixel data in fixed or float format */
        enum dscl_data_processing_format dscl_data_proc_format;
 
+       /* max LB partitions */
+       unsigned int max_lb_partitions;
+
        /* Calculates the number of partitions in the line buffer.
         * The implementation of this function is overloaded for
         * different versions of DSCL LB.
index f7f7e4fff0c294cee232f1d0d992733a86c2dfe6..082549f75978450e6f35140163e3f5b39d24f908 100644 (file)
@@ -41,6 +41,7 @@ struct dce_hwseq_wa {
        bool DEGVIDCN10_254;
        bool DEGVIDCN21;
        bool disallow_self_refresh_during_multi_plane_transition;
+       bool early_riommu_invalidation;
 };
 
 struct hwseq_wa_state {
index 610266088ff1c9668fa94a15bb955a3c2b7fbb11..35fa0d8e92dd3a67e0ae540386410195b3ede64f 100644 (file)
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow  0x41
 
 #define PPSMC_MSG_GfxDriverResetRecovery       0x42
-#define PPSMC_Message_Count                    0x43
+#define PPSMC_MSG_BoardPowerCalibration        0x43
+#define PPSMC_Message_Count                    0x44
 
 //PPSMC Reset Types
 #define PPSMC_RESET_TYPE_WARM_RESET              0x00
index 89a16dcd0fff90abd2c7bffbd25ab357a1797c23..1d3765b873df4d11168c1628d66aff98fe5a1b0a 100644 (file)
        __SMU_DUMMY_MAP(DisableDeterminism),            \
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
-       __SMU_DUMMY_MAP(GfxDriverResetRecovery),
+       __SMU_DUMMY_MAP(GfxDriverResetRecovery),        \
+       __SMU_DUMMY_MAP(BoardPowerCalibration),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index 1962a587719113567c66706af79423200f9728a8..f61b5c914a3d92458edf40b8378582289938a5b9 100644 (file)
@@ -34,7 +34,7 @@
 #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
 #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
-#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9
+#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
 
 /* MP Apertures */
 #define MP0_Public                     0x03800000
index 9316a726195cc00f949eba5915f5e18339d341f6..cb5485cf243f38295f657ecb13b3e0dcf5d48611 100644 (file)
@@ -134,6 +134,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
        MSG_MAP(DisableDeterminism,                  PPSMC_MSG_DisableDeterminism,              0),
        MSG_MAP(SetUclkDpmMode,                      PPSMC_MSG_SetUclkDpmMode,                  0),
        MSG_MAP(GfxDriverResetRecovery,              PPSMC_MSG_GfxDriverResetRecovery,          0),
+       MSG_MAP(BoardPowerCalibration,               PPSMC_MSG_BoardPowerCalibration,           0),
 };
 
 static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -440,6 +441,39 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
        return ret;
 }
 
+static bool aldebaran_is_primary(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
+               return adev->smuio.funcs->get_die_id(adev) == 0;
+
+       return true;
+}
+
+static int aldebaran_run_board_btc(struct smu_context *smu)
+{
+       u32 smu_version;
+       int ret;
+
+       if (!aldebaran_is_primary(smu))
+               return 0;
+
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+       if (ret) {
+               dev_err(smu->adev->dev, "Failed to get smu version!\n");
+               return ret;
+       }
+       if (smu_version <= 0x00441d00)
+               return 0;
+
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
+       if (ret)
+               dev_err(smu->adev->dev, "Board power calibration failed!\n");
+
+       return ret;
+}
+
 static int aldebaran_run_btc(struct smu_context *smu)
 {
        int ret;
@@ -447,6 +481,8 @@ static int aldebaran_run_btc(struct smu_context *smu)
        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
        if (ret)
                dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+       else
+               ret = aldebaran_run_board_btc(smu);
 
        return ret;
 }
@@ -524,16 +560,6 @@ static int aldebaran_freqs_in_same_level(int32_t frequency1,
        return (abs(frequency1 - frequency2) <= EPSILON);
 }
 
-static bool aldebaran_is_primary(struct smu_context *smu)
-{
-       struct amdgpu_device *adev = smu->adev;
-
-       if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
-               return adev->smuio.funcs->get_die_id(adev) == 0;
-
-       return true;
-}
-
 static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
                                          MetricsMember_t member,
                                          uint32_t *value)
index 98ae006616565a3b71f504709443b4cfe46a8686..f454e04240860368df92275f4918c6fcc5d597d0 100644 (file)
@@ -834,6 +834,9 @@ long drm_ioctl(struct file *filp,
        if (drm_dev_is_unplugged(dev))
                return -ENODEV;
 
+       if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+               return -ENOTTY;
+
        is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
 
        if (is_driver_ioctl) {
index 8e195fa7626a8173056ec7106d05b56b04aa0b86..989ff064cf3ab2b52403c3c881fe4f93bb5e5276 100644 (file)
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_ioctls.h"
-#include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
-#include "i915_memcpy.h"
 
 struct eb_vma {
        struct i915_vma *vma;
@@ -1456,6 +1454,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
                int err;
                struct intel_engine_cs *engine = eb->engine;
 
+               /* If we need to copy for the cmdparser, we will stall anyway */
+               if (eb_use_cmdparser(eb))
+                       return ERR_PTR(-EWOULDBLOCK);
+
                if (!reloc_can_use_engine(engine)) {
                        engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
                        if (!engine)
@@ -2372,217 +2374,6 @@ shadow_batch_pin(struct i915_execbuffer *eb,
        return vma;
 }
 
-struct eb_parse_work {
-       struct dma_fence_work base;
-       struct intel_engine_cs *engine;
-       struct i915_vma *batch;
-       struct i915_vma *shadow;
-       struct i915_vma *trampoline;
-       unsigned long batch_offset;
-       unsigned long batch_length;
-       unsigned long *jump_whitelist;
-       const void *batch_map;
-       void *shadow_map;
-};
-
-static int __eb_parse(struct dma_fence_work *work)
-{
-       struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
-       int ret;
-       bool cookie;
-
-       cookie = dma_fence_begin_signalling();
-       ret = intel_engine_cmd_parser(pw->engine,
-                                     pw->batch,
-                                     pw->batch_offset,
-                                     pw->batch_length,
-                                     pw->shadow,
-                                     pw->jump_whitelist,
-                                     pw->shadow_map,
-                                     pw->batch_map);
-       dma_fence_end_signalling(cookie);
-
-       return ret;
-}
-
-static void __eb_parse_release(struct dma_fence_work *work)
-{
-       struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
-
-       if (!IS_ERR_OR_NULL(pw->jump_whitelist))
-               kfree(pw->jump_whitelist);
-
-       if (pw->batch_map)
-               i915_gem_object_unpin_map(pw->batch->obj);
-       else
-               i915_gem_object_unpin_pages(pw->batch->obj);
-
-       i915_gem_object_unpin_map(pw->shadow->obj);
-
-       if (pw->trampoline)
-               i915_active_release(&pw->trampoline->active);
-       i915_active_release(&pw->shadow->active);
-       i915_active_release(&pw->batch->active);
-}
-
-static const struct dma_fence_work_ops eb_parse_ops = {
-       .name = "eb_parse",
-       .work = __eb_parse,
-       .release = __eb_parse_release,
-};
-
-static inline int
-__parser_mark_active(struct i915_vma *vma,
-                    struct intel_timeline *tl,
-                    struct dma_fence *fence)
-{
-       struct intel_gt_buffer_pool_node *node = vma->private;
-
-       return i915_active_ref(&node->active, tl->fence_context, fence);
-}
-
-static int
-parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
-{
-       int err;
-
-       mutex_lock(&tl->mutex);
-
-       err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
-       if (err)
-               goto unlock;
-
-       if (pw->trampoline) {
-               err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
-               if (err)
-                       goto unlock;
-       }
-
-unlock:
-       mutex_unlock(&tl->mutex);
-       return err;
-}
-
-static int eb_parse_pipeline(struct i915_execbuffer *eb,
-                            struct i915_vma *shadow,
-                            struct i915_vma *trampoline)
-{
-       struct eb_parse_work *pw;
-       struct drm_i915_gem_object *batch = eb->batch->vma->obj;
-       bool needs_clflush;
-       int err;
-
-       GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
-       GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
-
-       pw = kzalloc(sizeof(*pw), GFP_KERNEL);
-       if (!pw)
-               return -ENOMEM;
-
-       err = i915_active_acquire(&eb->batch->vma->active);
-       if (err)
-               goto err_free;
-
-       err = i915_active_acquire(&shadow->active);
-       if (err)
-               goto err_batch;
-
-       if (trampoline) {
-               err = i915_active_acquire(&trampoline->active);
-               if (err)
-                       goto err_shadow;
-       }
-
-       pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
-       if (IS_ERR(pw->shadow_map)) {
-               err = PTR_ERR(pw->shadow_map);
-               goto err_trampoline;
-       }
-
-       needs_clflush =
-               !(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
-       pw->batch_map = ERR_PTR(-ENODEV);
-       if (needs_clflush && i915_has_memcpy_from_wc())
-               pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
-
-       if (IS_ERR(pw->batch_map)) {
-               err = i915_gem_object_pin_pages(batch);
-               if (err)
-                       goto err_unmap_shadow;
-               pw->batch_map = NULL;
-       }
-
-       pw->jump_whitelist =
-               intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
-                                                            trampoline);
-       if (IS_ERR(pw->jump_whitelist)) {
-               err = PTR_ERR(pw->jump_whitelist);
-               goto err_unmap_batch;
-       }
-
-       dma_fence_work_init(&pw->base, &eb_parse_ops);
-
-       pw->engine = eb->engine;
-       pw->batch = eb->batch->vma;
-       pw->batch_offset = eb->batch_start_offset;
-       pw->batch_length = eb->batch_len;
-       pw->shadow = shadow;
-       pw->trampoline = trampoline;
-
-       /* Mark active refs early for this worker, in case we get interrupted */
-       err = parser_mark_active(pw, eb->context->timeline);
-       if (err)
-               goto err_commit;
-
-       err = dma_resv_reserve_shared(pw->batch->resv, 1);
-       if (err)
-               goto err_commit;
-
-       err = dma_resv_reserve_shared(shadow->resv, 1);
-       if (err)
-               goto err_commit;
-
-       /* Wait for all writes (and relocs) into the batch to complete */
-       err = i915_sw_fence_await_reservation(&pw->base.chain,
-                                             pw->batch->resv, NULL, false,
-                                             0, I915_FENCE_GFP);
-       if (err < 0)
-               goto err_commit;
-
-       /* Keep the batch alive and unwritten as we parse */
-       dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
-
-       /* Force execution to wait for completion of the parser */
-       dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
-
-       dma_fence_work_commit_imm(&pw->base);
-       return 0;
-
-err_commit:
-       i915_sw_fence_set_error_once(&pw->base.chain, err);
-       dma_fence_work_commit_imm(&pw->base);
-       return err;
-
-err_unmap_batch:
-       if (pw->batch_map)
-               i915_gem_object_unpin_map(batch);
-       else
-               i915_gem_object_unpin_pages(batch);
-err_unmap_shadow:
-       i915_gem_object_unpin_map(shadow->obj);
-err_trampoline:
-       if (trampoline)
-               i915_active_release(&trampoline->active);
-err_shadow:
-       i915_active_release(&shadow->active);
-err_batch:
-       i915_active_release(&eb->batch->vma->active);
-err_free:
-       kfree(pw);
-       return err;
-}
-
 static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
 {
        /*
@@ -2672,7 +2463,15 @@ static int eb_parse(struct i915_execbuffer *eb)
                goto err_trampoline;
        }
 
-       err = eb_parse_pipeline(eb, shadow, trampoline);
+       err = dma_resv_reserve_shared(shadow->resv, 1);
+       if (err)
+               goto err_trampoline;
+
+       err = intel_engine_cmd_parser(eb->engine,
+                                     eb->batch->vma,
+                                     eb->batch_start_offset,
+                                     eb->batch_len,
+                                     shadow, trampoline);
        if (err)
                goto err_unpin_batch;
 
index 4df505e4c53ae9539e109ec8da49e5274a82a082..16162fc2782dc2670838fd8b33917731fa7fa8b7 100644 (file)
@@ -125,6 +125,10 @@ static int igt_gpu_reloc(void *arg)
        intel_gt_pm_get(&eb.i915->gt);
 
        for_each_uabi_engine(eb.engine, eb.i915) {
+               if (intel_engine_requires_cmd_parser(eb.engine) ||
+                   intel_engine_using_cmd_parser(eb.engine))
+                       continue;
+
                reloc_cache_init(&eb.reloc_cache, eb.i915);
                memset(map, POISON_INUSE, 4096);
 
index 98eb48c24c46cea12c92c4406c3533b1895aee18..06024d321a1a5c3fe660754071ffd543865bf8dc 100644 (file)
@@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        if (drm_WARN_ON(&i915->drm, !engine))
                return -EINVAL;
 
+       /*
+        * Due to d3_entered is used to indicate skipping PPGTT invalidation on
+        * vGPU reset, it's set on D0->D3 on PCI config write, and cleared after
+        * vGPU reset if in resuming.
+        * In S0ix exit, the device power state also transite from D3 to D0 as
+        * S3 resume, but no vGPU reset (triggered by QEMU devic model). After
+        * S0ix exit, all engines continue to work. However the d3_entered
+        * remains set which will break next vGPU reset logic (miss the expected
+        * PPGTT invalidation).
+        * Engines can only work in D0. Thus the 1st elsp write gives GVT a
+        * chance to clear d3_entered.
+        */
+       if (vgpu->d3_entered)
+               vgpu->d3_entered = false;
+
        execlist = &vgpu->submission.execlist[engine->id];
 
        execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
index 3992c25a191daed2e2447f33029a7fea854413a8..a3b4d99d64b91b1d7c4b979b632e3589fbc51793 100644 (file)
@@ -1145,19 +1145,41 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
 static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                       struct drm_i915_gem_object *src_obj,
                       unsigned long offset, unsigned long length,
-                      void *dst, const void *src)
+                      bool *needs_clflush_after)
 {
-       bool needs_clflush =
-               !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
-       if (src) {
-               GEM_BUG_ON(!needs_clflush);
-               i915_unaligned_memcpy_from_wc(dst, src + offset, length);
-       } else {
-               struct scatterlist *sg;
+       unsigned int src_needs_clflush;
+       unsigned int dst_needs_clflush;
+       void *dst, *src;
+       int ret;
+
+       ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
+       if (ret)
+               return ERR_PTR(ret);
+
+       dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+       i915_gem_object_finish_access(dst_obj);
+       if (IS_ERR(dst))
+               return dst;
+
+       ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+       if (ret) {
+               i915_gem_object_unpin_map(dst_obj);
+               return ERR_PTR(ret);
+       }
+
+       src = ERR_PTR(-ENODEV);
+       if (src_needs_clflush && i915_has_memcpy_from_wc()) {
+               src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+               if (!IS_ERR(src)) {
+                       i915_unaligned_memcpy_from_wc(dst,
+                                                     src + offset,
+                                                     length);
+                       i915_gem_object_unpin_map(src_obj);
+               }
+       }
+       if (IS_ERR(src)) {
+               unsigned long x, n, remain;
                void *ptr;
-               unsigned int x, sg_ofs;
-               unsigned long remain;
 
                /*
                 * We can avoid clflushing partial cachelines before the write
@@ -1168,40 +1190,34 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                 * validate up to the end of the batch.
                 */
                remain = length;
-               if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+               if (dst_needs_clflush & CLFLUSH_BEFORE)
                        remain = round_up(remain,
                                          boot_cpu_data.x86_clflush_size);
 
                ptr = dst;
                x = offset_in_page(offset);
-               sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false);
-
-               while (remain) {
-                       unsigned long sg_max = sg->length >> PAGE_SHIFT;
-
-                       for (; remain && sg_ofs < sg_max; sg_ofs++) {
-                               unsigned long len = min(remain, PAGE_SIZE - x);
-                               void *map;
-
-                               map = kmap_atomic(nth_page(sg_page(sg), sg_ofs));
-                               if (needs_clflush)
-                                       drm_clflush_virt_range(map + x, len);
-                               memcpy(ptr, map + x, len);
-                               kunmap_atomic(map);
-
-                               ptr += len;
-                               remain -= len;
-                               x = 0;
-                       }
-
-                       sg_ofs = 0;
-                       sg = sg_next(sg);
+               for (n = offset >> PAGE_SHIFT; remain; n++) {
+                       int len = min(remain, PAGE_SIZE - x);
+
+                       src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+                       if (src_needs_clflush)
+                               drm_clflush_virt_range(src + x, len);
+                       memcpy(ptr, src + x, len);
+                       kunmap_atomic(src);
+
+                       ptr += len;
+                       remain -= len;
+                       x = 0;
                }
        }
 
+       i915_gem_object_finish_access(src_obj);
+
        memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
 
        /* dst_obj is returned with vmap pinned */
+       *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
+
        return dst;
 }
 
@@ -1360,6 +1376,9 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
        if (target_cmd_index == offset)
                return 0;
 
+       if (IS_ERR(jump_whitelist))
+               return PTR_ERR(jump_whitelist);
+
        if (!test_bit(target_cmd_index, jump_whitelist)) {
                DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
                          jump_target);
@@ -1369,28 +1388,10 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
        return 0;
 }
 
-/**
- * intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser()
- * @batch_length: length of the commands in batch_obj
- * @trampoline: Whether jump trampolines are used.
- *
- * Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser().
- * This has to be preallocated, because the command parser runs in signaling context,
- * and may not allocate any memory.
- *
- * Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use
- * IS_ERR() to check for errors. Must bre freed() with kfree().
- *
- * NULL is a valid value, meaning no allocation was required.
- */
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
-                                                           bool trampoline)
+static unsigned long *alloc_whitelist(u32 batch_length)
 {
        unsigned long *jmp;
 
-       if (trampoline)
-               return NULL;
-
        /*
         * We expect batch_length to be less than 256KiB for known users,
         * i.e. we need at most an 8KiB bitmap allocation which should be
@@ -1415,9 +1416,7 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
  * @batch_offset: byte offset in the batch at which execution starts
  * @batch_length: length of the commands in batch_obj
  * @shadow: validated copy of the batch buffer in question
- * @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist()
- * @shadow_map: mapping to @shadow vma
- * @batch_map: mapping to @batch vma
+ * @trampoline: true if we need to trampoline into privileged execution
  *
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
@@ -1425,21 +1424,21 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
+
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                            struct i915_vma *batch,
                            unsigned long batch_offset,
                            unsigned long batch_length,
                            struct i915_vma *shadow,
-                           unsigned long *jump_whitelist,
-                           void *shadow_map,
-                           const void *batch_map)
+                           bool trampoline)
 {
        u32 *cmd, *batch_end, offset = 0;
        struct drm_i915_cmd_descriptor default_desc = noop_desc;
        const struct drm_i915_cmd_descriptor *desc = &default_desc;
+       bool needs_clflush_after = false;
+       unsigned long *jump_whitelist;
        u64 batch_addr, shadow_addr;
        int ret = 0;
-       bool trampoline = !jump_whitelist;
 
        GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
        GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
@@ -1447,8 +1446,18 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                                     batch->size));
        GEM_BUG_ON(!batch_length);
 
-       cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length,
-                        shadow_map, batch_map);
+       cmd = copy_batch(shadow->obj, batch->obj,
+                        batch_offset, batch_length,
+                        &needs_clflush_after);
+       if (IS_ERR(cmd)) {
+               DRM_DEBUG("CMD: Failed to copy batch\n");
+               return PTR_ERR(cmd);
+       }
+
+       jump_whitelist = NULL;
+       if (!trampoline)
+               /* Defer failure until attempted use */
+               jump_whitelist = alloc_whitelist(batch_length);
 
        shadow_addr = gen8_canonical_addr(shadow->node.start);
        batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
@@ -1549,6 +1558,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 
        i915_gem_object_flush_map(shadow->obj);
 
+       if (!IS_ERR_OR_NULL(jump_whitelist))
+               kfree(jump_whitelist);
+       i915_gem_object_unpin_map(shadow->obj);
        return ret;
 }
 
index fd9db6de51130fd3802bf674d417fcca364c6cf7..997fbe9532c190a57f72a3568382b3cb1dabb421 100644 (file)
@@ -1897,17 +1897,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
-                                                           bool trampoline);
-
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                            struct i915_vma *batch,
                            unsigned long batch_offset,
                            unsigned long batch_length,
                            struct i915_vma *shadow,
-                           unsigned long *jump_whitelist,
-                           void *shadow_map,
-                           const void *batch_map);
+                           bool trampoline);
 #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
 
 /* intel_device_info.c */
index 1014c71cf7f52a5eb3f192d61b60cbb3c4338f90..37aef13085739efc40f70ae2ec61786ca49439a1 100644 (file)
@@ -1426,10 +1426,8 @@ i915_request_await_execution(struct i915_request *rq,
 
        do {
                fence = *child++;
-               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-                       i915_sw_fence_set_error_once(&rq->submit, fence->error);
+               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        continue;
-               }
 
                if (fence->context == rq->fence.context)
                        continue;
@@ -1527,10 +1525,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
        do {
                fence = *child++;
-               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-                       i915_sw_fence_set_error_once(&rq->submit, fence->error);
+               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        continue;
-               }
 
                /*
                 * Requests on the same timeline are explicitly ordered, along
index 2229f1af2ca8cbe57fef6c988e794ddc0fe2f56d..46029c5610c80814682a7c7fc32d1e2bf08ff42b 100644 (file)
@@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
        drm_panel_remove(&ts->base);
 
        mipi_dsi_device_unregister(ts->dsi);
-       kfree(ts->dsi);
 
        return 0;
 }
index 20fbfd9222665872f43edc2eb1cecb56da33c1d2..ea4add2b97179c47cf6ef93fd039ce5fb47d5c32 100644 (file)
@@ -102,6 +102,9 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                return;
        }
 
+       if (!mem)
+               return;
+
        man = ttm_manager_type(bdev, mem->mem_type);
        list_move_tail(&bo->lru, &man->lru[bo->priority]);
 
index 2f57f824e6dbdd502079c3f125d87d2f14d83d73..763fa6f4e07de808438493fbf8b74815329c1d19 100644 (file)
@@ -63,6 +63,9 @@ int ttm_mem_io_reserve(struct ttm_device *bdev,
 void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem)
 {
+       if (!mem)
+               return;
+
        if (!mem->bus.offset && !mem->bus.addr)
                return;
 
index 5f31acec3ad76b9914b5a2f4eac95c727a1de155..519deea8e39b76fb28013be05648649b8ac5abf9 100644 (file)
@@ -100,6 +100,8 @@ static int ttm_global_init(void)
        debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
                                &glob->bo_count);
 out:
+       if (ret)
+               --ttm_glob_use_count;
        mutex_unlock(&ttm_global_mutex);
        return ret;
 }
index d4833c878ca93c61860dc86193e0e1f2099f2485..b20530cf6a238880c7cf13b5723ff95e09b758c6 100644 (file)
@@ -1878,38 +1878,46 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
        vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
        if (vc4_hdmi->variant->external_irq_controller) {
-               ret = devm_request_threaded_irq(&pdev->dev,
-                                               platform_get_irq_byname(pdev, "cec-rx"),
-                                               vc4_cec_irq_handler_rx_bare,
-                                               vc4_cec_irq_handler_rx_thread, 0,
-                                               "vc4 hdmi cec rx", vc4_hdmi);
+               ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
+                                          vc4_cec_irq_handler_rx_bare,
+                                          vc4_cec_irq_handler_rx_thread, 0,
+                                          "vc4 hdmi cec rx", vc4_hdmi);
                if (ret)
                        goto err_delete_cec_adap;
 
-               ret = devm_request_threaded_irq(&pdev->dev,
-                                               platform_get_irq_byname(pdev, "cec-tx"),
-                                               vc4_cec_irq_handler_tx_bare,
-                                               vc4_cec_irq_handler_tx_thread, 0,
-                                               "vc4 hdmi cec tx", vc4_hdmi);
+               ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
+                                          vc4_cec_irq_handler_tx_bare,
+                                          vc4_cec_irq_handler_tx_thread, 0,
+                                          "vc4 hdmi cec tx", vc4_hdmi);
                if (ret)
-                       goto err_delete_cec_adap;
+                       goto err_remove_cec_rx_handler;
        } else {
                HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
 
-               ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
-                                               vc4_cec_irq_handler,
-                                               vc4_cec_irq_handler_thread, 0,
-                                               "vc4 hdmi cec", vc4_hdmi);
+               ret = request_threaded_irq(platform_get_irq(pdev, 0),
+                                          vc4_cec_irq_handler,
+                                          vc4_cec_irq_handler_thread, 0,
+                                          "vc4 hdmi cec", vc4_hdmi);
                if (ret)
                        goto err_delete_cec_adap;
        }
 
        ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
        if (ret < 0)
-               goto err_delete_cec_adap;
+               goto err_remove_handlers;
 
        return 0;
 
+err_remove_handlers:
+       if (vc4_hdmi->variant->external_irq_controller)
+               free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+       else
+               free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+
+err_remove_cec_rx_handler:
+       if (vc4_hdmi->variant->external_irq_controller)
+               free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+
 err_delete_cec_adap:
        cec_delete_adapter(vc4_hdmi->cec_adap);
 
@@ -1918,6 +1926,15 @@ err_delete_cec_adap:
 
 static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
 {
+       struct platform_device *pdev = vc4_hdmi->pdev;
+
+       if (vc4_hdmi->variant->external_irq_controller) {
+               free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+               free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+       } else {
+               free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+       }
+
        cec_unregister_adapter(vc4_hdmi->cec_adap);
 }
 #else
index caf6d0c4bc1b1d72efeb1e0d0b2bd2c81321241d..142308526ec6ae49468fb86f984ec0609d1ac186 100644 (file)
@@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         */
        mutex_lock(&vmbus_connection.channel_mutex);
 
+       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+               if (guid_equal(&channel->offermsg.offer.if_type,
+                              &newchannel->offermsg.offer.if_type) &&
+                   guid_equal(&channel->offermsg.offer.if_instance,
+                              &newchannel->offermsg.offer.if_instance)) {
+                       fnew = false;
+                       newchannel->primary_channel = channel;
+                       break;
+               }
+       }
+
        init_vp_index(newchannel);
 
        /* Remember the channels that should be cleaned up upon suspend. */
@@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         */
        atomic_dec(&vmbus_connection.offer_in_progress);
 
-       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-               if (guid_equal(&channel->offermsg.offer.if_type,
-                              &newchannel->offermsg.offer.if_type) &&
-                   guid_equal(&channel->offermsg.offer.if_instance,
-                              &newchannel->offermsg.offer.if_instance)) {
-                       fnew = false;
-                       break;
-               }
-       }
-
        if (fnew) {
                list_add_tail(&newchannel->listentry,
                              &vmbus_connection.chn_list);
@@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                /*
                 * Process the sub-channel.
                 */
-               newchannel->primary_channel = channel;
                list_add_tail(&newchannel->sc_list, &channel->sc_list);
        }
 
@@ -683,6 +683,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        queue_work(wq, &newchannel->add_channel_work);
 }
 
+/*
+ * Check if CPUs used by other channels of the same device.
+ * It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+       struct vmbus_channel *primary = chn->primary_channel;
+       struct vmbus_channel *sc;
+
+       lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+       if (!primary)
+               return false;
+
+       if (primary->target_cpu == cpu)
+               return true;
+
+       list_for_each_entry(sc, &primary->sc_list, sc_list)
+               if (sc != chn && sc->target_cpu == cpu)
+                       return true;
+
+       return false;
+}
+
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
@@ -702,6 +726,7 @@ static int next_numa_node_id;
 static void init_vp_index(struct vmbus_channel *channel)
 {
        bool perf_chn = hv_is_perf_channel(channel);
+       u32 i, ncpu = num_online_cpus();
        cpumask_var_t available_mask;
        struct cpumask *alloced_mask;
        u32 target_cpu;
@@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel)
                return;
        }
 
-       while (true) {
-               numa_node = next_numa_node_id++;
-               if (numa_node == nr_node_ids) {
-                       next_numa_node_id = 0;
-                       continue;
+       for (i = 1; i <= ncpu + 1; i++) {
+               while (true) {
+                       numa_node = next_numa_node_id++;
+                       if (numa_node == nr_node_ids) {
+                               next_numa_node_id = 0;
+                               continue;
+                       }
+                       if (cpumask_empty(cpumask_of_node(numa_node)))
+                               continue;
+                       break;
+               }
+               alloced_mask = &hv_context.hv_numa_map[numa_node];
+
+               if (cpumask_weight(alloced_mask) ==
+                   cpumask_weight(cpumask_of_node(numa_node))) {
+                       /*
+                        * We have cycled through all the CPUs in the node;
+                        * reset the alloced map.
+                        */
+                       cpumask_clear(alloced_mask);
                }
-               if (cpumask_empty(cpumask_of_node(numa_node)))
-                       continue;
-               break;
-       }
-       alloced_mask = &hv_context.hv_numa_map[numa_node];
 
-       if (cpumask_weight(alloced_mask) ==
-           cpumask_weight(cpumask_of_node(numa_node))) {
-               /*
-                * We have cycled through all the CPUs in the node;
-                * reset the alloced map.
-                */
-               cpumask_clear(alloced_mask);
-       }
+               cpumask_xor(available_mask, alloced_mask,
+                           cpumask_of_node(numa_node));
 
-       cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
+               target_cpu = cpumask_first(available_mask);
+               cpumask_set_cpu(target_cpu, alloced_mask);
 
-       target_cpu = cpumask_first(available_mask);
-       cpumask_set_cpu(target_cpu, alloced_mask);
+               if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+                   i > ncpu || !hv_cpuself_used(target_cpu, channel))
+                       break;
+       }
 
        channel->target_cpu = target_cpu;
 
index 6d5014ebaab5e832f1f6f5817e498c7236184a52..a6ea1eb1394e1cd78b4c4e7711a2b6d15ff913cb 100644 (file)
@@ -635,8 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
 
        status = readb(i2c->base + MPC_I2C_SR);
        if (status & CSR_MIF) {
-               /* Read again to allow register to stabilise */
-               status = readb(i2c->base + MPC_I2C_SR);
+               /* Wait up to 100us for transfer to properly complete */
+               readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
                writeb(0, i2c->base + MPC_I2C_SR);
                mpc_i2c_do_intr(i2c, status);
                return IRQ_HANDLED;
index 4657e99df0339ed3a94e8541b730b0727f4642af..59a36f92267555175db081310b3a32c0b0052c6b 100644 (file)
@@ -173,10 +173,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
        int ret;
 
        for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
-               if (!adev->status.enabled) {
-                       acpi_dev_put(adev);
+               if (!adev->status.enabled)
                        continue;
-               }
 
                if (bridge->n_sensors >= CIO2_NUM_PORTS) {
                        acpi_dev_put(adev);
@@ -185,7 +183,6 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
                }
 
                sensor = &bridge->sensors[bridge->n_sensors];
-               sensor->adev = adev;
                strscpy(sensor->name, cfg->hid, sizeof(sensor->name));
 
                ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
@@ -215,6 +212,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
                        goto err_free_swnodes;
                }
 
+               sensor->adev = acpi_dev_get(adev);
                adev->fwnode.secondary = fwnode;
 
                dev_info(&cio2->dev, "Found supported sensor %s\n",
index 07f342db6701f1084cf53d6c6455db0e00787c33..7481f553f95958c4eaea090a374e22021fcceaa1 100644 (file)
@@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
 
        com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
        com.cmd.hdr.Length = 6;
-       memcpy(&com.cmd.ConfigureBuffers.config, config, 6);
+       memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
        com.in_len = 6;
        com.out_len = 0;
 
index 84f04e0e0cb9a17f13edbffb618bd655fc8c5c2a..3d296f1998a1a36455fdddc74d3f945cc1f14056 100644 (file)
@@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {
 
 struct FW_CONFIGURE_FREE_BUFFERS {
        struct FW_HEADER hdr;
-       u8   UVI1_BufferLength;
-       u8   UVI2_BufferLength;
-       u8   TVO_BufferLength;
-       u8   AUD1_BufferLength;
-       u8   AUD2_BufferLength;
-       u8   TVA_BufferLength;
+       struct {
+               u8   UVI1_BufferLength;
+               u8   UVI2_BufferLength;
+               u8   TVO_BufferLength;
+               u8   AUD1_BufferLength;
+               u8   AUD2_BufferLength;
+               u8   TVA_BufferLength;
+       } __packed config;
 } __attribute__ ((__packed__));
 
 struct FW_CONFIGURE_UART {
index 7a6f01ace78ace991362a99abc45a4b4b741709f..305ffad131a2997bd70e72184a06374b20b8cdcc 100644 (file)
@@ -714,23 +714,20 @@ static int at24_probe(struct i2c_client *client)
        }
 
        /*
-        * If the 'label' property is not present for the AT24 EEPROM,
-        * then nvmem_config.id is initialised to NVMEM_DEVID_AUTO,
-        * and this will append the 'devid' to the name of the NVMEM
-        * device. This is purely legacy and the AT24 driver has always
-        * defaulted to this. However, if the 'label' property is
-        * present then this means that the name is specified by the
-        * firmware and this name should be used verbatim and so it is
-        * not necessary to append the 'devid'.
+        * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the
+        * label property is set as some platform can have multiple eeproms
+        * with same label and we can not register each of those with same
+        * label. Failing to register those eeproms trigger cascade failure
+        * on such platform.
         */
+       nvmem_config.id = NVMEM_DEVID_AUTO;
+
        if (device_property_present(dev, "label")) {
-               nvmem_config.id = NVMEM_DEVID_NONE;
                err = device_property_read_string(dev, "label",
                                                  &nvmem_config.name);
                if (err)
                        return err;
        } else {
-               nvmem_config.id = NVMEM_DEVID_AUTO;
                nvmem_config.name = dev_name(dev);
        }
 
index 9890a1532cb0b23344866c2ab60197f0a97dcb25..ce8aed5629295ddbb21de7ecdf749563436b66e3 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/errno.h>
 #include <linux/hdreg.h>
 #include <linux/kdev_t.h>
+#include <linux/kref.h>
 #include <linux/blkdev.h>
 #include <linux/cdev.h>
 #include <linux/mutex.h>
@@ -111,7 +112,7 @@ struct mmc_blk_data {
 #define MMC_BLK_CMD23  (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR (1 << 1)        /* MMC Reliable write support */
 
-       unsigned int    usage;
+       struct kref     kref;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    reset_done;
@@ -181,10 +182,8 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 
        mutex_lock(&open_lock);
        md = disk->private_data;
-       if (md && md->usage == 0)
+       if (md && !kref_get_unless_zero(&md->kref))
                md = NULL;
-       if (md)
-               md->usage++;
        mutex_unlock(&open_lock);
 
        return md;
@@ -196,18 +195,25 @@ static inline int mmc_get_devidx(struct gendisk *disk)
        return devidx;
 }
 
-static void mmc_blk_put(struct mmc_blk_data *md)
+static void mmc_blk_kref_release(struct kref *ref)
 {
-       mutex_lock(&open_lock);
-       md->usage--;
-       if (md->usage == 0) {
-               int devidx = mmc_get_devidx(md->disk);
+       struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
+       int devidx;
 
-               ida_simple_remove(&mmc_blk_ida, devidx);
-               put_disk(md->disk);
-               kfree(md);
-       }
+       devidx = mmc_get_devidx(md->disk);
+       ida_simple_remove(&mmc_blk_ida, devidx);
+
+       mutex_lock(&open_lock);
+       md->disk->private_data = NULL;
        mutex_unlock(&open_lock);
+
+       put_disk(md->disk);
+       kfree(md);
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+       kref_put(&md->kref, mmc_blk_kref_release);
 }
 
 static ssize_t power_ro_lock_show(struct device *dev,
@@ -2327,7 +2333,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 
        INIT_LIST_HEAD(&md->part);
        INIT_LIST_HEAD(&md->rpmbs);
-       md->usage = 1;
+       kref_init(&md->kref);
+
        md->queue.blkdata = md;
 
        md->disk->major = MMC_BLOCK_MAJOR;
index eda4a1892c33c555bb1833e1806235b3578cda61..0475d96047c4087c6d45d009d95ade7f77434e6d 100644 (file)
@@ -75,7 +75,8 @@ static void mmc_host_classdev_release(struct device *dev)
 {
        struct mmc_host *host = cls_dev_to_mmc_host(dev);
        wakeup_source_unregister(host->ws);
-       ida_simple_remove(&mmc_host_ida, host->index);
+       if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
+               ida_simple_remove(&mmc_host_ida, host->index);
        kfree(host);
 }
 
@@ -502,7 +503,7 @@ static int mmc_first_nonreserved_index(void)
  */
 struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 {
-       int err;
+       int index;
        struct mmc_host *host;
        int alias_id, min_idx, max_idx;
 
@@ -515,20 +516,19 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 
        alias_id = of_alias_get_id(dev->of_node, "mmc");
        if (alias_id >= 0) {
-               min_idx = alias_id;
-               max_idx = alias_id + 1;
+               index = alias_id;
        } else {
                min_idx = mmc_first_nonreserved_index();
                max_idx = 0;
-       }
 
-       err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
-       if (err < 0) {
-               kfree(host);
-               return NULL;
+               index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
+               if (index < 0) {
+                       kfree(host);
+                       return NULL;
+               }
        }
 
-       host->index = err;
+       host->index = index;
 
        dev_set_name(&host->class_dev, "mmc%d", host->index);
        host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));
index d22d783033112f6d60a176733d0918b37434fd21..31730efa75382334900a0d306096fa3d10eb9ca9 100644 (file)
@@ -3450,7 +3450,9 @@ static int bond_master_netdev_event(unsigned long event,
                return bond_event_changename(event_bond);
        case NETDEV_UNREGISTER:
                bond_remove_proc_entry(event_bond);
+#ifdef CONFIG_XFRM_OFFLOAD
                xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
+#endif /* CONFIG_XFRM_OFFLOAD */
                break;
        case NETDEV_REGISTER:
                bond_create_proc_entry(event_bond);
index 93136f7e69f51fe392df97af796a8b04fd7c2a6c..69f21b71614c5bec59ca6189f620e4ec9ab08007 100644 (file)
@@ -366,6 +366,8 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
        int i;
 
        reg[1] |= vid & CVID_MASK;
+       if (vid > 1)
+               reg[1] |= ATA2_IVL;
        reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
        reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
        /* STATIC_ENT indicate that entry is static wouldn't
index 334d610a503d9c59365dd84713a0f49f320b5b59..b19b389ff10ac6036380de641e1eda410988c2c7 100644 (file)
@@ -79,6 +79,7 @@ enum mt753x_bpdu_port_fw {
 #define  STATIC_EMP                    0
 #define  STATIC_ENT                    3
 #define MT7530_ATA2                    0x78
+#define  ATA2_IVL                      BIT(15)
 
 /* Register for address table write data */
 #define MT7530_ATWD                    0x7c
index 05af632b0f597da97dcd5b9ab493a42d64106657..634a48e6616b953a873a942dacc8b10affd5ebe4 100644 (file)
@@ -12,7 +12,7 @@ config NET_DSA_MV88E6XXX
 config NET_DSA_MV88E6XXX_PTP
        bool "PTP support for Marvell 88E6xxx"
        default n
-       depends on PTP_1588_CLOCK
+       depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
        help
          Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
          chips that support it.
index ced8c9cb29c29731c9c8517f714269d3a2d52d96..e2dc997580a82c2a1763d1bd524016f0dff038eb 100644 (file)
@@ -397,6 +397,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
                if (dsa_is_cpu_port(ds, port))
                        v->pvid = true;
                list_add(&v->list, &priv->dsa_8021q_vlans);
+
+               v = kmemdup(v, sizeof(*v), GFP_KERNEL);
+               if (!v)
+                       return -ENOMEM;
+
+               list_add(&v->list, &priv->bridge_vlans);
        }
 
        ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
index f56245eeef7b15e532434de0af1e5a0777ff9745..4db162cee911e99ba70f3c60828d70d90f555b55 100644 (file)
@@ -1671,11 +1671,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 
        if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
-               u16 vlan_proto = tpa_info->metadata >>
-                       RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               __be16 vlan_proto = htons(tpa_info->metadata >>
+                                         RX_CMP_FLAGS2_METADATA_TPID_SFT);
                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
 
-               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+               if (eth_type_vlan(vlan_proto)) {
+                       __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+               } else {
+                       dev_kfree_skb(skb);
+                       return NULL;
+               }
        }
 
        skb_checksum_none_assert(skb);
@@ -1897,9 +1902,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
                u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
-               u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               __be16 vlan_proto = htons(meta_data >>
+                                         RX_CMP_FLAGS2_METADATA_TPID_SFT);
 
-               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+               if (eth_type_vlan(vlan_proto)) {
+                       __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+               } else {
+                       dev_kfree_skb(skb);
+                       goto next_rx;
+               }
        }
 
        skb_checksum_none_assert(skb);
@@ -7563,8 +7574,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->flags &= ~BNXT_FLAG_WOL_CAP;
                if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
                        bp->flags |= BNXT_FLAG_WOL_CAP;
-               if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
+               if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
                        __bnxt_hwrm_ptp_qcfg(bp);
+               } else {
+                       kfree(bp->ptp_cfg);
+                       bp->ptp_cfg = NULL;
+               }
        } else {
 #ifdef CONFIG_BNXT_SRIOV
                struct bnxt_vf_info *vf = &bp->vf;
@@ -10123,7 +10138,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                }
        }
 
-       bnxt_ptp_start(bp);
        rc = bnxt_init_nic(bp, irq_re_init);
        if (rc) {
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
@@ -10197,6 +10211,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
 {
        int rc = 0;
 
+       if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+               netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
+               rc = -ENODEV;
+               goto half_open_err;
+       }
+
        rc = bnxt_alloc_mem(bp, false);
        if (rc) {
                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -10256,9 +10276,16 @@ static int bnxt_open(struct net_device *dev)
        rc = bnxt_hwrm_if_change(bp, true);
        if (rc)
                return rc;
+
+       if (bnxt_ptp_init(bp)) {
+               netdev_warn(dev, "PTP initialization failed.\n");
+               kfree(bp->ptp_cfg);
+               bp->ptp_cfg = NULL;
+       }
        rc = __bnxt_open_nic(bp, true, true);
        if (rc) {
                bnxt_hwrm_if_change(bp, false);
+               bnxt_ptp_clear(bp);
        } else {
                if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
                        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -10349,6 +10376,7 @@ static int bnxt_close(struct net_device *dev)
 {
        struct bnxt *bp = netdev_priv(dev);
 
+       bnxt_ptp_clear(bp);
        bnxt_hwmon_close(bp);
        bnxt_close_nic(bp, true, true);
        bnxt_hwrm_shutdown_link(bp);
@@ -11335,6 +11363,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
                bnxt_clear_int_mode(bp);
                pci_disable_device(bp->pdev);
        }
+       bnxt_ptp_clear(bp);
        __bnxt_close_nic(bp, true, false);
        bnxt_vf_reps_free(bp);
        bnxt_clear_int_mode(bp);
@@ -11959,10 +11988,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
                          (bp->fw_reset_max_dsecs * HZ / 10));
 }
 
+static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
+{
+       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+       if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
+               bnxt_ulp_start(bp, rc);
+               bnxt_dl_health_status_update(bp, false);
+       }
+       bp->fw_reset_state = 0;
+       dev_close(bp->dev);
+}
+
 static void bnxt_fw_reset_task(struct work_struct *work)
 {
        struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
-       int rc;
+       int rc = 0;
 
        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
@@ -11992,6 +12032,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                }
                bp->fw_reset_timestamp = jiffies;
                rtnl_lock();
+               if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+                       bnxt_fw_reset_abort(bp, rc);
+                       rtnl_unlock();
+                       return;
+               }
                bnxt_fw_reset_close(bp);
                if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
                        bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
@@ -12039,6 +12084,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                        if (val == 0xffff) {
                                if (bnxt_fw_reset_timeout(bp)) {
                                        netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+                                       rc = -ETIMEDOUT;
                                        goto fw_reset_abort;
                                }
                                bnxt_queue_fw_reset_work(bp, HZ / 1000);
@@ -12048,6 +12094,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
                if (pci_enable_device(bp->pdev)) {
                        netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+                       rc = -ENODEV;
                        goto fw_reset_abort;
                }
                pci_set_master(bp->pdev);
@@ -12074,9 +12121,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                }
                rc = bnxt_open(bp->dev);
                if (rc) {
-                       netdev_err(bp->dev, "bnxt_open_nic() failed\n");
-                       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
-                       dev_close(bp->dev);
+                       netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
+                       bnxt_fw_reset_abort(bp, rc);
+                       rtnl_unlock();
+                       return;
                }
 
                bp->fw_reset_state = 0;
@@ -12103,12 +12151,8 @@ fw_reset_abort_status:
                netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
        }
 fw_reset_abort:
-       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
-       if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
-               bnxt_dl_health_status_update(bp, false);
-       bp->fw_reset_state = 0;
        rtnl_lock();
-       dev_close(bp->dev);
+       bnxt_fw_reset_abort(bp, rc);
        rtnl_unlock();
 }
 
@@ -12662,7 +12706,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        if (BNXT_PF(bp))
                devlink_port_type_clear(&bp->dl_port);
 
-       bnxt_ptp_clear(bp);
        pci_disable_pcie_error_reporting(pdev);
        unregister_netdev(dev);
        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -13246,11 +13289,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                   rc);
        }
 
-       if (bnxt_ptp_init(bp)) {
-               netdev_warn(dev, "PTP initialization failed.\n");
-               kfree(bp->ptp_cfg);
-               bp->ptp_cfg = NULL;
-       }
        bnxt_inv_fw_health_reg(bp);
        bnxt_dl_register(bp);
 
@@ -13436,7 +13474,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
        if (netif_running(netdev))
                bnxt_close(netdev);
 
-       pci_disable_device(pdev);
+       if (pci_is_enabled(pdev))
+               pci_disable_device(pdev);
        bnxt_free_ctx_mem(bp);
        kfree(bp->ctx);
        bp->ctx = NULL;
index 8e90224c43a21434d4f832742d34c87cee4b9629..8a68df4d9e59cf2cf6670bffe01991fd0bbd649c 100644 (file)
@@ -433,6 +433,7 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
 static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
 {
        int total_ets_bw = 0;
+       bool zero = false;
        u8 max_tc = 0;
        int i;
 
@@ -453,13 +454,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        total_ets_bw += ets->tc_tx_bw[i];
+                       zero = zero || !ets->tc_tx_bw[i];
                        break;
                default:
                        return -ENOTSUPP;
                }
        }
-       if (total_ets_bw > 100)
+       if (total_ets_bw > 100) {
+               netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
                return -EINVAL;
+       }
+       if (zero && total_ets_bw == 100) {
+               netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
+               return -EINVAL;
+       }
 
        if (max_tc >= bp->max_tc)
                *tc = bp->max_tc;
index f698b6bd4ff87146ceb9b01bd05bac194ec5858b..9089e7f3fbd4563e37ae7993bf594accf4cc003e 100644 (file)
@@ -385,22 +385,6 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
        return 0;
 }
 
-void bnxt_ptp_start(struct bnxt *bp)
-{
-       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-
-       if (!ptp)
-               return;
-
-       if (bp->flags & BNXT_FLAG_CHIP_P5) {
-               spin_lock_bh(&ptp->ptp_lock);
-               ptp->current_time = bnxt_refclk_read(bp, NULL);
-               WRITE_ONCE(ptp->old_time, ptp->current_time);
-               spin_unlock_bh(&ptp->ptp_lock);
-               ptp_schedule_worker(ptp->ptp_clock, 0);
-       }
-}
-
 static const struct ptp_clock_info bnxt_ptp_caps = {
        .owner          = THIS_MODULE,
        .name           = "bnxt clock",
@@ -450,7 +434,13 @@ int bnxt_ptp_init(struct bnxt *bp)
                bnxt_unmap_ptp_regs(bp);
                return err;
        }
-
+       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+               spin_lock_bh(&ptp->ptp_lock);
+               ptp->current_time = bnxt_refclk_read(bp, NULL);
+               WRITE_ONCE(ptp->old_time, ptp->current_time);
+               spin_unlock_bh(&ptp->ptp_lock);
+               ptp_schedule_worker(ptp->ptp_clock, 0);
+       }
        return 0;
 }
 
index 6b6245750e206e5588679c8d7c63ed663e9b75fc..4135ea3ec7889b2438bdf40afc60eec6ab52b298 100644 (file)
@@ -75,7 +75,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
 int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
 int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-void bnxt_ptp_start(struct bnxt *bp);
 int bnxt_ptp_init(struct bnxt *bp);
 void bnxt_ptp_clear(struct bnxt *bp);
 #endif
index a918e374f3c5c4f3714638332c45c0e24c4f3adc..187ff643ad2aeaf1f0bf8d3a6a24591c9545f549 100644 (file)
@@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
                if (!edev)
                        return ERR_PTR(-ENOMEM);
                edev->en_ops = &bnxt_en_ops_tbl;
-               if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
-                       edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
-               if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
-                       edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
                edev->net = dev;
                edev->pdev = bp->pdev;
                edev->l2_db_size = bp->db_size;
                edev->l2_db_size_nc = bp->db_size;
                bp->edev = edev;
        }
+       edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
+       if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+               edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+       if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+               edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
        return bp->edev;
 }
 EXPORT_SYMBOL(bnxt_ulp_probe);
index 4cddd628d41b24470a2c82a0ea310ef9d7ded2b2..9ed3d1ab2ca58020c7bef7469b4fd8d110aa09f7 100644 (file)
@@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
         * bits 32:47 indicate the PVF num.
         */
        for (q_no = 0; q_no < ern; q_no++) {
-               reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+               reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
 
                /* for VF assigned queues. */
                if (q_no < oct->sriov_info.pf_srn) {
index f3d12d0714fb53ea54c50834d5c9e102b36785ab..68b78642c045d44d49d62b8c3125a6421756dbd8 100644 (file)
@@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
        if (err)
                return err;
 
-       err = dpaa2_switch_seed_bp(ethsw);
-       if (err)
-               goto err_free_dpbp;
-
        err = dpaa2_switch_alloc_rings(ethsw);
        if (err)
-               goto err_drain_dpbp;
+               goto err_free_dpbp;
 
        err = dpaa2_switch_setup_dpio(ethsw);
        if (err)
                goto err_destroy_rings;
 
+       err = dpaa2_switch_seed_bp(ethsw);
+       if (err)
+               goto err_deregister_dpio;
+
        err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err) {
                dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
-               goto err_deregister_dpio;
+               goto err_drain_dpbp;
        }
 
        return 0;
 
+err_drain_dpbp:
+       dpaa2_switch_drain_bp(ethsw);
 err_deregister_dpio:
        dpaa2_switch_free_dpio(ethsw);
 err_destroy_rings:
        dpaa2_switch_destroy_rings(ethsw);
-err_drain_dpbp:
-       dpaa2_switch_drain_bp(ethsw);
 err_free_dpbp:
        dpaa2_switch_free_dpbp(ethsw);
 
index 46ecb42f2ef8e627f105d408688c7a73b8e3fa0a..d9fc5c456bf3e28327264ca3866ace10f24a42e4 100644 (file)
@@ -524,6 +524,7 @@ static void setup_memac(struct mac_device *mac_dev)
        | SUPPORTED_Autoneg \
        | SUPPORTED_Pause \
        | SUPPORTED_Asym_Pause \
+       | SUPPORTED_FIBRE \
        | SUPPORTED_MII)
 
 static DEFINE_MUTEX(eth_lock);
index 12f6c2442a7ad83ae026035907c8c70d3e4cda0b..e53512f6878afd43c8fe4e0eaaf2f1740fe49d5b 100644 (file)
 /* buf unit size is cache_line_size, which is 64, so the shift is 6 */
 #define PPE_BUF_SIZE_SHIFT             6
 #define PPE_TX_BUF_HOLD                        BIT(31)
-#define CACHE_LINE_MASK                        0x3F
+#define SOC_CACHE_LINE_MASK            0x3F
 #else
 #define PPE_CFG_QOS_VMID_GRP_SHIFT     8
 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT    11
@@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 #if defined(CONFIG_HI13X1_GMAC)
        desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
                | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
-       desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
-       desc->send_addr =  (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
+       desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
+       desc->send_addr =  (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
 #else
        desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
        desc->send_addr = (__force u32)cpu_to_be32(phys);
index 0a6cda309b24a426235c0501ecc65c99a47bc12f..aa86a81c8f4af1dcdb696630bf60ab7ea9dd41a0 100644 (file)
@@ -98,6 +98,7 @@ struct hclgevf_mbx_resp_status {
        u32 origin_mbx_msg;
        bool received_resp;
        int resp_status;
+       u16 match_id;
        u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
 };
 
@@ -143,7 +144,8 @@ struct hclge_mbx_vf_to_pf_cmd {
        u8 mbx_need_resp;
        u8 rsv1[1];
        u8 msg_len;
-       u8 rsv2[3];
+       u8 rsv2;
+       u16 match_id;
        struct hclge_vf_to_pf_msg msg;
 };
 
@@ -153,7 +155,8 @@ struct hclge_mbx_pf_to_vf_cmd {
        u8 dest_vfid;
        u8 rsv[3];
        u8 msg_len;
-       u8 rsv1[3];
+       u8 rsv1;
+       u16 match_id;
        struct hclge_pf_to_vf_msg msg;
 };
 
index dd3354a57c6206f5c4264ac9710a1936fad686fd..ebeaf12e409bcf0fd96d891c27fb2f3fd53e6159 100644 (file)
@@ -9552,13 +9552,17 @@ static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
        if (ret)
                return ret;
 
-       if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
+       if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
                ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
                                                        !enable);
-       else if (!vport->vport_id)
+       } else if (!vport->vport_id) {
+               if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+                       enable = false;
+
                ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
                                                 HCLGE_FILTER_FE_INGRESS,
                                                 enable, 0);
+       }
 
        return ret;
 }
index e10a2c36b706626f5d979f185e21be46bb8fcbb8..c0a478ae958347405b294f8818494a0ea3417eda 100644 (file)
@@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
 
        resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
        resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+       resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
 
        resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
        resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
index 52eaf82b7cd71d91d1b28f20f58d81d71dc0f280..8784d61e833f15a98777a13a49f308a8169f24e0 100644 (file)
@@ -2641,6 +2641,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 
 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 {
+       struct hnae3_handle *nic = &hdev->nic;
+       int ret;
+
+       ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to enable rx vlan offload, ret = %d\n", ret);
+               return ret;
+       }
+
        return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
                                       false);
 }
index 9b17735b9f4ce35a2251ae624cf8631c546c4eb6..772b2f8acd2e83e8f98a3041648e4f59b8330009 100644 (file)
@@ -13,6 +13,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
        return resp_code ? -resp_code : 0;
 }
 
+#define HCLGEVF_MBX_MATCH_ID_START     1
 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
 {
        /* this function should be called with mbx_resp.mbx_mutex held
@@ -21,6 +22,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
        hdev->mbx_resp.received_resp  = false;
        hdev->mbx_resp.origin_mbx_msg = 0;
        hdev->mbx_resp.resp_status    = 0;
+       hdev->mbx_resp.match_id++;
+       /* Update match_id and ensure the value of match_id is not zero */
+       if (hdev->mbx_resp.match_id == 0)
+               hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
        memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
 }
 
@@ -115,6 +120,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
        if (need_resp) {
                mutex_lock(&hdev->mbx_resp.mbx_mutex);
                hclgevf_reset_mbx_resp_status(hdev);
+               req->match_id = hdev->mbx_resp.match_id;
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
@@ -211,6 +217,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                                resp->additional_info[i] = *temp;
                                temp++;
                        }
+
+                       /* If match_id is not zero, it means PF support
+                        * match_id. If the match_id is right, VF get the
+                        * right response, otherwise ignore the response.
+                        * Driver will clear hdev->mbx_resp when send
+                        * next message which need response.
+                        */
+                       if (req->match_id) {
+                               if (req->match_id == resp->match_id)
+                                       resp->received_resp = true;
+                       } else {
+                               resp->received_resp = true;
+                       }
                        break;
                case HCLGE_MBX_LINK_STAT_CHANGE:
                case HCLGE_MBX_ASSERTING_RESET:
index ed77191d19f44cb8228290a7c00c13777ae38d79..a775c69e4fd7f089d56341fca8ad979bd98c3488 100644 (file)
@@ -1731,7 +1731,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
-               ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                goto out;
        }
 
@@ -1753,6 +1752,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                dev_kfree_skb_any(skb);
                tx_send_failed++;
                tx_dropped++;
+               ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                ret = NETDEV_TX_OK;
                goto out;
        }
index 913253f8ecb4ecec3e7ea4dd914347e9e053fd8e..14aea40da50fb8b4b80b7d36f0b748af835ff47c 100644 (file)
@@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
                                struct sk_buff *skb)
 {
        if (ring_uses_build_skb(rx_ring)) {
-               unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+               unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
+               unsigned long offset = (unsigned long)(skb->data) & mask;
 
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              IXGBE_CB(skb)->dma,
index 1a3455620b381f2cc26a3266261960cc134dfae7..cc8ac36cf687de026c364034e60cfd1f555ec44f 100644 (file)
@@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
 rvu_mbox-y := mbox.o rvu_trace.o
 rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
                  rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
-                 rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
+                 rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o
index 10cddf1ac7b9efa7bde20809ddc6886e58c87d3c..017163fb3cd5cc1ecec979584830fd77f462a4c0 100644 (file)
@@ -1314,7 +1314,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
        return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
 }
 
-static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
 {
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int blkaddr = BLKADDR_NIX0, vf;
@@ -2859,6 +2859,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
        if (!vfs)
                return 0;
 
+       /* LBK channel number 63 is used for switching packets between
+        * CGX mapped VFs. Hence limit LBK pairs till 62 only.
+        */
+       if (vfs > 62)
+               vfs = 62;
+
        /* Save VFs number for reference in VF interrupts handlers.
         * Since interrupts might start arriving during SRIOV enablement
         * ordinary API cannot be used to get number of enabled VFs.
@@ -3001,6 +3007,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* Initialize debugfs */
        rvu_dbg_init(rvu);
 
+       mutex_init(&rvu->rswitch.switch_lock);
+
        return 0;
 err_dl:
        rvu_unregister_dl(rvu);
index 10e58a5d5861acfd3344d4020822e0eefd2f5674..91503fb2762c9bde18ef9e5d20b328e2605b8af0 100644 (file)
@@ -415,6 +415,16 @@ struct npc_kpu_profile_adapter {
        size_t                          kpus;
 };
 
+#define RVU_SWITCH_LBK_CHAN    63
+
+struct rvu_switch {
+       struct mutex switch_lock; /* Serialize flow installation */
+       u32 used_entries;
+       u16 *entry2pcifunc;
+       u16 mode;
+       u16 start_entry;
+};
+
 struct rvu {
        void __iomem            *afreg_base;
        void __iomem            *pfreg_base;
@@ -445,6 +455,7 @@ struct rvu {
 
        /* CGX */
 #define PF_CGXMAP_BASE         1 /* PF 0 is reserved for RVU PF */
+       u16                     cgx_mapped_vfs; /* maximum CGX mapped VFs */
        u8                      cgx_mapped_pfs;
        u8                      cgx_cnt_max;     /* CGX port count max */
        u8                      *pf2cgxlmac_map; /* pf to cgx_lmac map */
@@ -477,6 +488,9 @@ struct rvu {
        struct rvu_debugfs      rvu_dbg;
 #endif
        struct rvu_devlink      *rvu_dl;
+
+       /* RVU switch implementation over NPC with DMAC rules */
+       struct rvu_switch       rswitch;
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -691,6 +705,7 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
                        struct nix_cn10k_aq_enq_req *aq_req,
                        struct nix_cn10k_aq_enq_rsp *aq_rsp,
                        u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
 
 /* NPC APIs */
 int rvu_npc_init(struct rvu *rvu);
@@ -768,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu);
 static inline void rvu_dbg_init(struct rvu *rvu) {}
 static inline void rvu_dbg_exit(struct rvu *rvu) {}
 #endif
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
 #endif /* RVU_H */
index 6cc8fbb7190cd6ed27fdafddb3f0fa35e6602036..fe99ac4a4dd8046c3256c60fdbacbd2dbc1f9b53 100644 (file)
@@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
        unsigned long lmac_bmap;
        int size, free_pkind;
        int cgx, lmac, iter;
+       int numvfs, hwvfs;
 
        if (!cgx_cnt_max)
                return 0;
@@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
                        pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
                        rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
                        rvu->cgx_mapped_pfs++;
+                       rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+                       rvu->cgx_mapped_vfs += numvfs;
                        pf++;
                }
        }
index 370d4ca1e5edbd1238fd66ad55a03d7ee19a9ba0..9b2dfbf90e51049d0ce9eb17eed3e8b56e736d64 100644 (file)
@@ -2113,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
        int entry_acnt, entry_ecnt;
        int cntr_acnt, cntr_ecnt;
 
-       /* Skip PF0 */
-       if (!pcifunc)
-               return;
        rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
                                          &entry_acnt, &entry_ecnt);
        rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -2298,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
                                         struct rvu_npc_mcam_rule *rule)
 {
-       if (rule->intf == NIX_INTF_TX) {
+       if (is_npc_intf_tx(rule->intf)) {
                switch (rule->tx_action.op) {
                case NIX_TX_ACTIONOP_DROP:
                        seq_puts(s, "\taction: Drop\n");
index 10a98bcb7c54e9f59dcbcea2a7f8a363ff05e7e3..2688186066d94c381776b9cf58df105c9791bd50 100644 (file)
@@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
        rvu_nix_health_reporters_destroy(rvu_dl);
 }
 
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+       struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+       struct rvu *rvu = rvu_dl->rvu;
+       struct rvu_switch *rswitch;
+
+       rswitch = &rvu->rswitch;
+       *mode = rswitch->mode;
+
+       return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                                       struct netlink_ext_ack *extack)
+{
+       struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+       struct rvu *rvu = rvu_dl->rvu;
+       struct rvu_switch *rswitch;
+
+       rswitch = &rvu->rswitch;
+       switch (mode) {
+       case DEVLINK_ESWITCH_MODE_LEGACY:
+       case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+               if (rswitch->mode == mode)
+                       return 0;
+               rswitch->mode = mode;
+               if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+                       rvu_switch_enable(rvu);
+               else
+                       rvu_switch_disable(rvu);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
 {
@@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req
 
 static const struct devlink_ops rvu_devlink_ops = {
        .info_get = rvu_devlink_info_get,
+       .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+       .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
 };
 
 int rvu_register_dl(struct rvu *rvu)
@@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu)
        struct devlink *dl;
        int err;
 
-       rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL);
-       if (!rvu_dl)
-               return -ENOMEM;
-
        dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
        if (!dl) {
                dev_warn(rvu->dev, "devlink_alloc failed\n");
-               kfree(rvu_dl);
                return -ENOMEM;
        }
 
@@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu)
        if (err) {
                dev_err(rvu->dev, "devlink register failed with error %d\n", err);
                devlink_free(dl);
-               kfree(rvu_dl);
                return err;
        }
 
+       rvu_dl = devlink_priv(dl);
        rvu_dl->dl = dl;
        rvu_dl->rvu = rvu;
        rvu->rvu_dl = rvu_dl;
@@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu)
        rvu_health_reporters_destroy(rvu);
        devlink_unregister(dl);
        devlink_free(dl);
-       kfree(rvu_dl);
 }
index aeae377044280759f046a605f166acd8745a0146..0933699a0d2d7ddac3b49e311c5298b301fe451b 100644 (file)
@@ -1952,6 +1952,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
        pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
 }
 
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+                              u16 pcifunc, struct nix_txsch *txsch)
+{
+       struct rvu_hwinfo *hw = rvu->hw;
+       int lbk_link_start, lbk_links;
+       u8 pf = rvu_get_pf(pcifunc);
+       int schq;
+
+       if (!is_pf_cgxmapped(rvu, pf))
+               return;
+
+       lbk_link_start = hw->cgx_links;
+
+       for (schq = 0; schq < txsch->schq.max; schq++) {
+               if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+                       continue;
+               /* Enable all LBK links with channel 63 by default so that
+                * packets can be sent to LBK with a NPC TX MCAM rule
+                */
+               lbk_links = hw->lbk_links;
+               while (lbk_links--)
+                       rvu_write64(rvu, blkaddr,
+                                   NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+                                                             lbk_link_start +
+                                                             lbk_links),
+                                   BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+       }
+}
+
 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                                    struct nix_txschq_config *req,
                                    struct msg_rsp *rsp)
@@ -2040,6 +2069,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                rvu_write64(rvu, blkaddr, reg, regval);
        }
 
+       rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+                          &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+
        return 0;
 }
 
@@ -3180,6 +3212,8 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
        if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
                ether_addr_copy(pfvf->default_mac, req->mac_addr);
 
+       rvu_switch_update_rules(rvu, pcifunc);
+
        return 0;
 }
 
@@ -3849,6 +3883,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
        pfvf = rvu_get_pfvf(rvu, pcifunc);
        set_bit(NIXLF_INITIALIZED, &pfvf->flags);
 
+       rvu_switch_update_rules(rvu, pcifunc);
+
        return rvu_cgx_start_stop_io(rvu, pcifunc, true);
 }
 
index 3612e0a2cab324a2cf7405a841d19c89f63ee609..1097291aaa453ad389ea6d9a5dada94707fdb321 100644 (file)
@@ -442,7 +442,8 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
        owner = mcam->entry2pfvf_map[index];
        target_func = (entry->action >> 4) & 0xffff;
        /* do nothing when target is LBK/PF or owner is not PF */
-       if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
+       if (is_pffunc_af(owner) || is_afvf(target_func) ||
+           (owner & RVU_PFVF_FUNC_MASK) ||
            !(target_func & RVU_PFVF_FUNC_MASK))
                return;
 
@@ -468,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
 {
        int bank = npc_get_bank(mcam, index);
        int kw = 0, actbank, actindex;
+       u8 tx_intf_mask = ~intf & 0x3;
+       u8 tx_intf = intf;
        u64 cam0, cam1;
 
        actbank = bank; /* Save bank id, to set action later on */
@@ -488,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
         */
        for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
                /* Interface should be set in all banks */
+               if (is_npc_intf_tx(intf)) {
+                       /* Last bit must be set and rest don't care
+                        * for TX interfaces
+                        */
+                       tx_intf_mask = 0x1;
+                       tx_intf = intf & tx_intf_mask;
+                       tx_intf_mask = ~tx_intf & tx_intf_mask;
+               }
+
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
-                           intf);
+                           tx_intf);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
-                           ~intf & 0x3);
+                           tx_intf_mask);
 
                /* Set the match key */
                npc_get_keyword(entry, kw, &cam0, &cam1);
@@ -650,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
        eth_broadcast_addr((u8 *)&req.mask.dmac);
        req.features = BIT_ULL(NPC_DMAC);
        req.channel = chan;
+       req.chan_mask = 0xFFFU;
        req.intf = pfvf->nix_rx_intf;
        req.op = action.op;
        req.hdr.pcifunc = 0; /* AF is requester */
@@ -799,6 +812,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
        eth_broadcast_addr((u8 *)&req.mask.dmac);
        req.features = BIT_ULL(NPC_DMAC);
        req.channel = chan;
+       req.chan_mask = 0xFFFU;
        req.intf = pfvf->nix_rx_intf;
        req.entry = index;
        req.hdr.pcifunc = 0; /* AF is requester */
@@ -1745,6 +1759,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
        int nixlf_count = rvu_get_nixlf_count(rvu);
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int rsvd, err;
+       u16 index;
+       int cntr;
        u64 cfg;
 
        /* Actual number of MCAM entries vary by entry size */
@@ -1845,6 +1861,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
        if (!mcam->entry2target_pffunc)
                goto free_mem;
 
+       for (index = 0; index < mcam->bmap_entries; index++) {
+               mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+               mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+       }
+
+       for (cntr = 0; cntr < mcam->counters.max; cntr++)
+               mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
        mutex_init(&mcam->lock);
 
        return 0;
@@ -2562,7 +2586,7 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
        }
 
        /* Alloc request from PFFUNC with no NIXLF attached should be denied */
-       if (!is_nixlf_attached(rvu, pcifunc))
+       if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_ALLOC_DENIED;
 
        return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
@@ -2582,7 +2606,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
                return NPC_MCAM_INVALID_REQ;
 
        /* Free request from PFFUNC with no NIXLF attached, ignore */
-       if (!is_nixlf_attached(rvu, pcifunc))
+       if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_INVALID_REQ;
 
        mutex_lock(&mcam->lock);
@@ -2594,7 +2618,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
        if (rc)
                goto exit;
 
-       mcam->entry2pfvf_map[req->entry] = 0;
+       mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
        mcam->entry2target_pffunc[req->entry] = 0x0;
        npc_mcam_clear_bit(mcam, req->entry);
        npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -2679,13 +2703,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
        else
                nix_intf = pfvf->nix_rx_intf;
 
-       if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+       if (!is_pffunc_af(pcifunc) &&
+           npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
                rc = NPC_MCAM_INVALID_REQ;
                goto exit;
        }
 
-       if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
-                                   pcifunc)) {
+       if (!is_pffunc_af(pcifunc) &&
+           npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
                rc = NPC_MCAM_INVALID_REQ;
                goto exit;
        }
@@ -2836,7 +2861,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
                return NPC_MCAM_INVALID_REQ;
 
        /* If the request is from a PFFUNC with no NIXLF attached, ignore */
-       if (!is_nixlf_attached(rvu, pcifunc))
+       if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_INVALID_REQ;
 
        /* Since list of allocated counter IDs needs to be sent to requester,
@@ -3081,7 +3106,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
        if (rc) {
                /* Free allocated MCAM entry */
                mutex_lock(&mcam->lock);
-               mcam->entry2pfvf_map[entry] = 0;
+               mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
                npc_mcam_clear_bit(mcam, entry);
                mutex_unlock(&mcam->lock);
                return rc;
index 68633145a8b80956fc95199aa1057f66a1779853..5c01cf4a9c5bb073d2156be1ee0b866ca1eb0fd8 100644 (file)
@@ -910,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
 
 static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct mcam_entry *entry,
-                               struct npc_install_flow_req *req, u16 target)
+                               struct npc_install_flow_req *req,
+                               u16 target, bool pf_set_vfs_mac)
 {
+       struct rvu_switch *rswitch = &rvu->rswitch;
        struct nix_rx_action action;
-       u64 chan_mask;
 
-       chan_mask = req->chan_mask ? req->chan_mask : ~0ULL;
-       npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0,
-                        NIX_INTF_RX);
+       if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+               req->chan_mask = 0x0; /* Do not care channel */
+
+       npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+                        0, NIX_INTF_RX);
 
        *(u64 *)&action = 0x00;
        action.pf_func = target;
@@ -949,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct npc_install_flow_req *req, u16 target)
 {
        struct nix_tx_action action;
+       u64 mask = ~0ULL;
+
+       /* If AF is installing then do not care about
+        * PF_FUNC in Send Descriptor
+        */
+       if (is_pffunc_af(req->hdr.pcifunc))
+               mask = 0;
 
        npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
-                        0, ~0ULL, 0, NIX_INTF_TX);
+                        0, mask, 0, NIX_INTF_TX);
 
        *(u64 *)&action = 0x00;
        action.op = req->op;
@@ -1002,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
                        req->intf);
 
        if (is_npc_intf_rx(req->intf))
-               npc_update_rx_entry(rvu, pfvf, entry, req, target);
+               npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
        else
                npc_update_tx_entry(rvu, pfvf, entry, req, target);
 
@@ -1164,7 +1174,9 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
        if (err)
                return err;
 
-       if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+       /* Skip channel validation if AF is installing */
+       if (!is_pffunc_af(req->hdr.pcifunc) &&
+           npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
                return -EINVAL;
 
        pfvf = rvu_get_pfvf(rvu, target);
@@ -1180,6 +1192,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
                eth_broadcast_addr((u8 *)&req->mask.dmac);
        }
 
+       /* Proceed if NIXLF is attached or not for TX rules */
        err = nix_get_nixlf(rvu, target, &nixlf, NULL);
        if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
                return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644 (file)
index 0000000..2e53797
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+                                     u16 chan_mask)
+{
+       struct npc_install_flow_req req = { 0 };
+       struct npc_install_flow_rsp rsp = { 0 };
+       struct rvu_pfvf *pfvf;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       /* If the pcifunc is not initialized then nothing to do.
+        * This same function will be called again via rvu_switch_update_rules
+        * after pcifunc is initialized.
+        */
+       if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+               return 0;
+
+       ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+       eth_broadcast_addr((u8 *)&req.mask.dmac);
+       req.hdr.pcifunc = 0; /* AF is requester */
+       req.vf = pcifunc;
+       req.features = BIT_ULL(NPC_DMAC);
+       req.channel = pfvf->rx_chan_base;
+       req.chan_mask = chan_mask;
+       req.intf = pfvf->nix_rx_intf;
+       req.op = NIX_RX_ACTION_DEFAULT;
+       req.default_rule = 1;
+
+       return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+       struct npc_install_flow_req req = { 0 };
+       struct npc_install_flow_rsp rsp = { 0 };
+       struct rvu_pfvf *pfvf;
+       u8 lbkid;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       /* If the pcifunc is not initialized then nothing to do.
+        * This same function will be called again via rvu_switch_update_rules
+        * after pcifunc is initialized.
+        */
+       if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+               return 0;
+
+       lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+       ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+       eth_broadcast_addr((u8 *)&req.mask.dmac);
+       req.hdr.pcifunc = 0; /* AF is requester */
+       req.vf = pcifunc;
+       req.entry = entry;
+       req.features = BIT_ULL(NPC_DMAC);
+       req.intf = pfvf->nix_tx_intf;
+       req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+       req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+       req.set_cntr = 1;
+
+       return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       u16 start = rswitch->start_entry;
+       struct rvu_hwinfo *hw = rvu->hw;
+       int pf, vf, numvfs, hwvf;
+       u16 pcifunc, entry = 0;
+       int err;
+
+       for (pf = 1; pf < hw->total_pfs; pf++) {
+               if (!is_pf_cgxmapped(rvu, pf))
+                       continue;
+
+               pcifunc = pf << 10;
+               /* rvu_get_nix_blkaddr sets up the corresponding NIX block
+                * address and NIX RX and TX interfaces for a pcifunc.
+                * Generally it is called during attach call of a pcifunc but it
+                * is called here since we are pre-installing rules before
+                * nixlfs are attached
+                */
+               rvu_get_nix_blkaddr(rvu, pcifunc);
+
+               /* MCAM RX rule for a PF/VF already exists as default unicast
+                * rules installed by AF. Hence change the channel in those
+                * rules to ignore channel so that packets with the required
+                * DMAC received from LBK(by other PF/VFs in system) or from
+                * external world (from wire) are accepted.
+                */
+               err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+               if (err) {
+                       dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+                               pf, err);
+                       return err;
+               }
+
+               err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+               if (err) {
+                       dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+                               pf, err);
+                       return err;
+               }
+
+               rswitch->entry2pcifunc[entry++] = pcifunc;
+
+               rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+               for (vf = 0; vf < numvfs; vf++, hwvf++) {
+                       pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+                       rvu_get_nix_blkaddr(rvu, pcifunc);
+
+                       err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+                       if (err) {
+                               dev_err(rvu->dev,
+                                       "RX rule for PF%dVF%d failed(%d)\n",
+                                       pf, vf, err);
+                               return err;
+                       }
+
+                       err = rvu_switch_install_tx_rule(rvu, pcifunc,
+                                                        start + entry);
+                       if (err) {
+                               dev_err(rvu->dev,
+                                       "TX rule for PF%dVF%d failed(%d)\n",
+                                       pf, vf, err);
+                               return err;
+                       }
+
+                       rswitch->entry2pcifunc[entry++] = pcifunc;
+               }
+       }
+
+       return 0;
+}
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+       struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+       struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+       struct npc_delete_flow_req uninstall_req = { 0 };
+       struct npc_mcam_free_entry_req free_req = { 0 };
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       struct msg_rsp rsp;
+       int ret;
+
+       alloc_req.contig = true;
+       alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+       ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+                                                   &alloc_rsp);
+       if (ret) {
+               dev_err(rvu->dev,
+                       "Unable to allocate MCAM entries\n");
+               goto exit;
+       }
+
+       if (alloc_rsp.count != alloc_req.count) {
+               dev_err(rvu->dev,
+                       "Unable to allocate %d MCAM entries, got %d\n",
+                       alloc_req.count, alloc_rsp.count);
+               goto free_entries;
+       }
+
+       rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+                                        GFP_KERNEL);
+       if (!rswitch->entry2pcifunc)
+               goto free_entries;
+
+       rswitch->used_entries = alloc_rsp.count;
+       rswitch->start_entry = alloc_rsp.entry;
+
+       ret = rvu_switch_install_rules(rvu);
+       if (ret)
+               goto uninstall_rules;
+
+       return;
+
+uninstall_rules:
+       uninstall_req.start = rswitch->start_entry;
+       uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
+       rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+       kfree(rswitch->entry2pcifunc);
+free_entries:
+       free_req.all = 1;
+       rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+       return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+       struct npc_delete_flow_req uninstall_req = { 0 };
+       struct npc_mcam_free_entry_req free_req = { 0 };
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       struct rvu_hwinfo *hw = rvu->hw;
+       int pf, vf, numvfs, hwvf;
+       struct msg_rsp rsp;
+       u16 pcifunc;
+       int err;
+
+       if (!rswitch->used_entries)
+               return;
+
+       for (pf = 1; pf < hw->total_pfs; pf++) {
+               if (!is_pf_cgxmapped(rvu, pf))
+                       continue;
+
+               pcifunc = pf << 10;
+               err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+               if (err)
+                       dev_err(rvu->dev,
+                               "Reverting RX rule for PF%d failed(%d)\n",
+                               pf, err);
+
+               for (vf = 0; vf < numvfs; vf++, hwvf++) {
+                       pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+                       err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+                       if (err)
+                               dev_err(rvu->dev,
+                                       "Reverting RX rule for PF%dVF%d failed(%d)\n",
+                                       pf, vf, err);
+               }
+       }
+
+       uninstall_req.start = rswitch->start_entry;
+       uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
+       free_req.all = 1;
+       rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+       rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+       rswitch->used_entries = 0;
+       kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       u32 max = rswitch->used_entries;
+       u16 entry;
+
+       if (!rswitch->used_entries)
+               return;
+
+       for (entry = 0; entry < max; entry++) {
+               if (rswitch->entry2pcifunc[entry] == pcifunc)
+                       break;
+       }
+
+       if (entry >= max)
+               return;
+
+       rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+       rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
index ac403d43c74c07246e0bc4d7399ba8c4676ac6b5..7bdbb2d09a148226088423eaf83b7fb79335c23c 100644 (file)
@@ -3,6 +3,7 @@ config SPARX5_SWITCH
        depends on NET_SWITCHDEV
        depends on HAS_IOMEM
        depends on OF
+       depends on ARCH_SPARX5 || COMPILE_TEST
        select PHYLINK
        select PHY_SPARX5_SERDES
        select RESET_CONTROLLER
index f744557c33a3f231abd4e8e871a44068e3e11164..c7af5bc3b8afff79a45b029144d0d94f6fca9cf1 100644 (file)
@@ -5084,7 +5084,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
        new_bus->priv = tp;
        new_bus->parent = &pdev->dev;
        new_bus->irq[0] = PHY_MAC_INTERRUPT;
-       snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
+                pci_domain_nr(pdev->bus), pci_dev_id(pdev));
 
        new_bus->read = r8169_mdio_read_reg;
        new_bus->write = r8169_mdio_write_reg;
index 86a1eb0634e80a09a0d963d330b77679a0a830b2..80e62ca2e3d32fb1d06e7da7f94e8bbbcfaec856 100644 (file)
@@ -864,7 +864,7 @@ enum GECMR_BIT {
 
 /* The Ethernet AVB descriptor definitions. */
 struct ravb_desc {
-       __le16 ds;              /* Descriptor size */
+       __le16 ds;      /* Descriptor size */
        u8 cc;          /* Content control MSBs (reserved) */
        u8 die_dt;      /* Descriptor interrupt enable and type */
        __le32 dptr;    /* Descriptor pointer */
index 69c50f81e1cb293bb7d9a63320480c099e2e8d9e..805397088850dc527a793b272a08a6e4f95b6bfd 100644 (file)
@@ -920,7 +920,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
        if (ravb_rx(ndev, &quota, q))
                goto out;
 
-       /* Processing RX Descriptor Ring */
+       /* Processing TX Descriptor Ring */
        spin_lock_irqsave(&priv->lock, flags);
        /* Clear TX interrupt */
        ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
index 99d4d9439d059d0809347c8021340d7e66c205f0..a6fb88fd42f71ed8972060e07c3d8e612714737a 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/kernel.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/soc/ixp4xx/cpu.h>
+#include <linux/module.h>
+#include <mach/ixp4xx-regs.h>
 
 #include "ixp46x_ts.h"
 
index 63006838bdccd5b9b7d9029032051058be3d4519..dec96e8ab56791341181f0168c51523fc1eb17e9 100644 (file)
@@ -2495,7 +2495,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
                           hso_net_init);
        if (!net) {
                dev_err(&interface->dev, "Unable to create ethernet device\n");
-               goto exit;
+               goto err_hso_dev;
        }
 
        hso_net = netdev_priv(net);
@@ -2508,13 +2508,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
                                      USB_DIR_IN);
        if (!hso_net->in_endp) {
                dev_err(&interface->dev, "Can't find BULK IN endpoint\n");
-               goto exit;
+               goto err_net;
        }
        hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK,
                                       USB_DIR_OUT);
        if (!hso_net->out_endp) {
                dev_err(&interface->dev, "Can't find BULK OUT endpoint\n");
-               goto exit;
+               goto err_net;
        }
        SET_NETDEV_DEV(net, &interface->dev);
        SET_NETDEV_DEVTYPE(net, &hso_type);
@@ -2523,18 +2523,18 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
                hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
                if (!hso_net->mux_bulk_rx_urb_pool[i])
-                       goto exit;
+                       goto err_mux_bulk_rx;
                hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
                                                           GFP_KERNEL);
                if (!hso_net->mux_bulk_rx_buf_pool[i])
-                       goto exit;
+                       goto err_mux_bulk_rx;
        }
        hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!hso_net->mux_bulk_tx_urb)
-               goto exit;
+               goto err_mux_bulk_rx;
        hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
        if (!hso_net->mux_bulk_tx_buf)
-               goto exit;
+               goto err_free_tx_urb;
 
        add_net_device(hso_dev);
 
@@ -2542,7 +2542,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        result = register_netdev(net);
        if (result) {
                dev_err(&interface->dev, "Failed to register device\n");
-               goto exit;
+               goto err_free_tx_buf;
        }
 
        hso_log_port(hso_dev);
@@ -2550,8 +2550,21 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        hso_create_rfkill(hso_dev, interface);
 
        return hso_dev;
-exit:
-       hso_free_net_device(hso_dev, true);
+
+err_free_tx_buf:
+       remove_net_device(hso_dev);
+       kfree(hso_net->mux_bulk_tx_buf);
+err_free_tx_urb:
+       usb_free_urb(hso_net->mux_bulk_tx_urb);
+err_mux_bulk_rx:
+       for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
+               usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]);
+               kfree(hso_net->mux_bulk_rx_buf_pool[i]);
+       }
+err_net:
+       free_netdev(net);
+err_hso_dev:
+       kfree(hso_dev);
        return NULL;
 }
 
index 1692d3b1b6e19efd9df1f1e1c6e137747821e887..e09b107b5c9920ac8b39c39be80fe62df496f16d 100644 (file)
@@ -1552,7 +1552,8 @@ static int
 rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
                  u32 advertising);
 
-static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
+static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
+                                    bool in_resume)
 {
        struct r8152 *tp = netdev_priv(netdev);
        struct sockaddr *addr = p;
@@ -1561,9 +1562,11 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                goto out1;
 
-       ret = usb_autopm_get_interface(tp->intf);
-       if (ret < 0)
-               goto out1;
+       if (!in_resume) {
+               ret = usb_autopm_get_interface(tp->intf);
+               if (ret < 0)
+                       goto out1;
+       }
 
        mutex_lock(&tp->control);
 
@@ -1575,11 +1578,17 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
 
        mutex_unlock(&tp->control);
 
-       usb_autopm_put_interface(tp->intf);
+       if (!in_resume)
+               usb_autopm_put_interface(tp->intf);
 out1:
        return ret;
 }
 
+static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
+{
+       return __rtl8152_set_mac_address(netdev, p, false);
+}
+
 /* Devices containing proper chips can support a persistent
  * host system provided MAC address.
  * Examples of this are Dell TB15 and Dell WD15 docks
@@ -1698,7 +1707,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
        return ret;
 }
 
-static int set_ethernet_addr(struct r8152 *tp)
+static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
 {
        struct net_device *dev = tp->netdev;
        struct sockaddr sa;
@@ -1711,7 +1720,7 @@ static int set_ethernet_addr(struct r8152 *tp)
        if (tp->version == RTL_VER_01)
                ether_addr_copy(dev->dev_addr, sa.sa_data);
        else
-               ret = rtl8152_set_mac_address(dev, &sa);
+               ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
 
        return ret;
 }
@@ -6763,9 +6772,10 @@ static int rtl8152_close(struct net_device *netdev)
                tp->rtl_ops.down(tp);
 
                mutex_unlock(&tp->control);
+       }
 
+       if (!res)
                usb_autopm_put_interface(tp->intf);
-       }
 
        free_all_mem(tp);
 
@@ -8443,7 +8453,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        tp->rtl_ops.init(tp);
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-       set_ethernet_addr(tp);
+       set_ethernet_addr(tp, true);
        return rtl8152_resume(intf);
 }
 
@@ -9644,7 +9654,7 @@ static int rtl8152_probe(struct usb_interface *intf,
        tp->rtl_fw.retry = true;
 #endif
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-       set_ethernet_addr(tp);
+       set_ethernet_addr(tp, false);
 
        usb_set_intfdata(intf, tp);
 
index 11779be4218686a353b230772bef20d28a2de510..dfd9dec0c1f6053b3d5de104d2f0de0c81cfa98a 100644 (file)
@@ -900,7 +900,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
                cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->write_zeroes.length =
                cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-       cmnd->write_zeroes.control = 0;
+       if (nvme_ns_has_pi(ns))
+               cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
+       else
+               cmnd->write_zeroes.control = 0;
        return BLK_STS_OK;
 }
 
@@ -3807,6 +3810,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
+       bool last_path = false;
+
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
@@ -3815,8 +3820,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
-       if (list_empty(&ns->head->list))
-               list_del_init(&ns->head->entry);
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        synchronize_rcu(); /* guarantee not available in head->list */
@@ -3836,7 +3839,15 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       nvme_mpath_check_last_path(ns);
+       /* Synchronize with nvme_init_ns_head() */
+       mutex_lock(&ns->head->subsys->lock);
+       if (list_empty(&ns->head->list)) {
+               list_del_init(&ns->head->entry);
+               last_path = true;
+       }
+       mutex_unlock(&ns->head->subsys->lock);
+       if (last_path)
+               nvme_mpath_shutdown_disk(ns->head);
        nvme_put_ns(ns);
 }
 
index 0ea5298469c3493e123c0b44eb6c0ede4377f44b..3f32c5e86bfcb202cf331959dcff71ec43127efe 100644 (file)
@@ -760,14 +760,21 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 #endif
 }
 
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
                return;
+       kblockd_schedule_work(&head->requeue_work);
        if (head->disk->flags & GENHD_FL_UP) {
                nvme_cdev_del(&head->cdev, &head->cdev_device);
                del_gendisk(head->disk);
        }
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+       if (!head->disk)
+               return;
        blk_set_queue_dying(head->disk->queue);
        /* make sure all pending bios are cleaned up */
        kblockd_schedule_work(&head->requeue_work);
index 18ef8dd03a90e2fd2c807befb0ef154fc1efa426..5cd1fa3b8464db37f78ecce70250b4e62a52cf45 100644 (file)
@@ -716,14 +716,7 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-{
-       struct nvme_ns_head *head = ns->head;
-
-       if (head->disk && list_empty(&head->list))
-               kblockd_schedule_work(&head->requeue_work);
-}
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
 
 static inline void nvme_trace_bio_complete(struct request *req)
 {
@@ -772,7 +765,7 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 }
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 }
 static inline void nvme_trace_bio_complete(struct request *req)
index 320051f5a3ddcabce7083622588138d37486cede..51852085239ef90cd1af400a3d6ceaf197d96854 100644 (file)
@@ -2631,7 +2631,9 @@ static void nvme_reset_work(struct work_struct *work)
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result;
 
-       if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+       if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+               dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+                        dev->ctrl.state);
                result = -ENODEV;
                goto out;
        }
index daaf700eae799612de0ba3a3836750deb55d51ef..35bac7a254227828b80bafe940fb0a2dc0d24be0 100644 (file)
@@ -56,7 +56,7 @@ TRACE_EVENT(nvme_setup_cmd,
                __field(u8, fctype)
                __field(u16, cid)
                __field(u32, nsid)
-               __field(u64, metadata)
+               __field(bool, metadata)
                __array(u8, cdw10, 24)
            ),
            TP_fast_assign(
@@ -66,13 +66,13 @@ TRACE_EVENT(nvme_setup_cmd,
                __entry->flags = cmd->common.flags;
                __entry->cid = cmd->common.command_id;
                __entry->nsid = le32_to_cpu(cmd->common.nsid);
-               __entry->metadata = le64_to_cpu(cmd->common.metadata);
+               __entry->metadata = !!blk_integrity_rq(req);
                __entry->fctype = cmd->fabrics.fctype;
                __assign_disk_name(__entry->disk, req->rq_disk);
                memcpy(__entry->cdw10, &cmd->common.cdw10,
                        sizeof(__entry->cdw10));
            ),
-           TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+           TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
                      __entry->ctrl_id, __print_disk_name(__entry->disk),
                      __entry->qid, __entry->cid, __entry->nsid,
                      __entry->flags, __entry->metadata,
index e16c3727db7ad9fb720b50dbd62cc0067efc849b..aa42da4d141edd350a52795a1a4b413c36b9cb42 100644 (file)
@@ -294,9 +294,9 @@ static bool check_temp_flag_mismatch(struct regulator_dev *rdev, int severity,
                                    struct bd957x_regulator_data *r)
 {
        if ((severity == REGULATOR_SEVERITY_ERR &&
-            r->ovd_notif != REGULATOR_EVENT_OVER_TEMP) ||
+            r->temp_notif != REGULATOR_EVENT_OVER_TEMP) ||
             (severity == REGULATOR_SEVERITY_WARN &&
-            r->ovd_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
+            r->temp_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
                dev_warn(rdev_get_dev(rdev),
                         "Can't support both thermal WARN and ERR\n");
                if (severity == REGULATOR_SEVERITY_WARN)
index bff8c515dcde7d35418586afcf2157939b4fe188..d144a4bdb76dabf15ba3f7386a3dabec59a42020 100644 (file)
@@ -366,9 +366,8 @@ static struct hi6421_regulator_info
 
 static int hi6421_regulator_enable(struct regulator_dev *rdev)
 {
-       struct hi6421_regulator_pdata *pdata;
+       struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev);
 
-       pdata = dev_get_drvdata(rdev->dev.parent);
        /* hi6421 spec requires regulator enablement must be serialized:
         *  - Because when BUCK, LDO switching from off to on, it will have
         *    a huge instantaneous current; so you can not turn on two or
@@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
 
 static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int reg_val;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
        if (reg_val & info->mode_mask)
                return REGULATOR_MODE_IDLE;
@@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
 
 static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int reg_val;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
        if (reg_val & info->mode_mask)
                return REGULATOR_MODE_STANDBY;
@@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
 static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
                                                unsigned int mode)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int new_mode;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        switch (mode) {
        case REGULATOR_MODE_NORMAL:
                new_mode = 0;
@@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
 static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
                                                unsigned int mode)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int new_mode;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        switch (mode) {
        case REGULATOR_MODE_NORMAL:
                new_mode = 0;
@@ -459,7 +462,9 @@ static unsigned int
 hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
                        int input_uV, int output_uV, int load_uA)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
+
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
 
        if (load_uA > info->eco_microamp)
                return REGULATOR_MODE_NORMAL;
@@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
        if (!pdata)
                return -ENOMEM;
        mutex_init(&pdata->lock);
-       platform_set_drvdata(pdev, pdata);
 
        for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
                /* assign per-regulator data */
                info = &hi6421_regulator_info[i];
 
                config.dev = pdev->dev.parent;
-               config.driver_data = info;
+               config.driver_data = pdata;
                config.regmap = pmic->regmap;
 
                rdev = devm_regulator_register(&pdev->dev, &info->desc,
index 9b162c0555c392da685fb582a78497debd53baf1..845bc3b4026dd2b4d4142102e26eb3cc3ca1c799 100644 (file)
@@ -98,10 +98,9 @@ static const unsigned int ldo34_voltages[] = {
 
 static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
 {
-       struct hi6421_spmi_reg_priv *priv;
+       struct hi6421_spmi_reg_priv *priv = rdev_get_drvdata(rdev);
        int ret;
 
-       priv = dev_get_drvdata(rdev->dev.parent);
        /* cannot enable more than one regulator at one time */
        mutex_lock(&priv->enable_mutex);
 
@@ -119,9 +118,10 @@ static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
 
 static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
 {
-       struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+       struct hi6421_spmi_reg_info *sreg;
        unsigned int reg_val;
 
+       sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
        regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
 
        if (reg_val & sreg->eco_mode_mask)
@@ -133,9 +133,10 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
 static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
                                          unsigned int mode)
 {
-       struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+       struct hi6421_spmi_reg_info *sreg;
        unsigned int val;
 
+       sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
        switch (mode) {
        case REGULATOR_MODE_NORMAL:
                val = 0;
@@ -159,7 +160,9 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
                                       int input_uV, int output_uV,
                                       int load_uA)
 {
-       struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+       struct hi6421_spmi_reg_info *sreg;
+
+       sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
 
        if (!sreg->eco_uA || ((unsigned int)load_uA > sreg->eco_uA))
                return REGULATOR_MODE_NORMAL;
@@ -252,13 +255,12 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        mutex_init(&priv->enable_mutex);
-       platform_set_drvdata(pdev, priv);
 
        for (i = 0; i < ARRAY_SIZE(regulator_info); i++) {
                info = &regulator_info[i];
 
                config.dev = pdev->dev.parent;
-               config.driver_data = info;
+               config.driver_data = priv;
                config.regmap = pmic->regmap;
 
                rdev = devm_regulator_register(dev, &info->desc, &config);
index d3d876198d6ece3437f1851ad66d12017643e354..234af3a66c77df8fe429bc8772a4848b6fed4e55 100644 (file)
@@ -179,8 +179,7 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev)
        for (i = 0; i < regulator_init_data->size; i++) {
                config.dev = dev->parent;
                config.driver_data = (mt_regulators + i);
-               rdev = devm_regulator_register(dev->parent,
-                                              &(mt_regulators + i)->desc,
+               rdev = devm_regulator_register(dev, &(mt_regulators + i)->desc,
                                               &config);
                if (IS_ERR(rdev)) {
                        dev_err(dev, "failed to register %s\n",
index 4bca64de0f672d0f7e4ed5b722379a2182985b3d..2ee334174e2b01dac94ac60b5d37c5f4cb7b902c 100644 (file)
@@ -37,7 +37,7 @@
 #define RTMV20_WIDTH2_MASK     GENMASK(7, 0)
 #define RTMV20_LBPLVL_MASK     GENMASK(3, 0)
 #define RTMV20_LBPEN_MASK      BIT(7)
-#define RTMV20_STROBEPOL_MASK  BIT(1)
+#define RTMV20_STROBEPOL_MASK  BIT(0)
 #define RTMV20_VSYNPOL_MASK    BIT(1)
 #define RTMV20_FSINEN_MASK     BIT(7)
 #define RTMV20_ESEN_MASK       BIT(6)
index c39955239d1ca06c8a2c57a91b561370358a54a9..19b1c0cf5f2a2b35abe38c6102b35751d02c9510 100644 (file)
@@ -2983,13 +2983,13 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * _base_free_irq - free irq
+ * mpt3sas_base_free_irq - free irq
  * @ioc: per adapter object
  *
  * Freeing respective reply_queue from the list.
  */
-static void
-_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
 {
        struct adapter_reply_queue *reply_q, *next;
 
@@ -3191,12 +3191,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * _base_disable_msix - disables msix
+ * mpt3sas_base_disable_msix - disables msix
  * @ioc: per adapter object
  *
  */
-static void
-_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
 {
        if (!ioc->msix_enable)
                return;
@@ -3304,8 +3304,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        for (i = 0; i < ioc->reply_queue_count; i++) {
                r = _base_request_irq(ioc, i);
                if (r) {
-                       _base_free_irq(ioc);
-                       _base_disable_msix(ioc);
+                       mpt3sas_base_free_irq(ioc);
+                       mpt3sas_base_disable_msix(ioc);
                        goto try_ioapic;
                }
        }
@@ -3342,8 +3342,8 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 
        dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
-       _base_free_irq(ioc);
-       _base_disable_msix(ioc);
+       mpt3sas_base_free_irq(ioc);
+       mpt3sas_base_disable_msix(ioc);
 
        kfree(ioc->replyPostRegisterIndex);
        ioc->replyPostRegisterIndex = NULL;
@@ -7613,14 +7613,14 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * _base_make_ioc_ready - put controller in READY state
+ * mpt3sas_base_make_ioc_ready - put controller in READY state
  * @ioc: per adapter object
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
  *
  * Return: 0 for success, non-zero for failure.
  */
-static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 {
        u32 ioc_state;
        int rc;
@@ -7897,7 +7897,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
        if (ioc->chip_phys && ioc->chip) {
                mpt3sas_base_mask_interrupts(ioc);
                ioc->shost_recovery = 1;
-               _base_make_ioc_ready(ioc, SOFT_RESET);
+               mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
                ioc->shost_recovery = 0;
        }
 
@@ -8017,7 +8017,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        ioc->build_sg_mpi = &_base_build_sg;
        ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
 
-       r = _base_make_ioc_ready(ioc, SOFT_RESET);
+       r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
        if (r)
                goto out_free_resources;
 
@@ -8471,7 +8471,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        _base_pre_reset_handler(ioc);
        mpt3sas_wait_for_commands_to_complete(ioc);
        mpt3sas_base_mask_interrupts(ioc);
-       r = _base_make_ioc_ready(ioc, type);
+       r = mpt3sas_base_make_ioc_ready(ioc, type);
        if (r)
                goto out;
        _base_clear_outstanding_commands(ioc);
index d4834c8ee9c0d361d279ead52b0da70653b09854..0c6c3df0038d52dc6b5e65c2087d7e5988151703 100644 (file)
@@ -1730,6 +1730,10 @@ do {     ioc_err(ioc, "In func: %s\n", __func__); \
        status, mpi_request, sz); } while (0)
 
 int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
 
 /* scsih shared API */
 struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
index 866d118f7931489ef18d263067dcea5b82e37e61..8e64a6f1454296df4e50723b8c2a356540525f72 100644 (file)
@@ -11295,7 +11295,12 @@ scsih_shutdown(struct pci_dev *pdev)
 
        _scsih_ir_shutdown(ioc);
        _scsih_nvme_shutdown(ioc);
-       mpt3sas_base_detach(ioc);
+       mpt3sas_base_mask_interrupts(ioc);
+       ioc->shost_recovery = 1;
+       mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
+       ioc->shost_recovery = 0;
+       mpt3sas_base_free_irq(ioc);
+       mpt3sas_base_disable_msix(ioc);
 }
 
 
index b07105ae7c9172c8385598bfee7096f353b0e10f..d8b05d8b54708a76b945761f91c06c9b493f6b17 100644 (file)
@@ -439,39 +439,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
        struct iscsi_transport *t = iface->transport;
-       int param;
-       int param_type;
+       int param = -1;
 
        if (attr == &dev_attr_iface_enabled.attr)
                param = ISCSI_NET_PARAM_IFACE_ENABLE;
-       else if (attr == &dev_attr_iface_vlan_id.attr)
-               param = ISCSI_NET_PARAM_VLAN_ID;
-       else if (attr == &dev_attr_iface_vlan_priority.attr)
-               param = ISCSI_NET_PARAM_VLAN_PRIORITY;
-       else if (attr == &dev_attr_iface_vlan_enabled.attr)
-               param = ISCSI_NET_PARAM_VLAN_ENABLED;
-       else if (attr == &dev_attr_iface_mtu.attr)
-               param = ISCSI_NET_PARAM_MTU;
-       else if (attr == &dev_attr_iface_port.attr)
-               param = ISCSI_NET_PARAM_PORT;
-       else if (attr == &dev_attr_iface_ipaddress_state.attr)
-               param = ISCSI_NET_PARAM_IPADDR_STATE;
-       else if (attr == &dev_attr_iface_delayed_ack_en.attr)
-               param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
-       else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
-               param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
-       else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
-               param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
-       else if (attr == &dev_attr_iface_tcp_wsf.attr)
-               param = ISCSI_NET_PARAM_TCP_WSF;
-       else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
-               param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
-       else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
-               param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
-       else if (attr == &dev_attr_iface_cache_id.attr)
-               param = ISCSI_NET_PARAM_CACHE_ID;
-       else if (attr == &dev_attr_iface_redirect_en.attr)
-               param = ISCSI_NET_PARAM_REDIRECT_EN;
        else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
                param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
        else if (attr == &dev_attr_iface_header_digest.attr)
@@ -508,6 +479,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
                param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
        else if (attr == &dev_attr_iface_initiator_name.attr)
                param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
+
+       if (param != -1)
+               return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
+
+       if (attr == &dev_attr_iface_vlan_id.attr)
+               param = ISCSI_NET_PARAM_VLAN_ID;
+       else if (attr == &dev_attr_iface_vlan_priority.attr)
+               param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+       else if (attr == &dev_attr_iface_vlan_enabled.attr)
+               param = ISCSI_NET_PARAM_VLAN_ENABLED;
+       else if (attr == &dev_attr_iface_mtu.attr)
+               param = ISCSI_NET_PARAM_MTU;
+       else if (attr == &dev_attr_iface_port.attr)
+               param = ISCSI_NET_PARAM_PORT;
+       else if (attr == &dev_attr_iface_ipaddress_state.attr)
+               param = ISCSI_NET_PARAM_IPADDR_STATE;
+       else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+               param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+       else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+               param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+       else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+               param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+       else if (attr == &dev_attr_iface_tcp_wsf.attr)
+               param = ISCSI_NET_PARAM_TCP_WSF;
+       else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+               param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+       else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+               param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+       else if (attr == &dev_attr_iface_cache_id.attr)
+               param = ISCSI_NET_PARAM_CACHE_ID;
+       else if (attr == &dev_attr_iface_redirect_en.attr)
+               param = ISCSI_NET_PARAM_REDIRECT_EN;
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
                if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
                        param = ISCSI_NET_PARAM_IPV4_ADDR;
@@ -598,32 +601,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
                return 0;
        }
 
-       switch (param) {
-       case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
-       case ISCSI_IFACE_PARAM_HDRDGST_EN:
-       case ISCSI_IFACE_PARAM_DATADGST_EN:
-       case ISCSI_IFACE_PARAM_IMM_DATA_EN:
-       case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
-       case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
-       case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
-       case ISCSI_IFACE_PARAM_ERL:
-       case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
-       case ISCSI_IFACE_PARAM_FIRST_BURST:
-       case ISCSI_IFACE_PARAM_MAX_R2T:
-       case ISCSI_IFACE_PARAM_MAX_BURST:
-       case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
-       case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
-       case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
-       case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
-       case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
-       case ISCSI_IFACE_PARAM_INITIATOR_NAME:
-               param_type = ISCSI_IFACE_PARAM;
-               break;
-       default:
-               param_type = ISCSI_NET_PARAM;
-       }
-
-       return t->attr_is_visible(param_type, param);
+       return t->attr_is_visible(ISCSI_NET_PARAM, param);
 }
 
 static struct attribute *iscsi_iface_attrs[] = {
index 2ef74885ffa2f98695edadfd898989e4e304b239..788dcdf25f003019cd76757b6435e97a94a434ff 100644 (file)
@@ -352,8 +352,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
                }
 
                mr = spi_readl(as, MR);
-               if (spi->cs_gpiod)
-                       gpiod_set_value(spi->cs_gpiod, 1);
        } else {
                u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
                int i;
@@ -369,8 +367,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
 
                mr = spi_readl(as, MR);
                mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
-               if (spi->cs_gpiod)
-                       gpiod_set_value(spi->cs_gpiod, 1);
                spi_writel(as, MR, mr);
        }
 
@@ -400,8 +396,6 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
 
        if (!spi->cs_gpiod)
                spi_writel(as, CR, SPI_BIT(LASTXFER));
-       else
-               gpiod_set_value(spi->cs_gpiod, 0);
 }
 
 static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -1483,7 +1477,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
        master->bus_num = pdev->id;
        master->num_chipselect = 4;
        master->setup = atmel_spi_setup;
-       master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
+       master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
+                       SPI_MASTER_GPIO_SS);
        master->transfer_one = atmel_spi_one_transfer;
        master->set_cs = atmel_spi_set_cs;
        master->cleanup = atmel_spi_cleanup;
index 5f8771fe1a31d08dded2d666c12127d510f233fc..775c0bf2f923d4818fc940e9b0eb11555862d7d4 100644 (file)
@@ -83,6 +83,7 @@ MODULE_PARM_DESC(polling_limit_us,
  * struct bcm2835_spi - BCM2835 SPI controller
  * @regs: base address of register map
  * @clk: core clock, divided to calculate serial clock
+ * @clk_hz: core clock cached speed
  * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
  * @tfr: SPI transfer currently processed
  * @ctlr: SPI controller reverse lookup
@@ -116,6 +117,7 @@ MODULE_PARM_DESC(polling_limit_us,
 struct bcm2835_spi {
        void __iomem *regs;
        struct clk *clk;
+       unsigned long clk_hz;
        int irq;
        struct spi_transfer *tfr;
        struct spi_controller *ctlr;
@@ -1045,19 +1047,18 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 {
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
        struct bcm2835_spidev *slv = spi_get_ctldata(spi);
-       unsigned long spi_hz, clk_hz, cdiv;
+       unsigned long spi_hz, cdiv;
        unsigned long hz_per_byte, byte_limit;
        u32 cs = slv->prepare_cs;
 
        /* set clock */
        spi_hz = tfr->speed_hz;
-       clk_hz = clk_get_rate(bs->clk);
 
-       if (spi_hz >= clk_hz / 2) {
+       if (spi_hz >= bs->clk_hz / 2) {
                cdiv = 2; /* clk_hz/2 is the fastest we can go */
        } else if (spi_hz) {
                /* CDIV must be a multiple of two */
-               cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
+               cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
                cdiv += (cdiv % 2);
 
                if (cdiv >= 65536)
@@ -1065,7 +1066,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
        } else {
                cdiv = 0; /* 0 is the slowest we can go */
        }
-       tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+       tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
        bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
        /* handle all the 3-wire mode */
@@ -1354,6 +1355,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
                return bs->irq ? bs->irq : -ENODEV;
 
        clk_prepare_enable(bs->clk);
+       bs->clk_hz = clk_get_rate(bs->clk);
 
        err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
        if (err)
index 7a00346ff9b92b61ac18c9e2021bea580ba8cbad..a2de23516553c18c7cc1e266a2d430ea94658802 100644 (file)
@@ -309,6 +309,9 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
 {
        unsigned int dummy_clk;
 
+       if (!op->dummy.nbytes)
+               return 0;
+
        dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
        if (dtr)
                dummy_clk /= 2;
@@ -797,19 +800,20 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
        reg = cqspi_calc_rdreg(f_pdata);
        writel(reg, reg_base + CQSPI_REG_RD_INSTR);
 
-       if (f_pdata->dtr) {
-               /*
-                * Some flashes like the cypress Semper flash expect a 4-byte
-                * dummy address with the Read SR command in DTR mode, but this
-                * controller does not support sending address with the Read SR
-                * command. So, disable write completion polling on the
-                * controller's side. spi-nor will take care of polling the
-                * status register.
-                */
-               reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
-               reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
-               writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
-       }
+       /*
+        * SPI NAND flashes require the address of the status register to be
+        * passed in the Read SR command. Also, some SPI NOR flashes like the
+        * cypress Semper flash expect a 4-byte dummy address in the Read SR
+        * command in DTR mode.
+        *
+        * But this controller does not support address phase in the Read SR
+        * command when doing auto-HW polling. So, disable write completion
+        * polling on the controller's side. spinand and spi-nor will take
+        * care of polling the status register.
+        */
+       reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+       reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+       writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
 
        reg = readl(reg_base + CQSPI_REG_SIZE);
        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
index a3afd1b9ac567bcc48c809edef5d0509ae5dd0aa..ceb16e70d235af4014f1266dfc786a8d57dfd68c 100644 (file)
@@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
                goto clk_dis_apb;
        }
 
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
        ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
        if (ret < 0)
                master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
        /* SPI controller initializations */
        cdns_spi_init_hw(xspi);
 
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
-
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                ret = -ENXIO;
@@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
 
        master->bits_per_word_mask = SPI_BPW_MASK(8);
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
        ret = spi_register_master(master);
        if (ret) {
                dev_err(&pdev->dev, "spi_register_master failed\n");
index 39dc02e366f4b4a14b670739965fbc8149630a97..4aee3db6d6df0a65cf7d88c0b0bfa7c2ba4a0b4a 100644 (file)
@@ -506,7 +506,7 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
 {
        struct spi_device *spi = msg->spi;
        u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
-       u32 testreg;
+       u32 testreg, delay;
        u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
        /* set Master or Slave mode */
@@ -567,6 +567,23 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
 
        writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
 
+       /*
+        * Wait until the changes in the configuration register CONFIGREG
+        * propagate into the hardware. It takes exactly one tick of the
+        * SCLK clock, but we will wait two SCLK clock just to be sure. The
+        * effect of the delay it takes for the hardware to apply changes
+        * is noticable if the SCLK clock run very slow. In such a case, if
+        * the polarity of SCLK should be inverted, the GPIO ChipSelect might
+        * be asserted before the SCLK polarity changes, which would disrupt
+        * the SPI communication as the device on the other end would consider
+        * the change of SCLK polarity as a clock tick already.
+        */
+       delay = (2 * 1000000) / spi_imx->spi_bus_clk;
+       if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+               udelay(delay);
+       else                    /* SCLK is _very_ slow */
+               usleep_range(delay, delay + 10);
+
        return 0;
 }
 
@@ -574,7 +591,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
                                       struct spi_device *spi)
 {
        u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
-       u32 clk, delay;
+       u32 clk;
 
        /* Clear BL field and set the right value */
        ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
@@ -596,23 +613,6 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
 
        writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
 
-       /*
-        * Wait until the changes in the configuration register CONFIGREG
-        * propagate into the hardware. It takes exactly one tick of the
-        * SCLK clock, but we will wait two SCLK clock just to be sure. The
-        * effect of the delay it takes for the hardware to apply changes
-        * is noticable if the SCLK clock run very slow. In such a case, if
-        * the polarity of SCLK should be inverted, the GPIO ChipSelect might
-        * be asserted before the SCLK polarity changes, which would disrupt
-        * the SPI communication as the device on the other end would consider
-        * the change of SCLK polarity as a clock tick already.
-        */
-       delay = (2 * 1000000) / clk;
-       if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
-               udelay(delay);
-       else                    /* SCLK is _very_ slow */
-               usleep_range(delay, delay + 10);
-
        return 0;
 }
 
index 976f73b9e2998334df86fc3c22f87bee56301f28..68dca8ceb3ad7db433094322dfc001aab0cf4df6 100644 (file)
@@ -427,13 +427,23 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
        mtk_spi_setup_packet(master);
 
        cnt = xfer->len / 4;
-       iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+       if (xfer->tx_buf)
+               iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+
+       if (xfer->rx_buf)
+               ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
 
        remainder = xfer->len % 4;
        if (remainder > 0) {
                reg_val = 0;
-               memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
-               writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+               if (xfer->tx_buf) {
+                       memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
+                       writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+               }
+               if (xfer->rx_buf) {
+                       reg_val = readl(mdata->base + SPI_RX_DATA_REG);
+                       memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
+               }
        }
 
        mtk_spi_enable_transfer(master);
@@ -793,12 +803,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
 
        pm_runtime_enable(&pdev->dev);
 
-       ret = devm_spi_register_master(&pdev->dev, master);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
-               goto err_disable_runtime_pm;
-       }
-
        if (mdata->dev_comp->need_pad_sel) {
                if (mdata->pad_num != master->num_chipselect) {
                        dev_err(&pdev->dev,
@@ -838,6 +842,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
                dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
                           addr_bits, ret);
 
+       ret = devm_spi_register_master(&pdev->dev, master);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+               goto err_disable_runtime_pm;
+       }
+
        return 0;
 
 err_disable_runtime_pm:
index 8ffcffbb81571c98bcc36f0840d18c6c786550d4..05618a618939c0114094aa0ee7340c93abcb9eef 100644 (file)
@@ -884,15 +884,18 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
        ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
 
        mask = ier;
-       /* EOTIE is triggered on EOT, SUSP and TXC events. */
+       /*
+        * EOTIE enables irq from EOT, SUSP and TXC events. We need to set
+        * SUSP to acknowledge it later. TXC is automatically cleared
+        */
+
        mask |= STM32H7_SPI_SR_SUSP;
        /*
-        * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
-        * Full-Duplex, need to poll RXP event to know if there are remaining
-        * data, before disabling SPI.
+        * DXPIE is set in Full-Duplex, one IT will be raised if TXP and RXP
+        * are set. So in case of Full-Duplex, need to poll TXP and RXP event.
         */
-       if (spi->rx_buf && !spi->cur_usedma)
-               mask |= STM32H7_SPI_SR_RXP;
+       if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma)
+               mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
 
        if (!(sr & mask)) {
                dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
@@ -1925,6 +1928,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
                master->can_dma = stm32_spi_can_dma;
 
        pm_runtime_set_active(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
        ret = spi_register_master(master);
@@ -1940,6 +1944,8 @@ static int stm32_spi_probe(struct platform_device *pdev)
 
 err_pm_disable:
        pm_runtime_disable(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
 err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
@@ -1956,9 +1962,14 @@ static int stm32_spi_remove(struct platform_device *pdev)
        struct spi_master *master = platform_get_drvdata(pdev);
        struct stm32_spi *spi = spi_master_get_devdata(master);
 
+       pm_runtime_get_sync(&pdev->dev);
+
        spi_unregister_master(master);
        spi->cfg->disable(spi);
 
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
        if (master->dma_tx)
                dma_release_channel(master->dma_tx);
        if (master->dma_rx)
@@ -1966,7 +1977,6 @@ static int stm32_spi_remove(struct platform_device *pdev)
 
        clk_disable_unprepare(spi->clk);
 
-       pm_runtime_disable(&pdev->dev);
 
        pinctrl_pm_select_sleep_state(&pdev->dev);
 
index b32f4ee88e79fdfc31dd202dc41390b1773836cb..ca1b2312d6e7b2b75cfa50f9763d143922e7064b 100644 (file)
@@ -25,7 +25,7 @@
 #include "target_core_alua.h"
 
 static sense_reason_t
-sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
 static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
 
 static sense_reason_t
@@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 }
 
 static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
 {
        struct se_device *dev = cmd->se_dev;
        sector_t end_lba = dev->transport->get_blocks(dev) + 1;
        unsigned int sectors = sbc_get_write_same_sectors(cmd);
        sense_reason_t ret;
 
-       if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+       if ((flags & 0x04) || (flags & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
                        " bits not supported for Block Discard"
                        " Emulation\n");
@@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
        }
 
        /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
-       if (flags[0] & 0x10) {
+       if (flags & 0x10) {
                pr_warn("WRITE SAME with ANCHOR not supported\n");
                return TCM_INVALID_CDB_FIELD;
        }
@@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
         * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
         * translated into block discard requests within backend code.
         */
-       if (flags[0] & 0x08) {
+       if (flags & 0x08) {
                if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
@@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
        if (!ops->execute_write_same)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
-       ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+       ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
        if (ret)
                return ret;
 
@@ -717,10 +717,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_
 }
 
 static sense_reason_t
-sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
               u32 sectors, bool is_write)
 {
-       u8 protect = cdb[1] >> 5;
        int sp_ops = cmd->se_sess->sup_prot_ops;
        int pi_prot_type = dev->dev_attrib.pi_prot_type;
        bool fabric_prot = false;
@@ -768,7 +767,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                fallthrough;
        default:
                pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
-                      "PROTECT: 0x%02x\n", cdb[0], protect);
+                      "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
                return TCM_INVALID_CDB_FIELD;
        }
 
@@ -843,7 +842,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;
 
@@ -857,7 +856,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;
 
@@ -871,7 +870,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;
 
@@ -892,7 +891,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;
 
@@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;
 
@@ -921,7 +920,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;
 
@@ -980,7 +979,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        size = sbc_get_size(cmd, 1);
                        cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 
-                       ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+                       ret = sbc_setup_write_same(cmd, cdb[10], ops);
                        if (ret)
                                return ret;
                        break;
@@ -1079,7 +1078,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                size = sbc_get_size(cmd, 1);
                cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
 
-               ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+               ret = sbc_setup_write_same(cmd, cdb[1], ops);
                if (ret)
                        return ret;
                break;
@@ -1097,7 +1096,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
-               ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+               ret = sbc_setup_write_same(cmd, cdb[1], ops);
                if (ret)
                        return ret;
                break;
index 7e35eddd9eb700f36d52db375dd17b0d35ecee83..26ceabe34de554b1cc64af907efb2adc87a8fcd0 100644 (file)
@@ -886,7 +886,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
        INIT_WORK(&cmd->work, success ? target_complete_ok_work :
                  target_complete_failure_work);
 
-       if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+       if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
                cpu = cmd->cpuid;
        else
                cpu = wwn->cmd_compl_affinity;
index fdf79bcf7eb09e50ae223d60df3348727375fa7e..35d5908b5478aad688d6ddae66b72c8d3377ab4d 100644 (file)
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
 };
 
 /* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN
+#ifdef CONFIG_WWAN_CORE
 static int wdm_wwan_port_start(struct wwan_port *port)
 {
        struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
        /* inbuf has been copied, it is safe to check for outstanding data */
        schedule_work(&desc->service_outs_intr);
 }
-#else /* CONFIG_WWAN */
+#else /* CONFIG_WWAN_CORE */
 static void wdm_wwan_init(struct wdm_device *desc) {}
 static void wdm_wwan_deinit(struct wdm_device *desc) {}
 static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN */
+#endif /* CONFIG_WWAN_CORE */
 
 /* --- error handling --- */
 static void wdm_rxwork(struct work_struct *work)
index b97464498763947d92bd28c4757c1536a05f7e63..9618ba622a2d06c3d4bd59e7c9dd6edd3429aafd 100644 (file)
@@ -1133,7 +1133,7 @@ static int do_proc_control(struct usb_dev_state *ps,
                "wIndex=%04x wLength=%04x\n",
                ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
                ctrl->wIndex, ctrl->wLength);
-       if (ctrl->bRequestType & 0x80) {
+       if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) {
                pipe = usb_rcvctrlpipe(dev, 0);
                snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0);
 
index d1efc714133376f3be1efe1f4cfe774ecf01bd5f..86658a81d28445181198ea9f2876a8a2b44fdee9 100644 (file)
@@ -48,6 +48,7 @@
 
 #define USB_TP_TRANSMISSION_DELAY      40      /* ns */
 #define USB_TP_TRANSMISSION_DELAY_MAX  65535   /* ns */
+#define USB_PING_RESPONSE_TIME         400     /* ns */
 
 /* Protect struct usb_device->state and ->children members
  * Note: Both are also protected by ->dev.sem, except that ->state can
@@ -182,8 +183,9 @@ int usb_device_supports_lpm(struct usb_device *udev)
 }
 
 /*
- * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
- * either U1 or U2.
 + * Set the Maximum Exit Latency (MEL) for the host to wake up the path from
+ * U1/U2, send a PING to the device and receive a PING_RESPONSE.
+ * See USB 3.1 section C.1.5.2
  */
 static void usb_set_lpm_mel(struct usb_device *udev,
                struct usb3_lpm_parameters *udev_lpm_params,
@@ -193,35 +195,37 @@ static void usb_set_lpm_mel(struct usb_device *udev,
                unsigned int hub_exit_latency)
 {
        unsigned int total_mel;
-       unsigned int device_mel;
-       unsigned int hub_mel;
 
        /*
-        * Calculate the time it takes to transition all links from the roothub
-        * to the parent hub into U0.  The parent hub must then decode the
-        * packet (hub header decode latency) to figure out which port it was
-        * bound for.
-        *
-        * The Hub Header decode latency is expressed in 0.1us intervals (0x1
-        * means 0.1us).  Multiply that by 100 to get nanoseconds.
+        * tMEL1. time to transition path from host to device into U0.
+        * MEL for parent already contains the delay up to parent, so only add
+        * the exit latency for the last link (pick the slower exit latency),
+        * and the hub header decode latency. See USB 3.1 section C 2.2.1
+        * Store MEL in nanoseconds
         */
        total_mel = hub_lpm_params->mel +
-               (hub->descriptor->u.ss.bHubHdrDecLat * 100);
+               max(udev_exit_latency, hub_exit_latency) * 1000 +
+               hub->descriptor->u.ss.bHubHdrDecLat * 100;
 
        /*
-        * How long will it take to transition the downstream hub's port into
-        * U0?  The greater of either the hub exit latency or the device exit
-        * latency.
-        *
-        * The BOS U1/U2 exit latencies are expressed in 1us intervals.
-        * Multiply that by 1000 to get nanoseconds.
+        * tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for
+        * each link + wHubDelay for each hub. Add only for last link.
+        * tMEL4, the time for PING_RESPONSE to traverse upstream is similar.
+        * Multiply by 2 to include it as well.
         */
-       device_mel = udev_exit_latency * 1000;
-       hub_mel = hub_exit_latency * 1000;
-       if (device_mel > hub_mel)
-               total_mel += device_mel;
-       else
-               total_mel += hub_mel;
+       total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) +
+                     USB_TP_TRANSMISSION_DELAY) * 2;
+
+       /*
+        * tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE
+        * after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4
+        * to cover the delay if the PING_RESPONSE is queued behind a Max Packet
+        * Size DP.
+        * Note these delays should be added only once for the entire path, so
+        * add them to the MEL of the device connected to the roothub.
+        */
+       if (!hub->hdev->parent)
+               total_mel += USB_PING_RESPONSE_TIME + 2100;
 
        udev_lpm_params->mel = total_mel;
 }
@@ -4112,6 +4116,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
        return 0;
 }
 
+/*
 + * Don't allow device initiated U1/U2 if the system exit latency + one bus
+ * interval is greater than the minimum service interval of any active
+ * periodic endpoint. See USB 3.2 section 9.4.9
+ */
+static bool usb_device_may_initiate_lpm(struct usb_device *udev,
+                                       enum usb3_link_state state)
+{
+       unsigned int sel;               /* us */
+       int i, j;
+
+       if (state == USB3_LPM_U1)
+               sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
+       else if (state == USB3_LPM_U2)
+               sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
+       else
+               return false;
+
+       for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+               struct usb_interface *intf;
+               struct usb_endpoint_descriptor *desc;
+               unsigned int interval;
+
+               intf = udev->actconfig->interface[i];
+               if (!intf)
+                       continue;
+
+               for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
+                       desc = &intf->cur_altsetting->endpoint[j].desc;
+
+                       if (usb_endpoint_xfer_int(desc) ||
+                           usb_endpoint_xfer_isoc(desc)) {
+                               interval = (1 << (desc->bInterval - 1)) * 125;
+                               if (sel + 125 > interval)
+                                       return false;
+                       }
+               }
+       }
+       return true;
+}
+
 /*
  * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
  * U1/U2 entry.
@@ -4184,20 +4229,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
         * U1/U2_ENABLE
         */
        if (udev->actconfig &&
-           usb_set_device_initiated_lpm(udev, state, true) == 0) {
-               if (state == USB3_LPM_U1)
-                       udev->usb3_lpm_u1_enabled = 1;
-               else if (state == USB3_LPM_U2)
-                       udev->usb3_lpm_u2_enabled = 1;
-       } else {
-               /* Don't request U1/U2 entry if the device
-                * cannot transition to U1/U2.
-                */
-               usb_set_lpm_timeout(udev, state, 0);
-               hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+           usb_device_may_initiate_lpm(udev, state)) {
+               if (usb_set_device_initiated_lpm(udev, state, true)) {
+                       /*
+                        * Request to enable device initiated U1/U2 failed,
+                        * better to turn off lpm in this case.
+                        */
+                       usb_set_lpm_timeout(udev, state, 0);
+                       hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+                       return;
+               }
        }
-}
 
+       if (state == USB3_LPM_U1)
+               udev->usb3_lpm_u1_enabled = 1;
+       else if (state == USB3_LPM_U2)
+               udev->usb3_lpm_u2_enabled = 1;
+}
 /*
  * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
  * U1/U2 entry.
index 6114cf83bb447956c830c7f01fb6a45f9a319eba..8239fe7129dd7a3b241813e215c04dd69df20d36 100644 (file)
@@ -501,10 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
-       /* Fibocom L850-GL LTE Modem */
-       { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
-                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index ab6b815e0089ca1196fb6bf6911387154fc24bf1..483de2bbfaabe3ca1843cb3872afbb3f34a67b73 100644 (file)
@@ -383,6 +383,9 @@ enum dwc2_ep0_state {
  *                     0 - No (default)
  *                     1 - Partial power down
  *                     2 - Hibernation
+ * @no_clock_gating:   Specifies whether to avoid clock gating feature.
+ *                     0 - No (use clock gating)
+ *                     1 - Yes (avoid it)
  * @lpm:               Enable LPM support.
  *                     0 - No
  *                     1 - Yes
@@ -480,6 +483,7 @@ struct dwc2_core_params {
 #define DWC2_POWER_DOWN_PARAM_NONE             0
 #define DWC2_POWER_DOWN_PARAM_PARTIAL          1
 #define DWC2_POWER_DOWN_PARAM_HIBERNATION      2
+       bool no_clock_gating;
 
        bool lpm;
        bool lpm_clock_gating;
index a5ab03808da69fec436b2a1c1fa74d459a334295..a5c52b237e723753e9574dfe7236d0a445cf06cb 100644 (file)
@@ -556,7 +556,8 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
                                 * If neither hibernation nor partial power down are supported,
                                 * clock gating is used to save power.
                                 */
-                               dwc2_gadget_enter_clock_gating(hsotg);
+                               if (!hsotg->params.no_clock_gating)
+                                       dwc2_gadget_enter_clock_gating(hsotg);
                        }
 
                        /*
index c581ee41ac81b9a2d7fbb870c005556583dcf959..3146df6e6510d2d3f7cf5128285f38b0cc475427 100644 (file)
@@ -2749,12 +2749,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
                return;
        }
 
-       /* Zlp for all endpoints, for ep0 only in DATA IN stage */
+       /* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */
        if (hs_ep->send_zlp) {
-               dwc2_hsotg_program_zlp(hsotg, hs_ep);
                hs_ep->send_zlp = 0;
-               /* transfer will be completed on next complete interrupt */
-               return;
+               if (!using_desc_dma(hsotg)) {
+                       dwc2_hsotg_program_zlp(hsotg, hs_ep);
+                       /* transfer will be completed on next complete interrupt */
+                       return;
+               }
        }
 
        if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
@@ -3900,9 +3902,27 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
                                         __func__);
                }
        } else {
+               /* Mask GINTSTS_GOUTNAKEFF interrupt */
+               dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
+
                if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
                        dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
 
+               if (!using_dma(hsotg)) {
+                       /* Wait for GINTSTS_RXFLVL interrupt */
+                       if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+                                                   GINTSTS_RXFLVL, 100)) {
+                               dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
+                                        __func__);
+                       } else {
+                               /*
+                                * Pop GLOBAL OUT NAK status packet from RxFIFO
+                                * to assert GOUTNAKEFF interrupt
+                                */
+                               dwc2_readl(hsotg, GRXSTSP);
+                       }
+               }
+
                /* Wait for global nak to take effect */
                if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
                                            GINTSTS_GOUTNAKEFF, 100))
@@ -4348,6 +4368,9 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
                epctl = dwc2_readl(hs, epreg);
 
                if (value) {
+                       /* Unmask GOUTNAKEFF interrupt */
+                       dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
+
                        if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
                                dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
                        // STALL bit will be set in GOUTNAKEFF interrupt handler
index 035d4911a3c324d81d742dc91c775444fde42164..2a7828971d0564743e7caa933ad485c8eeb5b299 100644 (file)
@@ -3338,7 +3338,8 @@ int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
                 * If not hibernation nor partial power down are supported,
                 * clock gating is used to save power.
                 */
-               dwc2_host_enter_clock_gating(hsotg);
+               if (!hsotg->params.no_clock_gating)
+                       dwc2_host_enter_clock_gating(hsotg);
                break;
        }
 
@@ -4402,7 +4403,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
                 * If not hibernation nor partial power down are supported,
                 * clock gating is used to save power.
                 */
-               dwc2_host_enter_clock_gating(hsotg);
+               if (!hsotg->params.no_clock_gating)
+                       dwc2_host_enter_clock_gating(hsotg);
 
                /* After entering suspend, hardware is not accessible */
                clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
index 67c5eb1402325a219e5114923eed480c155cd4ee..59e119345994756166a4a4d4ef44bfd0c6302d03 100644 (file)
@@ -76,6 +76,7 @@ static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
        struct dwc2_core_params *p = &hsotg->params;
 
        p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+       p->no_clock_gating = true;
        p->phy_utmi_width = 8;
 }
 
index dccdf13b5f9e2b7f37badcfc8aeff60346d87f53..5991766239ba8b6d2a1a3b14e669bc1ed8c819de 100644 (file)
@@ -1279,6 +1279,7 @@ struct dwc3 {
        unsigned                dis_metastability_quirk:1;
 
        unsigned                dis_split_quirk:1;
+       unsigned                async_callbacks:1;
 
        u16                     imod_interval;
 };
index 3cd294264372555ac10318e558dbdbf68c15438f..2f9e45eed228063ab3ebd62dba83a629c353cf99 100644 (file)
@@ -597,11 +597,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
 
 static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
 {
-       int ret;
+       int ret = -EINVAL;
 
-       spin_unlock(&dwc->lock);
-       ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
-       spin_lock(&dwc->lock);
+       if (dwc->async_callbacks) {
+               spin_unlock(&dwc->lock);
+               ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
+               spin_lock(&dwc->lock);
+       }
        return ret;
 }
 
index af6d7f157989daa5bb596926cbf712b3f15f1c66..45f2bc0807e8ce40fa47467d23c8e91ecca65fae 100644 (file)
@@ -2585,6 +2585,16 @@ static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
        return ret;
 }
 
+static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
+{
+       struct dwc3             *dwc = gadget_to_dwc(g);
+       unsigned long           flags;
+
+       spin_lock_irqsave(&dwc->lock, flags);
+       dwc->async_callbacks = enable;
+       spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
 static const struct usb_gadget_ops dwc3_gadget_ops = {
        .get_frame              = dwc3_gadget_get_frame,
        .wakeup                 = dwc3_gadget_wakeup,
@@ -2596,6 +2606,7 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
        .udc_set_ssp_rate       = dwc3_gadget_set_ssp_rate,
        .get_config_params      = dwc3_gadget_config_params,
        .vbus_draw              = dwc3_gadget_vbus_draw,
+       .udc_async_callbacks    = dwc3_gadget_async_callbacks,
 };
 
 /* -------------------------------------------------------------------------- */
@@ -3231,7 +3242,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 
 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
 {
-       if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+       if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->disconnect(dwc->gadget);
                spin_lock(&dwc->lock);
@@ -3240,7 +3251,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
 
 static void dwc3_suspend_gadget(struct dwc3 *dwc)
 {
-       if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+       if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->suspend(dwc->gadget);
                spin_lock(&dwc->lock);
@@ -3249,7 +3260,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc)
 
 static void dwc3_resume_gadget(struct dwc3 *dwc)
 {
-       if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+       if (dwc->async_callbacks && dwc->gadget_driver->resume) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->resume(dwc->gadget);
                spin_lock(&dwc->lock);
@@ -3261,7 +3272,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
        if (!dwc->gadget_driver)
                return;
 
-       if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
+       if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
                spin_unlock(&dwc->lock);
                usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
                spin_lock(&dwc->lock);
@@ -3585,7 +3596,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
         * implemented.
         */
 
-       if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+       if (dwc->async_callbacks && dwc->gadget_driver->resume) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->resume(dwc->gadget);
                spin_lock(&dwc->lock);
index bffef8e47daca81d680bd269f459bd8d1b0d5a93..281ca766698af21c0b191197894b4ee66ac4f738 100644 (file)
@@ -1198,7 +1198,7 @@ void gserial_free_line(unsigned char port_num)
        struct gs_port  *port;
 
        mutex_lock(&ports[port_num].lock);
-       if (WARN_ON(!ports[port_num].port)) {
+       if (!ports[port_num].port) {
                mutex_unlock(&ports[port_num].lock);
                return;
        }
index a54d1cef17db8bdab647d3bb329410b3e6ee89a8..c0ca7144e5128faf64a2ff699a42c870433274c0 100644 (file)
@@ -3853,6 +3853,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
        return 0;
 
 free_eps:
+       pm_runtime_disable(&pdev->dev);
        tegra_xudc_free_eps(xudc);
 free_event_ring:
        tegra_xudc_free_event_ring(xudc);
index 36f5bf6a07523164104a21abe3999e5e4c2d7d59..10b0365f34399ae6f924dec7a7fd6f3e48dfd512 100644 (file)
@@ -703,24 +703,28 @@ EXPORT_SYMBOL_GPL(ehci_setup);
 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 {
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
-       u32                     status, masked_status, pcd_status = 0, cmd;
+       u32                     status, current_status, masked_status, pcd_status = 0;
+       u32                     cmd;
        int                     bh;
 
        spin_lock(&ehci->lock);
 
-       status = ehci_readl(ehci, &ehci->regs->status);
+       status = 0;
+       current_status = ehci_readl(ehci, &ehci->regs->status);
+restart:
 
        /* e.g. cardbus physical eject */
-       if (status == ~(u32) 0) {
+       if (current_status == ~(u32) 0) {
                ehci_dbg (ehci, "device removed\n");
                goto dead;
        }
+       status |= current_status;
 
        /*
         * We don't use STS_FLR, but some controllers don't like it to
         * remain on, so mask it out along with the other status bits.
         */
-       masked_status = status & (INTR_MASK | STS_FLR);
+       masked_status = current_status & (INTR_MASK | STS_FLR);
 
        /* Shared IRQ? */
        if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
@@ -730,6 +734,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 
        /* clear (just) interrupts */
        ehci_writel(ehci, masked_status, &ehci->regs->status);
+
+       /* For edge interrupts, don't race with an interrupt bit being raised */
+       current_status = ehci_readl(ehci, &ehci->regs->status);
+       if (current_status & INTR_MASK)
+               goto restart;
+
        cmd = ehci_readl(ehci, &ehci->regs->command);
        bh = 0;
 
index e7a8e06098535ced93dae0da4deba4ddc6d63016..59cc1bc7f12f555ea044f7b4aa427d3eccc6aadd 100644 (file)
@@ -153,8 +153,6 @@ struct max3421_hcd {
         */
        struct urb *curr_urb;
        enum scheduling_pass sched_pass;
-       struct usb_device *loaded_dev;  /* dev that's loaded into the chip */
-       int loaded_epnum;               /* epnum whose toggles are loaded */
        int urb_done;                   /* > 0 -> no errors, < 0: errno */
        size_t curr_len;
        u8 hien;
@@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
  * Caller must NOT hold HCD spinlock.
  */
 static void
-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
-                   int force_toggles)
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
 {
-       struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
-       int old_epnum, same_ep, rcvtog, sndtog;
-       struct usb_device *old_dev;
+       int rcvtog, sndtog;
        u8 hctl;
 
-       old_dev = max3421_hcd->loaded_dev;
-       old_epnum = max3421_hcd->loaded_epnum;
-
-       same_ep = (dev == old_dev && epnum == old_epnum);
-       if (same_ep && !force_toggles)
-               return;
-
-       if (old_dev && !same_ep) {
-               /* save the old end-points toggles: */
-               u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
-
-               rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
-               sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
-
-               /* no locking: HCD (i.e., we) own toggles, don't we? */
-               usb_settoggle(old_dev, old_epnum, 0, rcvtog);
-               usb_settoggle(old_dev, old_epnum, 1, sndtog);
-       }
        /* setup new endpoint's toggle bits: */
        rcvtog = usb_gettoggle(dev, epnum, 0);
        sndtog = usb_gettoggle(dev, epnum, 1);
        hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
                BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
 
-       max3421_hcd->loaded_epnum = epnum;
        spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
 
        /*
@@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
         * address-assignment so it's best to just always load the
         * address whenever the end-point changed/was forced.
         */
-       max3421_hcd->loaded_dev = dev;
        spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
 }
 
@@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
        struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
        struct urb *urb, *curr_urb = NULL;
        struct max3421_ep *max3421_ep;
-       int epnum, force_toggles = 0;
+       int epnum;
        struct usb_host_endpoint *ep;
        struct list_head *pos;
        unsigned long flags;
@@ -777,7 +752,6 @@ done:
                        usb_settoggle(urb->dev, epnum, 0, 1);
                        usb_settoggle(urb->dev, epnum, 1, 1);
                        max3421_ep->pkt_state = PKT_STATE_SETUP;
-                       force_toggles = 1;
                } else
                        max3421_ep->pkt_state = PKT_STATE_TRANSFER;
        }
@@ -785,7 +759,7 @@ done:
        spin_unlock_irqrestore(&max3421_hcd->lock, flags);
 
        max3421_ep->last_active = max3421_hcd->frame_number;
-       max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+       max3421_set_address(hcd, urb->dev, epnum);
        max3421_set_speed(hcd, urb->dev);
        max3421_next_transfer(hcd, 0);
        return 1;
@@ -1379,6 +1353,16 @@ max3421_urb_done(struct usb_hcd *hcd)
                status = 0;
        urb = max3421_hcd->curr_urb;
        if (urb) {
+               /* save the old end-points toggles: */
+               u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+               int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+               int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+               int epnum = usb_endpoint_num(&urb->ep->desc);
+
+               /* no locking: HCD (i.e., we) own toggles, don't we? */
+               usb_settoggle(urb->dev, epnum, 0, rcvtog);
+               usb_settoggle(urb->dev, epnum, 1, sndtog);
+
                max3421_hcd->curr_urb = NULL;
                spin_lock_irqsave(&max3421_hcd->lock, flags);
                usb_hcd_unlink_urb_from_ep(hcd, urb);
index e9b18fc176172312f512ac7b110564185261eeeb..151e93c4bd57437127bc38e23bc96d41c5d9bddf 100644 (file)
@@ -1638,11 +1638,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
         * Inform the usbcore about resume-in-progress by returning
         * a non-zero value even if there are no status changes.
         */
+       spin_lock_irqsave(&xhci->lock, flags);
+
        status = bus_state->resuming_ports;
 
        mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
 
-       spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
        for (i = 0; i < max_ports; i++) {
                temp = readl(ports[i]->addr);
index 1da647961c25cefd533013b919a7b6a092f5cc1b..5923844ed8218d3df99a55dc40298784dce761a9 100644 (file)
@@ -207,8 +207,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
                        return 0;
 
                case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
-                       dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
-                       break;
+                       return 0;
 
                case RENESAS_ROM_STATUS_ERROR: /* Error State */
                default: /* All other states are marked as "Reserved states" */
@@ -225,12 +224,13 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
        u8 fw_state;
        int err;
 
-       /*
-        * Only if device has ROM and loaded FW we can skip loading and
-        * return success. Otherwise (even unknown state), attempt to load FW.
-        */
-       if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev))
-               return 0;
+       /* Check if device has ROM and loaded, if so skip everything */
+       err = renesas_check_rom(pdev);
+       if (err) { /* we have rom */
+               err = renesas_check_rom_state(pdev);
+               if (!err)
+                       return err;
+       }
 
        /*
         * Test if the device is actually needing the firmware. As most
index 18c2bbddf080b924f25706092fe885706b1e6d6b..1c9a7957c45c5d49755c1e00063c8aebfecada5c 100644 (file)
@@ -636,7 +636,14 @@ static const struct pci_device_id pci_ids[] = {
        { /* end: all zeroes */ }
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/*
+ * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
+ * load firmware, so don't encumber the xhci-pci driver with it.
+ */
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
 MODULE_FIRMWARE("renesas_usb_fw.mem");
+#endif
 
 /* pci driver glue; this is a "new style" PCI driver module */
 static struct pci_driver xhci_pci_driver = {
index 83ed5089475a379d2acacae71df09e14e9911b18..1b24492bb4e5f6ddec9beb1dce608cc93922d0e9 100644 (file)
@@ -86,10 +86,10 @@ static struct usb_phy *__device_to_usb_phy(struct device *dev)
 
        list_for_each_entry(usb_phy, &phy_list, head) {
                if (usb_phy->dev == dev)
-                       break;
+                       return usb_phy;
        }
 
-       return usb_phy;
+       return NULL;
 }
 
 static void usb_phy_set_default_current(struct usb_phy *usb_phy)
@@ -150,8 +150,14 @@ static int usb_phy_uevent(struct device *dev, struct kobj_uevent_env *env)
        struct usb_phy *usb_phy;
        char uchger_state[50] = { 0 };
        char uchger_type[50] = { 0 };
+       unsigned long flags;
 
+       spin_lock_irqsave(&phy_lock, flags);
        usb_phy = __device_to_usb_phy(dev);
+       spin_unlock_irqrestore(&phy_lock, flags);
+
+       if (!usb_phy)
+               return -ENODEV;
 
        snprintf(uchger_state, ARRAY_SIZE(uchger_state),
                 "USB_CHARGER_STATE=%s", usb_chger_state[usb_phy->chg_state]);
index b5e7991dc7d9e537fac334464afb4a518577f522..a3c2b01ccf7b556912f75eb6a15c67cf69ee5539 100644 (file)
@@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
 #define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
 #define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
+static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
+static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
 {
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
                if (chan) {
                        dmaengine_terminate_all(chan);
                        usbhsf_dma_unmap(pkt);
+               } else {
+                       if (usbhs_pipe_is_dir_in(pipe))
+                               usbhsf_rx_irq_ctrl(pipe, 0);
+                       else
+                               usbhsf_tx_irq_ctrl(pipe, 0);
                }
 
                usbhs_pipe_clear_without_sequence(pipe, 0, 0);
index 09b845d0da41e314a6de27bacf1740732f02fb75..3c80bfbf3bec9859c50a815b66ee3fbc8d5997a8 100644 (file)
@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
        { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+       { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
        { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -202,8 +203,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
        { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
        { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
-       { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
-       { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
+       { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */
+       { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */
        { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
        { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
index 7608584ef4fe78f1394fc74dcce169efc2dc38ec..0fbe253dc570bc43c23fc4d3af3071eef92df79d 100644 (file)
@@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_UC15                   0x9090
 /* These u-blox products use Qualcomm's vendor ID */
 #define UBLOX_PRODUCT_R410M                    0x90b2
+#define UBLOX_PRODUCT_R6XX                     0x90fa
 /* These Yuga products use Qualcomm's vendor ID */
 #define YUGA_PRODUCT_CLM920_NC5                        0x9625
 
@@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = {
        /* u-blox products using Qualcomm vendor ID */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
          .driver_info = RSVD(1) | RSVD(3) },
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+         .driver_info = RSVD(3) },
        /* Quectel products using Quectel vendor ID */
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
index f9677a5ec31b2869fd21743c9a74c00968842d6a..c35a6db993f1b62813b7194846ce44b5078f1490 100644 (file)
@@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
 
+/* Reported-by: Julian Sikorski <belegdol@gmail.com> */
+UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
+               "LaCie",
+               "Rugged USB3-FW",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
+
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
  * commands in UAS mode.  Observed with the 1.28 firmware; are there others?
index 6eaeba9b096e140884f041c4072b1775e7ad9518..e7745d1c2a5c4683ba9f769629886a5cd8651b8c 100644 (file)
@@ -685,6 +685,15 @@ static int stusb160x_probe(struct i2c_client *client)
        if (!fwnode)
                return -ENODEV;
 
+       /*
+        * This fwnode has a "compatible" property, but is never populated as a
+        * struct device. Instead we simply parse it to read the properties.
+        * This breaks fw_devlink=on. To maintain backward compatibility
+        * with existing DT files, we work around this by deleting any
+        * fwnode_links to/from this fwnode.
+        */
+       fw_devlink_purge_absent_suppliers(fwnode);
+
        /*
         * When both VDD and VSYS power supplies are present, the low power
         * supply VSYS is selected when VSYS voltage is above 3.1 V.
@@ -739,10 +748,6 @@ static int stusb160x_probe(struct i2c_client *client)
        typec_set_pwr_opmode(chip->port, chip->pwr_opmode);
 
        if (client->irq) {
-               ret = stusb160x_irq_init(chip, client->irq);
-               if (ret)
-                       goto port_unregister;
-
                chip->role_sw = fwnode_usb_role_switch_get(fwnode);
                if (IS_ERR(chip->role_sw)) {
                        ret = PTR_ERR(chip->role_sw);
@@ -752,6 +757,10 @@ static int stusb160x_probe(struct i2c_client *client)
                                        ret);
                        goto port_unregister;
                }
+
+               ret = stusb160x_irq_init(chip, client->irq);
+               if (ret)
+                       goto role_sw_put;
        } else {
                /*
                 * If Source or Dual power role, need to enable VDD supply
@@ -775,6 +784,9 @@ static int stusb160x_probe(struct i2c_client *client)
 
        return 0;
 
+role_sw_put:
+       if (chip->role_sw)
+               usb_role_switch_put(chip->role_sw);
 port_unregister:
        typec_unregister_port(chip->port);
 all_reg_disable:
index 938219bc1b4beab3c2648f14508656c83d917fd3..21b3ae25c76d2c210396af4d9f6dc6eeabf25a33 100644 (file)
@@ -629,6 +629,15 @@ static int tps6598x_probe(struct i2c_client *client)
        if (!fwnode)
                return -ENODEV;
 
+       /*
+        * This fwnode has a "compatible" property, but is never populated as a
+        * struct device. Instead we simply parse it to read the properties.
+        * This breaks fw_devlink=on. To maintain backward compatibility
+        * with existing DT files, we work around this by deleting any
+        * fwnode_links to/from this fwnode.
+        */
+       fw_devlink_purge_absent_suppliers(fwnode);
+
        tps->role_sw = fwnode_usb_role_switch_get(fwnode);
        if (IS_ERR(tps->role_sw)) {
                ret = PTR_ERR(tps->role_sw);
index d3c6bb22c5f4890081c2e5bf06af1d34fc63cc86..a3f5de28be798d8d21964c03a24e2b6d3d2e7119 100644 (file)
@@ -29,16 +29,11 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
 
 static int afs_deliver_yfs_cb_callback(struct afs_call *);
 
-#define CM_NAME(name) \
-       char afs_SRXCB##name##_name[] __tracepoint_string =     \
-               "CB." #name
-
 /*
  * CB.CallBack operation type
  */
-static CM_NAME(CallBack);
 static const struct afs_call_type afs_SRXCBCallBack = {
-       .name           = afs_SRXCBCallBack_name,
+       .name           = "CB.CallBack",
        .deliver        = afs_deliver_cb_callback,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_CallBack,
@@ -47,9 +42,8 @@ static const struct afs_call_type afs_SRXCBCallBack = {
 /*
  * CB.InitCallBackState operation type
  */
-static CM_NAME(InitCallBackState);
 static const struct afs_call_type afs_SRXCBInitCallBackState = {
-       .name           = afs_SRXCBInitCallBackState_name,
+       .name           = "CB.InitCallBackState",
        .deliver        = afs_deliver_cb_init_call_back_state,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_InitCallBackState,
@@ -58,9 +52,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = {
 /*
  * CB.InitCallBackState3 operation type
  */
-static CM_NAME(InitCallBackState3);
 static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
-       .name           = afs_SRXCBInitCallBackState3_name,
+       .name           = "CB.InitCallBackState3",
        .deliver        = afs_deliver_cb_init_call_back_state3,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_InitCallBackState,
@@ -69,9 +62,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
 /*
  * CB.Probe operation type
  */
-static CM_NAME(Probe);
 static const struct afs_call_type afs_SRXCBProbe = {
-       .name           = afs_SRXCBProbe_name,
+       .name           = "CB.Probe",
        .deliver        = afs_deliver_cb_probe,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_Probe,
@@ -80,9 +72,8 @@ static const struct afs_call_type afs_SRXCBProbe = {
 /*
  * CB.ProbeUuid operation type
  */
-static CM_NAME(ProbeUuid);
 static const struct afs_call_type afs_SRXCBProbeUuid = {
-       .name           = afs_SRXCBProbeUuid_name,
+       .name           = "CB.ProbeUuid",
        .deliver        = afs_deliver_cb_probe_uuid,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_ProbeUuid,
@@ -91,9 +82,8 @@ static const struct afs_call_type afs_SRXCBProbeUuid = {
 /*
  * CB.TellMeAboutYourself operation type
  */
-static CM_NAME(TellMeAboutYourself);
 static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
-       .name           = afs_SRXCBTellMeAboutYourself_name,
+       .name           = "CB.TellMeAboutYourself",
        .deliver        = afs_deliver_cb_tell_me_about_yourself,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_TellMeAboutYourself,
@@ -102,9 +92,8 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
 /*
  * YFS CB.CallBack operation type
  */
-static CM_NAME(YFS_CallBack);
 static const struct afs_call_type afs_SRXYFSCB_CallBack = {
-       .name           = afs_SRXCBYFS_CallBack_name,
+       .name           = "YFSCB.CallBack",
        .deliver        = afs_deliver_yfs_cb_callback,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_CallBack,
index 78719f2f567e9fbce41a927f012f0dffa45329a4..ac829e63c5704cdf4cf421393728fb803a1ad927 100644 (file)
@@ -656,7 +656,6 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
                return ret;
        }
 
-       ret = -ENOENT;
        if (!cookie.found) {
                _leave(" = -ENOENT [not found]");
                return -ENOENT;
@@ -2020,17 +2019,20 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
 
                if (d_count(new_dentry) > 2) {
                        /* copy the target dentry's name */
-                       ret = -ENOMEM;
                        op->rename.tmp = d_alloc(new_dentry->d_parent,
                                                 &new_dentry->d_name);
-                       if (!op->rename.tmp)
+                       if (!op->rename.tmp) {
+                               op->error = -ENOMEM;
                                goto error;
+                       }
 
                        ret = afs_sillyrename(new_dvnode,
                                              AFS_FS_I(d_inode(new_dentry)),
                                              new_dentry, op->key);
-                       if (ret)
+                       if (ret) {
+                               op->error = ret;
                                goto error;
+                       }
 
                        op->dentry_2 = op->rename.tmp;
                        op->rename.rehash = NULL;
index 3104b62c208263f31924774ad5f6692e1955f2cb..c0534697268ef808e42e6a5762c44888e3148a3b 100644 (file)
@@ -771,14 +771,20 @@ int afs_writepages(struct address_space *mapping,
        if (wbc->range_cyclic) {
                start = mapping->writeback_index * PAGE_SIZE;
                ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
-               if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
-                       ret = afs_writepages_region(mapping, wbc, 0, start,
-                                                   &next);
-               mapping->writeback_index = next / PAGE_SIZE;
+               if (ret == 0) {
+                       mapping->writeback_index = next / PAGE_SIZE;
+                       if (start > 0 && wbc->nr_to_write > 0) {
+                               ret = afs_writepages_region(mapping, wbc, 0,
+                                                           start, &next);
+                               if (ret == 0)
+                                       mapping->writeback_index =
+                                               next / PAGE_SIZE;
+                       }
+               }
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
-               if (wbc->nr_to_write > 0)
-                       mapping->writeback_index = next;
+               if (wbc->nr_to_write > 0 && ret == 0)
+                       mapping->writeback_index = next / PAGE_SIZE;
        } else {
                ret = afs_writepages_region(mapping, wbc,
                                            wbc->range_start, wbc->range_end, &next);
index 7a8a2fc195338c21f76faf7215ea34ec5954fc2a..78b202d198b8e1741690a2a10fec1955cf221440 100644 (file)
@@ -1488,15 +1488,15 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 time_seq, struct ulist **roots,
-                        bool ignore_offset)
+                        bool ignore_offset, bool skip_commit_root_sem)
 {
        int ret;
 
-       if (!trans)
+       if (!trans && !skip_commit_root_sem)
                down_read(&fs_info->commit_root_sem);
        ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
                                        time_seq, roots, ignore_offset);
-       if (!trans)
+       if (!trans && !skip_commit_root_sem)
                up_read(&fs_info->commit_root_sem);
        return ret;
 }
index 17abde7f794ce37d7f817decf34577fd90885cc9..ff5f07f9940bd9bb7ae99e69617f0a63715f2abb 100644 (file)
@@ -47,7 +47,8 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                         const u64 *extent_item_pos, bool ignore_offset);
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
-                        u64 time_seq, struct ulist **roots, bool ignore_offset);
+                        u64 time_seq, struct ulist **roots, bool ignore_offset,
+                        bool skip_commit_root_sem);
 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                        u32 name_len, unsigned long name_off,
                        struct extent_buffer *eb_in, u64 parent,
index 06bc842ecdb34ea55cccffd0f5318681bf74dd99..ca848b1834747c22604ebf37907ff9aec09f0977 100644 (file)
@@ -974,7 +974,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 
        if (qrecord_inserted)
-               btrfs_qgroup_trace_extent_post(fs_info, record);
+               btrfs_qgroup_trace_extent_post(trans, record);
 
        return 0;
 }
@@ -1069,7 +1069,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 
 
        if (qrecord_inserted)
-               return btrfs_qgroup_trace_extent_post(fs_info, record);
+               return btrfs_qgroup_trace_extent_post(trans, record);
        return 0;
 }
 
index d296483d148fdfb3a91597a1accf0879cb8c4dfe..268ce58d45697d3234c7903a48dac0219c13532d 100644 (file)
@@ -6019,6 +6019,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        devices = &fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
+               if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+                       continue;
+
                ret = btrfs_trim_free_extents(device, &group_trimmed);
                if (ret) {
                        dev_failed++;
index 8f60314c36c55e36017f456cccf4f0506b40e375..0117d867ecf87655e54ccd4cfcb20dfe09f39c90 100644 (file)
@@ -2992,7 +2992,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                goto out;
        }
 
-       if (ordered_extent->disk)
+       if (ordered_extent->bdev)
                btrfs_rewrite_logical_zoned(ordered_extent);
 
        btrfs_free_io_failure_record(inode, start, end);
index 6eb41b7c0c84395e7563ba11c98e92447571154f..5c0f8481e25e0671469a5814eede82f11d96792e 100644 (file)
@@ -190,8 +190,6 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = ret;
        entry->physical = (u64)-1;
-       entry->disk = NULL;
-       entry->partno = (u8)-1;
 
        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
index 566472004edd33de19dcf1965e3e527e47f4a0ce..b2d88aba8420ccc835f30f2f650e760bacb016b7 100644 (file)
@@ -145,8 +145,7 @@ struct btrfs_ordered_extent {
         * command in a workqueue context
         */
        u64 physical;
-       struct gendisk *disk;
-       u8 partno;
+       struct block_device *bdev;
 };
 
 /*
index 07ec06d4e972635c2922d9727e395a8320772324..0fa121171ca17fc286538f5b67ece77beb696df5 100644 (file)
@@ -1704,17 +1704,39 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
                                   struct btrfs_qgroup_extent_record *qrecord)
 {
        struct ulist *old_root;
        u64 bytenr = qrecord->bytenr;
        int ret;
 
-       ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
+       /*
+        * We are always called in a context where we are already holding a
+        * transaction handle. Often we are called when adding a data delayed
+        * reference from btrfs_truncate_inode_items() (truncating or unlinking),
+        * in which case we will be holding a write lock on extent buffer from a
+        * subvolume tree. In this case we can't allow btrfs_find_all_roots() to
+        * acquire fs_info->commit_root_sem, because that is a higher level lock
+        * that must be acquired before locking any extent buffers.
+        *
+        * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
+        * but we can't pass it a non-NULL transaction handle, because otherwise
+        * it would not use commit roots and would lock extent buffers, causing
+        * a deadlock if it ends up trying to read lock the same extent buffer
+        * that was previously write locked at btrfs_truncate_inode_items().
+        *
+        * So pass a NULL transaction handle to btrfs_find_all_roots() and
+        * explicitly tell it to not acquire the commit_root_sem - if we are
+        * holding a transaction handle we don't need its protection.
+        */
+       ASSERT(trans != NULL);
+
+       ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
+                                  false, true);
        if (ret < 0) {
-               fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
-               btrfs_warn(fs_info,
+               trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+               btrfs_warn(trans->fs_info,
 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
                        ret);
                return 0;
@@ -1758,7 +1780,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
                kfree(record);
                return 0;
        }
-       return btrfs_qgroup_trace_extent_post(fs_info, record);
+       return btrfs_qgroup_trace_extent_post(trans, record);
 }
 
 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
@@ -2629,7 +2651,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
                                /* Search commit root to find old_roots */
                                ret = btrfs_find_all_roots(NULL, fs_info,
                                                record->bytenr, 0,
-                                               &record->old_roots, false);
+                                               &record->old_roots, false, false);
                                if (ret < 0)
                                        goto cleanup;
                        }
@@ -2645,7 +2667,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
                         * current root. It's safe inside commit_transaction().
                         */
                        ret = btrfs_find_all_roots(trans, fs_info,
-                               record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
+                          record->bytenr, BTRFS_SEQ_LAST, &new_roots, false, false);
                        if (ret < 0)
                                goto cleanup;
                        if (qgroup_to_skip) {
@@ -3179,7 +3201,7 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
                        num_bytes = found.offset;
 
                ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
-                                          &roots, false);
+                                          &roots, false, false);
                if (ret < 0)
                        goto out;
                /* For rescan, just pass old_roots as NULL */
index 7283e4f549af74269adef7c1617c086ee2ac0f9b..880e9df0dac1d74d5905de08c13bfe5e7339351f 100644 (file)
@@ -298,7 +298,7 @@ int btrfs_qgroup_trace_extent_nolock(
  * using current root, then we can move all expensive backref walk out of
  * transaction committing, but not now as qgroup accounting will be wrong again.
  */
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
                                   struct btrfs_qgroup_extent_record *qrecord);
 
 /*
index f3137285a9e2df92523453d5be9f081c7e115575..98b5aaba46f165c203320080765ca8ff0846e7a3 100644 (file)
@@ -224,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
         * quota.
         */
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -237,7 +237,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -261,7 +261,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
        new_roots = NULL;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -273,7 +273,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
                return -EINVAL;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -325,7 +325,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        }
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -338,7 +338,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -360,7 +360,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        }
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -373,7 +373,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -401,7 +401,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        }
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -414,7 +414,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
index dc6eb088d73e34856e5d8debad67dd4c66b70ddb..9fd0348be7f5e7792147c6251c902996dd9f3714 100644 (file)
@@ -5526,16 +5526,29 @@ log_extents:
                spin_lock(&inode->lock);
                inode->logged_trans = trans->transid;
                /*
-                * Don't update last_log_commit if we logged that an inode exists
-                * after it was loaded to memory (full_sync bit set).
-                * This is to prevent data loss when we do a write to the inode,
-                * then the inode gets evicted after all delalloc was flushed,
-                * then we log it exists (due to a rename for example) and then
-                * fsync it. This last fsync would do nothing (not logging the
-                * extents previously written).
+                * Don't update last_log_commit if we logged that an inode exists.
+                * We do this for two reasons:
+                *
+                * 1) We might have had buffered writes to this inode that were
+                *    flushed and had their ordered extents completed in this
+                *    transaction, but we did not previously log the inode with
+                *    LOG_INODE_ALL. Later the inode was evicted and after that
+                *    it was loaded again and this LOG_INODE_EXISTS log operation
+                *    happened. We must make sure that if an explicit fsync against
+                *    the inode is performed later, it logs the new extents, an
+                *    updated inode item, etc, and syncs the log. The same logic
+                *    applies to direct IO writes instead of buffered writes.
+                *
+                * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
+                *    is logged with an i_size of 0 or whatever value was logged
+                *    before. If later the i_size of the inode is increased by a
+                *    truncate operation, the log is synced through an fsync of
+                *    some other inode and then finally an explicit fsync against
+                *    this inode is made, we must make sure this fsync logs the
+                *    inode with the new i_size, the hole between old i_size and
+                *    the new i_size, and syncs the log.
                 */
-               if (inode_only != LOG_INODE_EXISTS ||
-                   !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+               if (inode_only != LOG_INODE_EXISTS)
                        inode->last_log_commit = inode->last_sub_trans;
                spin_unlock(&inode->lock);
        }
index 297c0b1c0634d1d9780a421fa7247d0dc950cc08..907c2cc45c9cbc4762fb1921af297b85799fbcf6 100644 (file)
@@ -1349,8 +1349,7 @@ void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
                return;
 
        ordered->physical = physical;
-       ordered->disk = bio->bi_bdev->bd_disk;
-       ordered->partno = bio->bi_bdev->bd_partno;
+       ordered->bdev = bio->bi_bdev;
 
        btrfs_put_ordered_extent(ordered);
 }
@@ -1362,18 +1361,16 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct btrfs_ordered_sum *sum;
-       struct block_device *bdev;
        u64 orig_logical = ordered->disk_bytenr;
        u64 *logical = NULL;
        int nr, stripe_len;
 
        /* Zoned devices should not have partitions. So, we can assume it is 0 */
-       ASSERT(ordered->partno == 0);
-       bdev = bdgrab(ordered->disk->part0);
-       if (WARN_ON(!bdev))
+       ASSERT(!bdev_is_partition(ordered->bdev));
+       if (WARN_ON(!ordered->bdev))
                return;
 
-       if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
+       if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
                                     ordered->physical, &logical, &nr,
                                     &stripe_len)))
                goto out;
@@ -1402,7 +1399,6 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
 
 out:
        kfree(logical);
-       bdput(bdev);
 }
 
 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
index a818213c972fc063d882acaecb6323218a6a8ef0..9db1b39df7737fb37284fe2a681e3559b411b653 100644 (file)
@@ -4456,7 +4456,7 @@ bool check_session_state(struct ceph_mds_session *s)
                break;
        case CEPH_MDS_SESSION_CLOSING:
                /* Should never reach this when we're unmounting */
-               WARN_ON_ONCE(true);
+               WARN_ON_ONCE(s->s_ttl);
                fallthrough;
        case CEPH_MDS_SESSION_NEW:
        case CEPH_MDS_SESSION_RESTARTING:
index f72e3b3dca6953542d0f659d0f2b512ba4cf4f24..65d1a65bfc3710159b5032ea40cf827144da1b91 100644 (file)
@@ -873,8 +873,11 @@ PsxDelete:
                                InformationLevel) - 4;
        offset = param_offset + params;
 
-       /* Setup pointer to Request Data (inode type) */
-       pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
+       /* Setup pointer to Request Data (inode type).
+        * Note that SMB offsets are from the beginning of SMB which is 4 bytes
+        * in, after RFC1001 field
+        */
+       pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4);
        pRqD->type = cpu_to_le16(type);
        pSMB->ParameterOffset = cpu_to_le16(param_offset);
        pSMB->DataOffset = cpu_to_le16(offset);
@@ -1081,7 +1084,8 @@ PsxCreat:
        param_offset = offsetof(struct smb_com_transaction2_spi_req,
                                InformationLevel) - 4;
        offset = param_offset + params;
-       pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
+       /* SMB offsets are from the beginning of SMB which is 4 bytes in, after RFC1001 field */
+       pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4);
        pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
        pdata->Permissions = cpu_to_le64(mode);
        pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
index 1b04d6ec14ddac7d0f8aa4f86fd7898b1ff3d808..3781eee9360affa4e71bc2a60c6e5958b3a5021c 100644 (file)
@@ -220,7 +220,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
 #ifdef CONFIG_CIFS_DFS_UPCALL
        struct super_block *sb = NULL;
        struct cifs_sb_info *cifs_sb = NULL;
-       struct dfs_cache_tgt_list tgt_list = {0};
+       struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
        struct dfs_cache_tgt_iterator *tgt_it = NULL;
 #endif
 
@@ -3130,7 +3130,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
 {
        int rc;
        char *npath = NULL;
-       struct dfs_cache_tgt_list tgt_list = {0};
+       struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
        struct dfs_cache_tgt_iterator *tgt_it = NULL;
        struct smb3_fs_context tmp_ctx = {NULL};
 
index 7c1769714609bdf1cde634404377f5314ce3489d..28374559284413219d19fa23116f617f659f6e60 100644 (file)
@@ -19,6 +19,7 @@
 #include "cifs_debug.h"
 #include "cifs_unicode.h"
 #include "smb2glob.h"
+#include "dns_resolve.h"
 
 #include "dfs_cache.h"
 
@@ -911,6 +912,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
 
 err_free_it:
        list_for_each_entry_safe(it, nit, head, it_list) {
+               list_del(&it->it_list);
                kfree(it->it_name);
                kfree(it);
        }
@@ -1293,6 +1295,194 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
        return 0;
 }
 
+static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+{
+       char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+       const char *host;
+       size_t hostlen;
+       char *ip = NULL;
+       struct sockaddr sa;
+       bool match;
+       int rc;
+
+       if (strcasecmp(s1, s2))
+               return false;
+
+       /*
+        * Resolve share's hostname and check if server address matches.  Otherwise just ignore it
+        * as we could not have upcall to resolve hostname or failed to convert ip address.
+        */
+       match = true;
+       extract_unc_hostname(s1, &host, &hostlen);
+       scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
+
+       rc = dns_resolve_server_name_to_ip(unc, &ip, NULL);
+       if (rc < 0) {
+               cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
+                        __func__, (int)hostlen, host);
+               return true;
+       }
+
+       if (!cifs_convert_address(&sa, ip, strlen(ip))) {
+               cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
+                        __func__, ip);
+       } else {
+               mutex_lock(&server->srv_mutex);
+               match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
+               mutex_unlock(&server->srv_mutex);
+       }
+
+       kfree(ip);
+       return match;
+}
+
+/*
+ * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
+ * target shares in @refs.
+ */
+static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
+                                        const struct dfs_info3_param *refs, int numrefs)
+{
+       struct dfs_cache_tgt_iterator *it;
+       int i;
+
+       for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
+               for (i = 0; i < numrefs; i++) {
+                       if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
+                                              refs[i].node_name))
+                               return;
+               }
+       }
+
+       cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
+       for (i = 0; i < tcon->ses->chan_count; i++) {
+               spin_lock(&GlobalMid_Lock);
+               if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
+                       tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+               spin_unlock(&GlobalMid_Lock);
+       }
+}
+
+/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+{
+       const char *path = tcon->dfs_path + 1;
+       struct cifs_ses *ses;
+       struct cache_entry *ce;
+       struct dfs_info3_param *refs = NULL;
+       int numrefs = 0;
+       bool needs_refresh = false;
+       struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+       int rc = 0;
+       unsigned int xid;
+
+       ses = find_ipc_from_server_path(sessions, path);
+       if (IS_ERR(ses)) {
+               cifs_dbg(FYI, "%s: could not find ipc session\n", __func__);
+               return PTR_ERR(ses);
+       }
+
+       down_read(&htable_rw_lock);
+       ce = lookup_cache_entry(path);
+       needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+       if (!IS_ERR(ce)) {
+               rc = get_targets(ce, &tl);
+               if (rc)
+                       cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+       }
+       up_read(&htable_rw_lock);
+
+       if (!needs_refresh) {
+               rc = 0;
+               goto out;
+       }
+
+       xid = get_xid();
+       rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+       free_xid(xid);
+
+       /* Create or update a cache entry with the new referral */
+       if (!rc) {
+               dump_refs(refs, numrefs);
+
+               down_write(&htable_rw_lock);
+               ce = lookup_cache_entry(path);
+               if (IS_ERR(ce))
+                       add_cache_entry_locked(refs, numrefs);
+               else if (force_refresh || cache_entry_expired(ce))
+                       update_cache_entry_locked(ce, refs, numrefs);
+               up_write(&htable_rw_lock);
+
+               mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+       }
+
+out:
+       dfs_cache_free_tgts(&tl);
+       free_dfs_info_array(refs, numrefs);
+       return rc;
+}
+
+/**
+ * dfs_cache_remount_fs - remount a DFS share
+ *
+ * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
+ * match any of the new targets, mark it for reconnect.
+ *
+ * @cifs_sb: cifs superblock.
+ *
+ * Return zero if remounted, otherwise non-zero.
+ */
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+{
+       struct cifs_tcon *tcon;
+       struct mount_group *mg;
+       struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
+       int rc;
+
+       if (!cifs_sb || !cifs_sb->master_tlink)
+               return -EINVAL;
+
+       tcon = cifs_sb_master_tcon(cifs_sb);
+       if (!tcon->dfs_path) {
+               cifs_dbg(FYI, "%s: not a dfs tcon\n", __func__);
+               return 0;
+       }
+
+       if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+               cifs_dbg(FYI, "%s: tcon has no dfs mount group id\n", __func__);
+               return -EINVAL;
+       }
+
+       mutex_lock(&mount_group_list_lock);
+       mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
+       if (IS_ERR(mg)) {
+               mutex_unlock(&mount_group_list_lock);
+               cifs_dbg(FYI, "%s: tcon has ipc session to refresh referral\n", __func__);
+               return PTR_ERR(mg);
+       }
+       kref_get(&mg->refcount);
+       mutex_unlock(&mount_group_list_lock);
+
+       spin_lock(&mg->lock);
+       memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
+       spin_unlock(&mg->lock);
+
+       /*
+        * After reconnecting to a different server, unique ids won't match anymore, so we disable
+        * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
+        */
+       cifs_autodisable_serverino(cifs_sb);
+       /*
+        * Force the use of prefix path to support failover on DFS paths that resolve to targets
+        * that have different prefix paths.
+        */
+       cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+       rc = refresh_tcon(sessions, tcon, true);
+
+       kref_put(&mg->refcount, mount_group_release);
+       return rc;
+}
+
 /*
  * Refresh all active dfs mounts regardless of whether they are in cache or not.
  * (cache can be cleared)
@@ -1303,7 +1493,6 @@ static void refresh_mounts(struct cifs_ses **sessions)
        struct cifs_ses *ses;
        struct cifs_tcon *tcon, *ntcon;
        struct list_head tcons;
-       unsigned int xid;
 
        INIT_LIST_HEAD(&tcons);
 
@@ -1321,44 +1510,8 @@ static void refresh_mounts(struct cifs_ses **sessions)
        spin_unlock(&cifs_tcp_ses_lock);
 
        list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
-               const char *path = tcon->dfs_path + 1;
-               struct cache_entry *ce;
-               struct dfs_info3_param *refs = NULL;
-               int numrefs = 0;
-               bool needs_refresh = false;
-               int rc = 0;
-
                list_del_init(&tcon->ulist);
-
-               ses = find_ipc_from_server_path(sessions, path);
-               if (IS_ERR(ses))
-                       goto next_tcon;
-
-               down_read(&htable_rw_lock);
-               ce = lookup_cache_entry(path);
-               needs_refresh = IS_ERR(ce) || cache_entry_expired(ce);
-               up_read(&htable_rw_lock);
-
-               if (!needs_refresh)
-                       goto next_tcon;
-
-               xid = get_xid();
-               rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-               free_xid(xid);
-
-               /* Create or update a cache entry with the new referral */
-               if (!rc) {
-                       down_write(&htable_rw_lock);
-                       ce = lookup_cache_entry(path);
-                       if (IS_ERR(ce))
-                               add_cache_entry_locked(refs, numrefs);
-                       else if (cache_entry_expired(ce))
-                               update_cache_entry_locked(ce, refs, numrefs);
-                       up_write(&htable_rw_lock);
-               }
-
-next_tcon:
-               free_dfs_info_array(refs, numrefs);
+               refresh_tcon(sessions, tcon, false);
                cifs_put_tcon(tcon);
        }
 }
index b29d3ae64829a4823847573f77304aed00dd59c6..52070d1df18975de4eda0d3055fe1724dafbf7a1 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/uuid.h>
 #include "cifsglob.h"
 
+#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
+
 struct dfs_cache_tgt_list {
        int tl_numtgts;
        struct list_head tl_list;
@@ -44,6 +46,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
 void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
 void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
 
 static inline struct dfs_cache_tgt_iterator *
 dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
index 553adfbcc22a609ab165d9d06d078bd98fe51020..9a59d7ff9a11bf7830b16ad21cf96f73959ce945 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/magic.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
+#ifdef CONFIG_CIFS_DFS_UPCALL
+#include "dfs_cache.h"
+#endif
 */
 
 #include <linux/ctype.h>
@@ -779,6 +782,10 @@ static int smb3_reconfigure(struct fs_context *fc)
        smb3_cleanup_fs_context_contents(cifs_sb->ctx);
        rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
        smb3_update_mnt_flags(cifs_sb);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       if (!rc)
+               rc = dfs_cache_remount_fs(cifs_sb);
+#endif
 
        return rc;
 }
index ba3c58e1f72565a5f3a7d570672f29a2d36270a4..23d6f4d716498f5c0c95974650893154984cb569 100644 (file)
@@ -3617,7 +3617,7 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
                                             char *buf)
 {
        struct cifs_io_parms io_parms = {0};
-       int nbytes;
+       int rc, nbytes;
        struct kvec iov[2];
 
        io_parms.netfid = cfile->fid.netfid;
@@ -3625,13 +3625,25 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
        io_parms.tcon = tcon;
        io_parms.persistent_fid = cfile->fid.persistent_fid;
        io_parms.volatile_fid = cfile->fid.volatile_fid;
-       io_parms.offset = off;
-       io_parms.length = len;
 
-       /* iov[0] is reserved for smb header */
-       iov[1].iov_base = buf;
-       iov[1].iov_len = io_parms.length;
-       return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+       while (len) {
+               io_parms.offset = off;
+               io_parms.length = len;
+               if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
+                       io_parms.length = SMB2_MAX_BUFFER_SIZE;
+               /* iov[0] is reserved for smb header */
+               iov[1].iov_base = buf;
+               iov[1].iov_len = io_parms.length;
+               rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+               if (rc)
+                       break;
+               if (nbytes > len)
+                       return -EINVAL;
+               buf += nbytes;
+               off += nbytes;
+               len -= nbytes;
+       }
+       return rc;
 }
 
 static int smb3_simple_fallocate_range(unsigned int xid,
@@ -3655,11 +3667,6 @@ static int smb3_simple_fallocate_range(unsigned int xid,
                        (char **)&out_data, &out_data_len);
        if (rc)
                goto out;
-       /*
-        * It is already all allocated
-        */
-       if (out_data_len == 0)
-               goto out;
 
        buf = kzalloc(1024 * 1024, GFP_KERNEL);
        if (buf == NULL) {
@@ -3782,6 +3789,24 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
                goto out;
        }
 
+       if (keep_size == true) {
+               /*
+                * We can not preallocate pages beyond the end of the file
+                * in SMB2
+                */
+               if (off >= i_size_read(inode)) {
+                       rc = 0;
+                       goto out;
+               }
+               /*
+                * For fallocates that are partially beyond the end of file,
+                * clamp len so we only fallocate up to the end of file.
+                */
+               if (off + len > i_size_read(inode)) {
+                       len = i_size_read(inode) - off;
+               }
+       }
+
        if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
                /*
                 * At this point, we are trying to fallocate an internal
index 06d04a74ab6c70ffa0441a2a70a77267ed9f0f34..4c33705489825a8205e3e372415f4aa50ef41147 100644 (file)
@@ -521,6 +521,9 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
         */
        smp_mb();
 
+       if (IS_DAX(inode))
+               return false;
+
        /* while holding I_WB_SWITCH, no one else can update the association */
        spin_lock(&inode->i_lock);
        if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
index 926eeb9bf4ebeb8a324ec575ea8c6cb09612a9e5..cdfb1ae78a3f84c3364adadf34eabea079b4a8c5 100644 (file)
@@ -77,7 +77,7 @@ enum hugetlb_param {
 static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
        fsparam_u32   ("gid",           Opt_gid),
        fsparam_string("min_size",      Opt_min_size),
-       fsparam_u32   ("mode",          Opt_mode),
+       fsparam_u32oct("mode",          Opt_mode),
        fsparam_string("nr_inodes",     Opt_nr_inodes),
        fsparam_string("pagesize",      Opt_pagesize),
        fsparam_string("size",          Opt_size),
index 843d4a7bcd6e94e1cdb0abbfa71a74211bee6046..cf086b01c6c675d0e9c8cbef88c1ef5df00c3fb6 100644 (file)
@@ -731,7 +731,12 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
        int work_flags;
        unsigned long flags;
 
-       if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
+       /*
+        * If io-wq is exiting for this task, or if the request has explicitly
+        * been marked as one that should not get executed, cancel it here.
+        */
+       if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
+           (work->flags & IO_WQ_WORK_CANCEL)) {
                io_run_cancel(work, wqe);
                return;
        }
index 0cac361bf6b8de27cb5e16cf4a0a4d757219f198..5a0fd6bcd3180a9b5ec470187522c765d676d10d 100644 (file)
@@ -1294,6 +1294,17 @@ static void io_queue_async_work(struct io_kiocb *req)
 
        /* init ->work of the whole link before punting */
        io_prep_async_link(req);
+
+       /*
+        * Not expected to happen, but if we do have a bug where this _can_
+        * happen, catch it here and ensure the request is marked as
+        * canceled. That will make io-wq go through the usual work cancel
+        * procedure rather than attempt to run this request (or create a new
+        * worker for it).
+        */
+       if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+               req->work.flags |= IO_WQ_WORK_CANCEL;
+
        trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
                                        &req->work, req->flags);
        io_wq_enqueue(tctx->io_wq, &req->work);
@@ -2205,7 +2216,7 @@ static inline bool io_run_task_work(void)
  * Find and free completed poll iocbs
  */
 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                              struct list_head *done)
+                              struct list_head *done, bool resubmit)
 {
        struct req_batch rb;
        struct io_kiocb *req;
@@ -2220,7 +2231,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN &&
+               if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
                    !(req->flags & REQ_F_DONT_REISSUE)) {
                        req->iopoll_completed = 0;
                        req_ref_get(req);
@@ -2244,7 +2255,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                       long min)
+                       long min, bool resubmit)
 {
        struct io_kiocb *req, *tmp;
        LIST_HEAD(done);
@@ -2287,7 +2298,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
        }
 
        if (!list_empty(&done))
-               io_iopoll_complete(ctx, nr_events, &done);
+               io_iopoll_complete(ctx, nr_events, &done, resubmit);
 
        return ret;
 }
@@ -2305,7 +2316,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
        while (!list_empty(&ctx->iopoll_list)) {
                unsigned int nr_events = 0;
 
-               io_do_iopoll(ctx, &nr_events, 0);
+               io_do_iopoll(ctx, &nr_events, 0, false);
 
                /* let it sleep and repeat later if can't complete a request */
                if (nr_events == 0)
@@ -2367,7 +2378,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                            list_empty(&ctx->iopoll_list))
                                break;
                }
-               ret = io_do_iopoll(ctx, &nr_events, min);
+               ret = io_do_iopoll(ctx, &nr_events, min, true);
        } while (!ret && nr_events < min && !need_resched());
 out:
        mutex_unlock(&ctx->uring_lock);
@@ -4802,6 +4813,7 @@ IO_NETOP_FN(recv);
 struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
+       int nr_entries;
        int error;
 };
 
@@ -4995,11 +5007,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
        struct io_kiocb *req = pt->req;
 
        /*
-        * If poll->head is already set, it's because the file being polled
-        * uses multiple waitqueues for poll handling (eg one for read, one
-        * for write). Setup a separate io_poll_iocb if this happens.
+        * The file being polled uses multiple waitqueues for poll handling
+        * (e.g. one for read, one for write). Setup a separate io_poll_iocb
+        * if this happens.
         */
-       if (unlikely(poll->head)) {
+       if (unlikely(pt->nr_entries)) {
                struct io_poll_iocb *poll_one = poll;
 
                /* already have a 2nd entry, fail a third attempt */
@@ -5027,7 +5039,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                *poll_ptr = poll;
        }
 
-       pt->error = 0;
+       pt->nr_entries++;
        poll->head = head;
 
        if (poll->events & EPOLLEXCLUSIVE)
@@ -5104,11 +5116,16 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
 
        ipt->pt._key = mask;
        ipt->req = req;
-       ipt->error = -EINVAL;
+       ipt->error = 0;
+       ipt->nr_entries = 0;
 
        mask = vfs_poll(req->file, &ipt->pt) & poll->events;
+       if (unlikely(!ipt->nr_entries) && !ipt->error)
+               ipt->error = -EINVAL;
 
        spin_lock_irq(&ctx->completion_lock);
+       if (ipt->error)
+               io_poll_remove_double(req);
        if (likely(poll->head)) {
                spin_lock(&poll->head->lock);
                if (unlikely(list_empty(&poll->wait.entry))) {
@@ -6792,7 +6809,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 
                mutex_lock(&ctx->uring_lock);
                if (!list_empty(&ctx->iopoll_list))
-                       io_do_iopoll(ctx, &nr_events, 0);
+                       io_do_iopoll(ctx, &nr_events, 0, true);
 
                /*
                 * Don't submit if refs are dying, good for io_uring_register(),
@@ -7899,15 +7916,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
        struct io_wq_data data;
        unsigned int concurrency;
 
+       mutex_lock(&ctx->uring_lock);
        hash = ctx->hash_map;
        if (!hash) {
                hash = kzalloc(sizeof(*hash), GFP_KERNEL);
-               if (!hash)
+               if (!hash) {
+                       mutex_unlock(&ctx->uring_lock);
                        return ERR_PTR(-ENOMEM);
+               }
                refcount_set(&hash->refs, 1);
                init_waitqueue_head(&hash->wait);
                ctx->hash_map = hash;
        }
+       mutex_unlock(&ctx->uring_lock);
 
        data.hash = hash;
        data.task = task;
@@ -7981,9 +8002,11 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
                f = fdget(p->wq_fd);
                if (!f.file)
                        return -ENXIO;
-               fdput(f);
-               if (f.file->f_op != &io_uring_fops)
+               if (f.file->f_op != &io_uring_fops) {
+                       fdput(f);
                        return -EINVAL;
+               }
+               fdput(f);
        }
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                struct task_struct *tsk;
index b117b212ef2887eca0f3c2412f761228740fb81d..4a2cda04d3e293b523549f72f535a0ddb9f5c631 100644 (file)
@@ -32,6 +32,9 @@ static void seq_set_overflow(struct seq_file *m)
 
 static void *seq_buf_alloc(unsigned long size)
 {
+       if (unlikely(size > MAX_RW_COUNT))
+               return NULL;
+
        return kvmalloc(size, GFP_KERNEL_ACCOUNT);
 }
 
index f6e0f0c0d0e577f0be543d9bb3c3bc2cce6891fa..5c2d806e6ae53f1c54ace2788e4b22f64a344f6e 100644 (file)
@@ -1236,23 +1236,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
 }
 
 static __always_inline int validate_range(struct mm_struct *mm,
-                                         __u64 *start, __u64 len)
+                                         __u64 start, __u64 len)
 {
        __u64 task_size = mm->task_size;
 
-       *start = untagged_addr(*start);
-
-       if (*start & ~PAGE_MASK)
+       if (start & ~PAGE_MASK)
                return -EINVAL;
        if (len & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return -EINVAL;
-       if (*start < mmap_min_addr)
+       if (start < mmap_min_addr)
                return -EINVAL;
-       if (*start >= task_size)
+       if (start >= task_size)
                return -EINVAL;
-       if (len > task_size - *start)
+       if (len > task_size - start)
                return -EINVAL;
        return 0;
 }
@@ -1316,7 +1314,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                vm_flags |= VM_UFFD_MINOR;
        }
 
-       ret = validate_range(mm, &uffdio_register.range.start,
+       ret = validate_range(mm, uffdio_register.range.start,
                             uffdio_register.range.len);
        if (ret)
                goto out;
@@ -1522,7 +1520,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
        if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
                goto out;
 
-       ret = validate_range(mm, &uffdio_unregister.start,
+       ret = validate_range(mm, uffdio_unregister.start,
                             uffdio_unregister.len);
        if (ret)
                goto out;
@@ -1671,7 +1669,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
        if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
+       ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
        if (ret)
                goto out;
 
@@ -1711,7 +1709,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                           sizeof(uffdio_copy)-sizeof(__s64)))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
+       ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
        if (ret)
                goto out;
        /*
@@ -1768,7 +1766,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                           sizeof(uffdio_zeropage)-sizeof(__s64)))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
+       ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
                             uffdio_zeropage.range.len);
        if (ret)
                goto out;
@@ -1818,7 +1816,7 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
                           sizeof(struct uffdio_writeprotect)))
                return -EFAULT;
 
-       ret = validate_range(ctx->mm, &uffdio_wp.range.start,
+       ret = validate_range(ctx->mm, uffdio_wp.range.start,
                             uffdio_wp.range.len);
        if (ret)
                return ret;
@@ -1866,7 +1864,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
                           sizeof(uffdio_continue) - (sizeof(__s64))))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_continue.range.start,
+       ret = validate_range(ctx->mm, uffdio_continue.range.start,
                             uffdio_continue.range.len);
        if (ret)
                goto out;
index 1ae993fee4a5d479a2d659c29bbe1dad85a1e2a1..13d93371790ec94074c7ab8f0d169a68570a4624 100644 (file)
@@ -707,11 +707,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
  * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
  *
  * The caller is responsible for invoking acpi_dev_put() on the returned device.
- *
- * FIXME: Due to above requirement there is a window that may invalidate @adev
- * and next iteration will use a dangling pointer, e.g. in the case of a
- * hotplug event. That said, the caller should ensure that this will never
- * happen.
  */
 #define for_each_acpi_dev_match(adev, hid, uid, hrv)                   \
        for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv);        \
@@ -725,7 +720,8 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
 
 static inline void acpi_dev_put(struct acpi_device *adev)
 {
-       put_device(&adev->dev);
+       if (adev)
+               put_device(&adev->dev);
 }
 
 struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
index 10100a4bbe2ad63f556a40e7dbc1010e1bdda466..afb27cb6a7bd84a8f4e1e68f0a384b990aa79845 100644 (file)
@@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
                               unsigned long arg);
 
 #define DRM_IOCTL_NR(n)                _IOC_NR(n)
+#define DRM_IOCTL_TYPE(n)              _IOC_TYPE(n)
 #define DRM_MAJOR       226
 
 /**
index 3177181c43262392011cc72f485f8be1496d12c8..d3afea47ade676984674522bd0546731074869d8 100644 (file)
@@ -57,7 +57,7 @@ struct blk_keyslot_manager;
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
  */
-#define BLKCG_MAX_POLS         5
+#define BLKCG_MAX_POLS         6
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
index 8c6e8e996c875433f82a9a735c1b03c0a9baff02..d9a606a9fc64ae570416df16bf525adef90ed424 100644 (file)
@@ -318,14 +318,16 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
 
        VM_BUG_ON(offset + len > PAGE_SIZE);
        memcpy(to + offset, from, len);
+       flush_dcache_page(page);
        kunmap_local(to);
 }
 
 static inline void memzero_page(struct page *page, size_t offset, size_t len)
 {
-       char *addr = kmap_atomic(page);
+       char *addr = kmap_local_page(page);
        memset(addr + offset, 0, len);
-       kunmap_atomic(addr);
+       flush_dcache_page(page);
+       kunmap_local(addr);
 }
 
 #endif /* _LINUX_HIGHMEM_H */
index cbf46f56d1053b68cc1e9f4aa9d9c48cb46a982d..4a53c3ca86bdcb3b76cc9ad1785201bf75c702ef 100644 (file)
@@ -209,7 +209,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
  */
 #define for_each_mem_range(i, p_start, p_end) \
        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,   \
-                            MEMBLOCK_NONE, p_start, p_end, NULL)
+                            MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
 
 /**
  * for_each_mem_range_rev - reverse iterate through memblock areas from
@@ -220,7 +220,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
  */
 #define for_each_mem_range_rev(i, p_start, p_end)                      \
        __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
-                                MEMBLOCK_NONE, p_start, p_end, NULL)
+                                MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
 
 /**
  * for_each_reserved_mem_range - iterate over all reserved memblock areas
index 2d1895c3efbf2e17388fac8d2d3453cf24bb248b..40a0c2dfb80ff0119bc1cf0b154a11b06a03b600 100644 (file)
@@ -200,13 +200,13 @@ enum rt5033_reg {
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN              1000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX              3000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP             100000U
-#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         32
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         21
 
 /* RT5033 regulator LDO output voltage uV */
 #define RT5033_REGULATOR_LDO_VOLTAGE_MIN               1200000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_MAX               3000000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_STEP              100000U
-#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          32
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          19
 
 /* RT5033 regulator SAFE LDO output voltage uV */
 #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE              4900000U
index d147480cdefc7ff88fe4396a2a5d25dcba4223e4..e24d2c992b1129f3f2b01a8a8ddc5071abe1551c 100644 (file)
@@ -1397,34 +1397,10 @@ static inline int p4d_clear_huge(p4d_t *p4d)
 }
 #endif /* !__PAGETABLE_P4D_FOLDED */
 
-#ifndef __PAGETABLE_PUD_FOLDED
 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
-int pud_clear_huge(pud_t *pud);
-#else
-static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
-{
-       return 0;
-}
-static inline int pud_clear_huge(pud_t *pud)
-{
-       return 0;
-}
-#endif /* !__PAGETABLE_PUD_FOLDED */
-
-#ifndef __PAGETABLE_PMD_FOLDED
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
-#else
-static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
-{
-       return 0;
-}
-static inline int pmd_clear_huge(pmd_t *pmd)
-{
-       return 0;
-}
-#endif /* !__PAGETABLE_PMD_FOLDED */
-
 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
 int pud_free_pmd_page(pud_t *pud, unsigned long addr);
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
index 17df9b047ee46dabed8797246f99e1a2fd39c243..784d5c3ef1c5be0b54194711ff7f306d271d95c3 100644 (file)
@@ -1709,7 +1709,6 @@ struct tcp_fastopen_context {
        struct rcu_head rcu;
 };
 
-extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
index 675849d07284af0edab250b7d88dd87d0266d150..8e6dd8a257c567d1de5c48c6cd963800094d1b30 100644 (file)
@@ -712,6 +712,12 @@ struct snd_soc_dai_link {
        /* Do not create a PCM for this DAI link (Backend link) */
        unsigned int ignore:1;
 
+       /* This flag will reorder stop sequence. By enabling this flag
+        * DMA controller stop sequence will be invoked first followed by
+        * CPU DAI driver stop sequence
+        */
+       unsigned int stop_dma_first:1;
+
 #ifdef CONFIG_SND_SOC_TOPOLOGY
        struct snd_soc_dobj dobj; /* For topology */
 #endif
index 3ccf591b2374064915cea8447dd8d02192e243ff..9f73ed2cf06116b8fcfee52d701f77bb93af713b 100644 (file)
@@ -174,6 +174,34 @@ enum afs_vl_operation {
        afs_VL_GetCapabilities  = 65537,        /* AFS Get VL server capabilities */
 };
 
+enum afs_cm_operation {
+       afs_CB_CallBack                 = 204,  /* AFS break callback promises */
+       afs_CB_InitCallBackState        = 205,  /* AFS initialise callback state */
+       afs_CB_Probe                    = 206,  /* AFS probe client */
+       afs_CB_GetLock                  = 207,  /* AFS get contents of CM lock table */
+       afs_CB_GetCE                    = 208,  /* AFS get cache file description */
+       afs_CB_GetXStatsVersion         = 209,  /* AFS get version of extended statistics */
+       afs_CB_GetXStats                = 210,  /* AFS get contents of extended statistics data */
+       afs_CB_InitCallBackState3       = 213,  /* AFS initialise callback state, version 3 */
+       afs_CB_ProbeUuid                = 214,  /* AFS check the client hasn't rebooted */
+};
+
+enum yfs_cm_operation {
+       yfs_CB_Probe                    = 206,  /* YFS probe client */
+       yfs_CB_GetLock                  = 207,  /* YFS get contents of CM lock table */
+       yfs_CB_XStatsVersion            = 209,  /* YFS get version of extended statistics */
+       yfs_CB_GetXStats                = 210,  /* YFS get contents of extended statistics data */
+       yfs_CB_InitCallBackState3       = 213,  /* YFS initialise callback state, version 3 */
+       yfs_CB_ProbeUuid                = 214,  /* YFS check the client hasn't rebooted */
+       yfs_CB_GetServerPrefs           = 215,
+       yfs_CB_GetCellServDV            = 216,
+       yfs_CB_GetLocalCell             = 217,
+       yfs_CB_GetCacheConfig           = 218,
+       yfs_CB_GetCellByNum             = 65537,
+       yfs_CB_TellMeAboutYourself      = 65538, /* get client capabilities */
+       yfs_CB_CallBack                 = 64204,
+};
+
 enum afs_edit_dir_op {
        afs_edit_dir_create,
        afs_edit_dir_create_error,
@@ -436,6 +464,32 @@ enum afs_cb_break_reason {
        EM(afs_YFSVL_GetCellName,               "YFSVL.GetCellName") \
        E_(afs_VL_GetCapabilities,              "VL.GetCapabilities")
 
+#define afs_cm_operations \
+       EM(afs_CB_CallBack,                     "CB.CallBack") \
+       EM(afs_CB_InitCallBackState,            "CB.InitCallBackState") \
+       EM(afs_CB_Probe,                        "CB.Probe") \
+       EM(afs_CB_GetLock,                      "CB.GetLock") \
+       EM(afs_CB_GetCE,                        "CB.GetCE") \
+       EM(afs_CB_GetXStatsVersion,             "CB.GetXStatsVersion") \
+       EM(afs_CB_GetXStats,                    "CB.GetXStats") \
+       EM(afs_CB_InitCallBackState3,           "CB.InitCallBackState3") \
+       E_(afs_CB_ProbeUuid,                    "CB.ProbeUuid")
+
+#define yfs_cm_operations \
+       EM(yfs_CB_Probe,                        "YFSCB.Probe") \
+       EM(yfs_CB_GetLock,                      "YFSCB.GetLock") \
+       EM(yfs_CB_XStatsVersion,                "YFSCB.XStatsVersion") \
+       EM(yfs_CB_GetXStats,                    "YFSCB.GetXStats") \
+       EM(yfs_CB_InitCallBackState3,           "YFSCB.InitCallBackState3") \
+       EM(yfs_CB_ProbeUuid,                    "YFSCB.ProbeUuid") \
+       EM(yfs_CB_GetServerPrefs,               "YFSCB.GetServerPrefs") \
+       EM(yfs_CB_GetCellServDV,                "YFSCB.GetCellServDV") \
+       EM(yfs_CB_GetLocalCell,                 "YFSCB.GetLocalCell") \
+       EM(yfs_CB_GetCacheConfig,               "YFSCB.GetCacheConfig") \
+       EM(yfs_CB_GetCellByNum,                 "YFSCB.GetCellByNum") \
+       EM(yfs_CB_TellMeAboutYourself,          "YFSCB.TellMeAboutYourself") \
+       E_(yfs_CB_CallBack,                     "YFSCB.CallBack")
+
 #define afs_edit_dir_ops                                 \
        EM(afs_edit_dir_create,                 "create") \
        EM(afs_edit_dir_create_error,           "c_fail") \
@@ -569,6 +623,8 @@ afs_server_traces;
 afs_cell_traces;
 afs_fs_operations;
 afs_vl_operations;
+afs_cm_operations;
+yfs_cm_operations;
 afs_edit_dir_ops;
 afs_edit_dir_reasons;
 afs_eproto_causes;
@@ -649,20 +705,21 @@ TRACE_EVENT(afs_cb_call,
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call            )
-                   __field(const char *,               name            )
                    __field(u32,                        op              )
+                   __field(u16,                        service_id      )
                             ),
 
            TP_fast_assign(
                    __entry->call       = call->debug_id;
-                   __entry->name       = call->type->name;
                    __entry->op         = call->operation_ID;
+                   __entry->service_id = call->service_id;
                           ),
 
-           TP_printk("c=%08x %s o=%u",
+           TP_printk("c=%08x %s",
                      __entry->call,
-                     __entry->name,
-                     __entry->op)
+                     __entry->service_id == 2501 ?
+                     __print_symbolic(__entry->op, yfs_cm_operations) :
+                     __print_symbolic(__entry->op, afs_cm_operations))
            );
 
 TRACE_EVENT(afs_call,
index 2399073c3afc603bd303960302cf42f5bb38044a..78c448c6ab4c5cf30f82a063f318b9030da7199d 100644 (file)
@@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(net_dev_template,
                __assign_str(name, skb->dev->name);
        ),
 
-       TP_printk("dev=%s skbaddr=%p len=%u",
+       TP_printk("dev=%s skbaddr=%px len=%u",
                __get_str(name), __entry->skbaddr, __entry->len)
 )
 
index 330d32d84485b953944adf2b99e2e627b77dab0a..c3006c6b4a875badc20aa924a40e0be7d6a1433f 100644 (file)
@@ -41,11 +41,37 @@ TRACE_EVENT(qdisc_dequeue,
                __entry->txq_state      = txq->state;
        ),
 
-       TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p",
+       TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%px",
                  __entry->ifindex, __entry->handle, __entry->parent,
                  __entry->txq_state, __entry->packets, __entry->skbaddr )
 );
 
+TRACE_EVENT(qdisc_enqueue,
+
+       TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
+
+       TP_ARGS(qdisc, txq, skb),
+
+       TP_STRUCT__entry(
+               __field(struct Qdisc *, qdisc)
+               __field(void *, skbaddr)
+               __field(int, ifindex)
+               __field(u32, handle)
+               __field(u32, parent)
+       ),
+
+       TP_fast_assign(
+               __entry->qdisc = qdisc;
+               __entry->skbaddr = skb;
+               __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+               __entry->handle  = qdisc->handle;
+               __entry->parent  = qdisc->parent;
+       ),
+
+       TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%px",
+                 __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr)
+);
+
 TRACE_EVENT(qdisc_reset,
 
        TP_PROTO(struct Qdisc *q),
index 42a4063de7cd24d5f946f0c95815501fef3019e2..9de3c9c3267cb10ddcaf1070defea4930dfbb596 100644 (file)
@@ -3677,6 +3677,8 @@ continue_func:
        if (tail_call_reachable)
                for (j = 0; j < frame; j++)
                        subprog[ret_prog[j]].tail_call_reachable = true;
+       if (subprog[0].tail_call_reachable)
+               env->prog->aux->tail_call_reachable = true;
 
        /* end of for() loop means the last insn of the 'subprog'
         * was reached. Doesn't matter whether it was JA or EXIT
index 910ae69cae7774dbd1639b8f0ff57a6d4060205e..af4a6ef48ce04a72387e53f38a833ecc76cb1d4b 100644 (file)
@@ -5,6 +5,13 @@
  */
 #include <linux/dma-map-ops.h>
 
+static struct page *dma_common_vaddr_to_page(void *cpu_addr)
+{
+       if (is_vmalloc_addr(cpu_addr))
+               return vmalloc_to_page(cpu_addr);
+       return virt_to_page(cpu_addr);
+}
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 unsigned long attrs)
 {
-       struct page *page = virt_to_page(cpu_addr);
+       struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret;
 
        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
+       struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret = -ENXIO;
 
        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                return -ENXIO;
 
        return remap_pfn_range(vma, vma->vm_start,
-                       page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+                       page_to_pfn(page) + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
        return -ENXIO;
index e4163042c4d6658d4fcf192bfcac04e8c2ddb5cf..cf6acab78538487105539025fca3d0dc84721c4b 100644 (file)
@@ -47,7 +47,7 @@ void __init idle_thread_set_boot_cpu(void)
  *
  * Creates the thread if it does not exist.
  */
-static inline void idle_init(unsigned int cpu)
+static __always_inline void idle_init(unsigned int cpu)
 {
        struct task_struct *tsk = per_cpu(idle_threads, cpu);
 
index 29a5e54e6e105465a5b7d2770bd6bbcaff1452a7..517be7fd175ef3664b7cf387adb1ba5decd062f1 100644 (file)
@@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
        if (!p)
                goto out;
 
+       /* Protect timer list r/w in arm_timer() */
+       sighand = lock_task_sighand(p, &flags);
+       if (unlikely(sighand == NULL))
+               goto out;
+
        /*
         * Fetch the current sample and update the timer's expiry time.
         */
@@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
 
        bump_cpu_timer(timer, now);
 
-       /* Protect timer list r/w in arm_timer() */
-       sighand = lock_task_sighand(p, &flags);
-       if (unlikely(sighand == NULL))
-               goto out;
-
        /*
         * Now re-arm for the new expiry time.
         */
index 3fadb58fc9d7b1c7a273b59a48b8dc82c47a1b22..9eb11c2209e56b663a9b326c682e18f8c289f49a 100644 (file)
@@ -207,6 +207,7 @@ struct timer_base {
        unsigned int            cpu;
        bool                    next_expiry_recalc;
        bool                    is_idle;
+       bool                    timers_pending;
        DECLARE_BITMAP(pending_map, WHEEL_SIZE);
        struct hlist_head       vectors[WHEEL_SIZE];
 } ____cacheline_aligned;
@@ -595,6 +596,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
                 * can reevaluate the wheel:
                 */
                base->next_expiry = bucket_expiry;
+               base->timers_pending = true;
                base->next_expiry_recalc = false;
                trigger_dyntick_cpu(base, timer);
        }
@@ -1582,6 +1584,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
        }
 
        base->next_expiry_recalc = false;
+       base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
 
        return next;
 }
@@ -1633,7 +1636,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;
-       bool is_max_delta;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -1646,7 +1648,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        if (base->next_expiry_recalc)
                base->next_expiry = __next_timer_interrupt(base);
        nextevt = base->next_expiry;
-       is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
 
        /*
         * We have a fresh next event. Check whether we can forward the
@@ -1664,7 +1665,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
                expires = basem;
                base->is_idle = false;
        } else {
-               if (!is_max_delta)
+               if (base->timers_pending)
                        expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
                /*
                 * If we expect to sleep more than a tick, mark the base idle.
@@ -1947,6 +1948,7 @@ int timers_prepare_cpu(unsigned int cpu)
                base = per_cpu_ptr(&timer_bases[b], cpu);
                base->clk = jiffies;
                base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+               base->timers_pending = false;
                base->is_idle = false;
        }
        return 0;
index e6fb3e6e1ffc22e27d2237019e8bddec2cd8847d..7b180f61e6d3ccfa736a925a6cf9731f58be25f8 100644 (file)
@@ -5985,7 +5985,8 @@ ftrace_graph_release(struct inode *inode, struct file *file)
                 * infrastructure to do the synchronization, thus we must do it
                 * ourselves.
                 */
-               synchronize_rcu_tasks_rude();
+               if (old_hash != EMPTY_HASH)
+                       synchronize_rcu_tasks_rude();
 
                free_ftrace_hash(old_hash);
        }
@@ -7544,7 +7545,7 @@ int ftrace_is_dead(void)
  */
 int register_ftrace_function(struct ftrace_ops *ops)
 {
-       int ret = -1;
+       int ret;
 
        ftrace_ops_init(ops);
 
index d1463eac11a36ff6591ca01b1b74852103aaf74d..e592d1df6f888a3da7f7b75d58957f2d332eb18d 100644 (file)
@@ -3880,10 +3880,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
        if (unlikely(!head))
                return true;
 
-       return reader->read == rb_page_commit(reader) &&
-               (commit == reader ||
-                (commit == head &&
-                 head->read == rb_page_commit(commit)));
+       /* Reader should exhaust content in reader page */
+       if (reader->read != rb_page_commit(reader))
+               return false;
+
+       /*
+        * If writers are committing on the reader page, knowing all
+        * committed content has been read, the ring buffer is empty.
+        */
+       if (commit == reader)
+               return true;
+
+       /*
+        * If writers are committing on a page other than reader page
+        * and head page, there should always be content to read.
+        */
+       if (commit != head)
+               return false;
+
+       /*
+        * Writers are committing on the head page, we just need
+        * to care about there're committed data, and the reader will
+        * swap reader page with head page when it is to read data.
+        */
+       return rb_page_commit(commit) == 0;
 }
 
 /**
index f8b80b5bab712cd6503dd448f02073f0a8cd6664..c59dd35a6da5c561a4297202e3970e4e5f6e5b86 100644 (file)
@@ -5609,6 +5609,10 @@ static const char readme_msg[] =
        "\t            [:name=histname1]\n"
        "\t            [:<handler>.<action>]\n"
        "\t            [if <filter>]\n\n"
+       "\t    Note, special fields can be used as well:\n"
+       "\t            common_timestamp - to record current timestamp\n"
+       "\t            common_cpu - to record the CPU the event happened on\n"
+       "\n"
        "\t    When a matching event is hit, an entry is added to a hash\n"
        "\t    table using the key(s) and value(s) named, and the value of a\n"
        "\t    sum called 'hitcount' is incremented.  Keys and values\n"
index 16a9dfc9fffc3adfb0dd0e13f6efb147c9433b90..34325f41ebc0663091620af368ef2be827438f92 100644 (file)
@@ -1111,7 +1111,7 @@ static const char *hist_field_name(struct hist_field *field,
                 field->flags & HIST_FIELD_FL_ALIAS)
                field_name = hist_field_name(field->operands[0], ++level);
        else if (field->flags & HIST_FIELD_FL_CPU)
-               field_name = "cpu";
+               field_name = "common_cpu";
        else if (field->flags & HIST_FIELD_FL_EXPR ||
                 field->flags & HIST_FIELD_FL_VAR_REF) {
                if (field->system) {
@@ -1991,14 +1991,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                hist_data->enable_timestamps = true;
                if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
                        hist_data->attrs->ts_in_usecs = true;
-       } else if (strcmp(field_name, "cpu") == 0)
+       } else if (strcmp(field_name, "common_cpu") == 0)
                *flags |= HIST_FIELD_FL_CPU;
        else {
                field = trace_find_event_field(file->event_call, field_name);
                if (!field || !field->size) {
-                       hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
-                       field = ERR_PTR(-EINVAL);
-                       goto out;
+                       /*
+                        * For backward compatibility, if field_name
+                        * was "cpu", then we treat this the same as
+                        * common_cpu.
+                        */
+                       if (strcmp(field_name, "cpu") == 0) {
+                               *flags |= HIST_FIELD_FL_CPU;
+                       } else {
+                               hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
+                                        errpos(field_name));
+                               field = ERR_PTR(-EINVAL);
+                               goto out;
+                       }
                }
        }
  out:
@@ -5085,7 +5095,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
                seq_printf(m, "%s=", hist_field->var.name);
 
        if (hist_field->flags & HIST_FIELD_FL_CPU)
-               seq_puts(m, "cpu");
+               seq_puts(m, "common_cpu");
        else if (field_name) {
                if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
                    hist_field->flags & HIST_FIELD_FL_ALIAS)
index 2ac75eb6aa86cbd3dd7c9dc12062fd069f8a2085..9315fc03e3030b6f5a4a922c445375893b0175ba 100644 (file)
@@ -893,15 +893,13 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields,
        dyn_event_init(&event->devent, &synth_event_ops);
 
        for (i = 0, j = 0; i < n_fields; i++) {
+               fields[i]->field_pos = i;
                event->fields[i] = fields[i];
 
-               if (fields[i]->is_dynamic) {
-                       event->dynamic_fields[j] = fields[i];
-                       event->dynamic_fields[j]->field_pos = i;
+               if (fields[i]->is_dynamic)
                        event->dynamic_fields[j++] = fields[i];
-                       event->n_dynamic_fields++;
-               }
        }
+       event->n_dynamic_fields = j;
        event->n_fields = n_fields;
  out:
        return event;
index 6e146b959dcd0253457c03c47ec0551e7e409f6b..4007fe95cf42c103a828685d488cf121483dc36e 100644 (file)
@@ -14,10 +14,10 @@ struct synth_field {
        char *name;
        size_t size;
        unsigned int offset;
+       unsigned int field_pos;
        bool is_signed;
        bool is_string;
        bool is_dynamic;
-       bool field_pos;
 };
 
 struct synth_event {
index 976bf8ce803967d5d0bcf830981d69ee495cbf25..fc32821f8240b78b61c12c4bc352bcfea041e2a9 100644 (file)
@@ -299,8 +299,8 @@ static int tracepoint_add_func(struct tracepoint *tp,
         * a pointer to it.  This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h using rcu_dereference_sched().
         */
-       rcu_assign_pointer(tp->funcs, tp_funcs);
        tracepoint_update_call(tp, tp_funcs, false);
+       rcu_assign_pointer(tp->funcs, tp_funcs);
        static_key_enable(&tp->key);
 
        release_probes(old);
index 271f2ca862c82ca1e95b4e22598cae7e17d8c45e..f5561ea7d90ad62c28eb50e07f61e8c78df2d9d8 100644 (file)
@@ -398,12 +398,12 @@ static void cgwb_release_workfn(struct work_struct *work)
        blkcg_unpin_online(blkcg);
 
        fprop_local_destroy_percpu(&wb->memcg_completions);
-       percpu_ref_exit(&wb->refcnt);
 
        spin_lock_irq(&cgwb_lock);
        list_del(&wb->offline_node);
        spin_unlock_irq(&cgwb_lock);
 
+       percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        WARN_ON_ONCE(!list_empty(&wb->b_attached));
        kfree_rcu(wb, rcu);
index d7666ace9d2e4aebef15f519f201dd7805571bfe..575c685aa642291bca3418828df953233f0ffe5e 100644 (file)
@@ -733,6 +733,22 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 
 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
+       /*
+        * Perform size check before switching kfence_allocation_gate, so that
+        * we don't disable KFENCE without making an allocation.
+        */
+       if (size > PAGE_SIZE)
+               return NULL;
+
+       /*
+        * Skip allocations from non-default zones, including DMA. We cannot
+        * guarantee that pages in the KFENCE pool will have the requested
+        * properties (e.g. reside in DMAable memory).
+        */
+       if ((flags & GFP_ZONEMASK) ||
+           (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+               return NULL;
+
        /*
         * allocation_gate only needs to become non-zero, so it doesn't make
         * sense to continue writing to it and pay the associated contention
@@ -757,9 +773,6 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
        if (!READ_ONCE(kfence_enabled))
                return NULL;
 
-       if (size > PAGE_SIZE)
-               return NULL;
-
        return kfence_guarded_alloc(s, size, flags);
 }
 
index 7f24b9bcb2ec553f1fc88bdea7f5a90b59e9b27a..942cbc16ad26ba8bc6032ce4b7416d8725b750ad 100644 (file)
@@ -852,7 +852,7 @@ static void kfence_test_exit(void)
        tracepoint_synchronize_unregister();
 }
 
-late_initcall(kfence_test_init);
+late_initcall_sync(kfence_test_init);
 module_exit(kfence_test_exit);
 
 MODULE_LICENSE("GPL v2");
index 0041ff62c584e7e128a138a1a9a996449ebc1cdf..de7b553baa50004c5152d0fee14a7a682d098673 100644 (file)
@@ -947,7 +947,8 @@ static bool should_skip_region(struct memblock_type *type,
                return true;
 
        /* skip hotpluggable memory regions if needed */
-       if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+       if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
+           !(flags & MEMBLOCK_HOTPLUG))
                return true;
 
        /* if we want mirror memory skip non-mirror memory regions */
index 747a01d495f2c5cd0ff71e0910925a3374af316d..25fc46e872142a11692e3b44402cab89ca644fb3 100644 (file)
@@ -4026,8 +4026,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
                                return ret;
                }
 
-               if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
+               if (vmf->prealloc_pte) {
+                       vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+                       if (likely(pmd_none(*vmf->pmd))) {
+                               mm_inc_nr_ptes(vma->vm_mm);
+                               pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
+                               vmf->prealloc_pte = NULL;
+                       }
+                       spin_unlock(vmf->ptl);
+               } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
                        return VM_FAULT_OOM;
+               }
        }
 
        /* See comment in handle_pte_fault() */
index f5852a058ce0bc835943309bdc39edf0ea27893f..1854850b4b897f387c0681a45b940b8ee793e5a2 100644 (file)
@@ -156,14 +156,14 @@ static inline void put_memcg_path_buf(void)
 #define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
        do {                                                                   \
                const char *memcg_path;                                        \
-               preempt_disable();                                             \
+               local_lock(&memcg_paths.lock);                                 \
                memcg_path = get_mm_memcg_path(mm);                            \
                trace_mmap_lock_##type(mm,                                     \
                                       memcg_path != NULL ? memcg_path : "",   \
                                       ##__VA_ARGS__);                         \
                if (likely(memcg_path != NULL))                                \
                        put_memcg_path_buf();                                  \
-               preempt_enable();                                              \
+               local_unlock(&memcg_paths.lock);                               \
        } while (0)
 
 #else /* !CONFIG_MEMCG */
index 3e97e68aef7a899a3441e3c3832cdbdacafc2b7f..856b175c15a4fa2c7590526d4941e4b1a2dd39e5 100644 (file)
@@ -840,21 +840,24 @@ void init_mem_debugging_and_hardening(void)
        }
 #endif
 
-       if (_init_on_alloc_enabled_early) {
-               if (page_poisoning_requested)
-                       pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
-                               "will take precedence over init_on_alloc\n");
-               else
-                       static_branch_enable(&init_on_alloc);
-       }
-       if (_init_on_free_enabled_early) {
-               if (page_poisoning_requested)
-                       pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
-                               "will take precedence over init_on_free\n");
-               else
-                       static_branch_enable(&init_on_free);
+       if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
+           page_poisoning_requested) {
+               pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+                       "will take precedence over init_on_alloc and init_on_free\n");
+               _init_on_alloc_enabled_early = false;
+               _init_on_free_enabled_early = false;
        }
 
+       if (_init_on_alloc_enabled_early)
+               static_branch_enable(&init_on_alloc);
+       else
+               static_branch_disable(&init_on_alloc);
+
+       if (_init_on_free_enabled_early)
+               static_branch_enable(&init_on_free);
+       else
+               static_branch_disable(&init_on_free);
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
        if (!debug_pagealloc_enabled())
                return;
index f77d25467a14ad3bdda3ff1ac69baa147e47d0cb..030f02ddc7c1dc823bc284be22976df820e62404 100644 (file)
@@ -152,6 +152,7 @@ static void secretmem_freepage(struct page *page)
 }
 
 const struct address_space_operations secretmem_aops = {
+       .set_page_dirty = __set_page_dirty_no_writeback,
        .freepage       = secretmem_freepage,
        .migratepage    = secretmem_migratepage,
        .isolate_page   = secretmem_isolate_page,
index aa47af349ba804fa6d73f79bdd437591e2c52721..1cc75c811e247bc6cc9f6083be0d17fe7e84ee4d 100644 (file)
@@ -701,6 +701,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
        void *data;
        int ret;
 
+       if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
+           prog->expected_attach_type == BPF_XDP_CPUMAP)
+               return -EINVAL;
        if (kattr->test.ctx_in || kattr->test.ctx_out)
                return -EINVAL;
 
index 2b862cffc03a0868b4d1bb5e318ca9928e34d0f0..a16191dcaed199c95e7cf72a7835a7922f9b8656 100644 (file)
@@ -780,7 +780,7 @@ int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
                struct net_device *dst_dev;
 
                dst_dev = dst ? dst->dev : br->dev;
-               if (dst_dev != br_dev && dst_dev != dev)
+               if (dst_dev && dst_dev != dev)
                        continue;
 
                err = br_fdb_replay_one(nb, fdb, dst_dev, action, ctx);
index 647554c9813b98a691bbc976510a0dbabf89d29d..e12fd3cad6194210b1436e7c395203803fdd9e9b 100644 (file)
@@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
                goto err;
 
        ret = -EINVAL;
-       if (unlikely(msg->msg_iter.iov->iov_base == NULL))
+       if (unlikely(msg->msg_iter.nr_segs == 0) ||
+           unlikely(msg->msg_iter.iov->iov_base == NULL))
                goto err;
        noblock = msg->msg_flags & MSG_DONTWAIT;
 
index 64b21f0a20483b290cc51ff26a8f67f8f8f27a05..8f1a47ad6781abc30099dc1de6bd6f0ad5df7135 100644 (file)
 #include <trace/events/napi.h>
 #include <trace/events/net.h>
 #include <trace/events/skb.h>
+#include <trace/events/qdisc.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/static_key.h>
@@ -3844,6 +3845,18 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
        }
 }
 
+static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
+                            struct sk_buff **to_free,
+                            struct netdev_queue *txq)
+{
+       int rc;
+
+       rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
+       if (rc == NET_XMIT_SUCCESS)
+               trace_qdisc_enqueue(q, txq, skb);
+       return rc;
+}
+
 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                                 struct net_device *dev,
                                 struct netdev_queue *txq)
@@ -3862,8 +3875,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                         * of q->seqlock to protect from racing with requeuing.
                         */
                        if (unlikely(!nolock_qdisc_is_empty(q))) {
-                               rc = q->enqueue(skb, q, &to_free) &
-                                       NET_XMIT_MASK;
+                               rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
                                __qdisc_run(q);
                                qdisc_run_end(q);
 
@@ -3879,7 +3891,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                        return NET_XMIT_SUCCESS;
                }
 
-               rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+               rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
                qdisc_run(q);
 
 no_lock_out:
@@ -3923,7 +3935,7 @@ no_lock_out:
                qdisc_run_end(q);
                rc = NET_XMIT_SUCCESS;
        } else {
-               rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+               rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
@@ -9700,14 +9712,17 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        struct net_device *dev;
        int err, fd;
 
+       rtnl_lock();
        dev = dev_get_by_index(net, attr->link_create.target_ifindex);
-       if (!dev)
+       if (!dev) {
+               rtnl_unlock();
                return -EINVAL;
+       }
 
        link = kzalloc(sizeof(*link), GFP_USER);
        if (!link) {
                err = -ENOMEM;
-               goto out_put_dev;
+               goto unlock;
        }
 
        bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
@@ -9717,14 +9732,14 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        err = bpf_link_prime(&link->link, &link_primer);
        if (err) {
                kfree(link);
-               goto out_put_dev;
+               goto unlock;
        }
 
-       rtnl_lock();
        err = dev_xdp_attach_link(dev, NULL, link);
        rtnl_unlock();
 
        if (err) {
+               link->dev = NULL;
                bpf_link_cleanup(&link_primer);
                goto out_put_dev;
        }
@@ -9734,6 +9749,9 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        dev_put(dev);
        return fd;
 
+unlock:
+       rtnl_unlock();
+
 out_put_dev:
        dev_put(dev);
        return err;
index f63de967ac25939c22cd857dd76b30bdf19c0f37..fc7942c0dddc368c48260bbbb1637c6bcb9619b4 100644 (file)
@@ -663,7 +663,7 @@ static void skb_release_data(struct sk_buff *skb)
        if (skb->cloned &&
            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                              &shinfo->dataref))
-               return;
+               goto exit;
 
        skb_zcopy_clear(skb, true);
 
@@ -674,6 +674,17 @@ static void skb_release_data(struct sk_buff *skb)
                kfree_skb_list(shinfo->frag_list);
 
        skb_free_head(skb);
+exit:
+       /* When we clone an SKB we copy the recycling bit. The pp_recycle
+        * bit is only set on the head though, so in order to avoid races
+        * while trying to recycle fragments on __skb_frag_unref() we need
+        * to make one SKB responsible for triggering the recycle path.
+        * So disable the recycling bit if an SKB is cloned and we have
+        * additional references to the fragmented part of the SKB.
+        * Eventually the last SKB will have the recycling bit set and it's
+        * dataref set to 0, which will trigger the recycling
+        */
+       skb->pp_recycle = 0;
 }
 
 /*
@@ -3011,8 +3022,11 @@ skb_zerocopy_headlen(const struct sk_buff *from)
 
        if (!from->head_frag ||
            skb_headlen(from) < L1_CACHE_BYTES ||
-           skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+           skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
                hlen = skb_headlen(from);
+               if (!hlen)
+                       hlen = from->len;
+       }
 
        if (skb_has_frag_list(from))
                hlen = from->len;
index 9b6160a191f8fe9fd8f95d48faf2f70c74b072d9..15d71288e741f94acb98c6e28ee56ad76b54b891 100644 (file)
@@ -508,10 +508,8 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
        if (skb_linearize(skb))
                return -EAGAIN;
        num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
-       if (unlikely(num_sge < 0)) {
-               kfree(msg);
+       if (unlikely(num_sge < 0))
                return num_sge;
-       }
 
        copied = skb->len;
        msg->sg.start = 0;
@@ -530,6 +528,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 {
        struct sock *sk = psock->sk;
        struct sk_msg *msg;
+       int err;
 
        /* If we are receiving on the same sock skb->sk is already assigned,
         * skip memory accounting and owner transition seeing it already set
@@ -548,7 +547,10 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
         * into user buffers.
         */
        skb_set_owner_r(skb, sk);
-       return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       if (err < 0)
+               kfree(msg);
+       return err;
 }
 
 /* Puts an skb on the ingress queue of the socket already assigned to the
@@ -559,12 +561,16 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
 {
        struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        struct sock *sk = psock->sk;
+       int err;
 
        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
        skb_set_owner_r(skb, sk);
-       return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       if (err < 0)
+               kfree(msg);
+       return err;
 }
 
 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
index 5dbd45dc35ad3f44933b1fa1ba29ef4a7578249e..dc92a67baea39484cd4c93913d3eae8ac4463538 100644 (file)
@@ -816,7 +816,7 @@ static int dn_auto_bind(struct socket *sock)
 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
 {
        struct dn_scp *scp = DN_SK(sk);
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err;
 
        if (scp->state != DN_CR)
@@ -826,11 +826,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
        scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
        dn_send_conn_conf(sk, allocation);
 
-       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       add_wait_queue(sk_sleep(sk), &wait);
        for(;;) {
                release_sock(sk);
                if (scp->state == DN_CC)
-                       *timeo = schedule_timeout(*timeo);
+                       *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
                lock_sock(sk);
                err = 0;
                if (scp->state == DN_RUN)
@@ -844,9 +844,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
                err = -EAGAIN;
                if (!*timeo)
                        break;
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
-       finish_wait(sk_sleep(sk), &wait);
+       remove_wait_queue(sk_sleep(sk), &wait);
        if (err == 0) {
                sk->sk_socket->state = SS_CONNECTED;
        } else if (scp->state != DN_CC) {
@@ -858,7 +857,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
 static int dn_wait_run(struct sock *sk, long *timeo)
 {
        struct dn_scp *scp = DN_SK(sk);
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err = 0;
 
        if (scp->state == DN_RUN)
@@ -867,11 +866,11 @@ static int dn_wait_run(struct sock *sk, long *timeo)
        if (!*timeo)
                return -EALREADY;
 
-       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       add_wait_queue(sk_sleep(sk), &wait);
        for(;;) {
                release_sock(sk);
                if (scp->state == DN_CI || scp->state == DN_CC)
-                       *timeo = schedule_timeout(*timeo);
+                       *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
                lock_sock(sk);
                err = 0;
                if (scp->state == DN_RUN)
@@ -885,9 +884,8 @@ static int dn_wait_run(struct sock *sk, long *timeo)
                err = -ETIMEDOUT;
                if (!*timeo)
                        break;
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
-       finish_wait(sk_sleep(sk), &wait);
+       remove_wait_queue(sk_sleep(sk), &wait);
 out:
        if (err == 0) {
                sk->sk_socket->state = SS_CONNECTED;
@@ -1032,16 +1030,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
 
 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
 {
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sk_buff *skb = NULL;
        int err = 0;
 
-       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       add_wait_queue(sk_sleep(sk), &wait);
        for(;;) {
                release_sock(sk);
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb == NULL) {
-                       *timeo = schedule_timeout(*timeo);
+                       *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
                        skb = skb_dequeue(&sk->sk_receive_queue);
                }
                lock_sock(sk);
@@ -1056,9 +1054,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
                err = -EAGAIN;
                if (!*timeo)
                        break;
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
-       finish_wait(sk_sleep(sk), &wait);
+       remove_wait_queue(sk_sleep(sk), &wait);
 
        return skb == NULL ? ERR_PTR(err) : skb;
 }
index ffbba1e7155155616c85581673611ad68f808517..532085da8d8fb131e28204101b783309182d2968 100644 (file)
@@ -1808,6 +1808,7 @@ void dsa_slave_setup_tagger(struct net_device *slave)
        struct dsa_slave_priv *p = netdev_priv(slave);
        const struct dsa_port *cpu_dp = dp->cpu_dp;
        struct net_device *master = cpu_dp->master;
+       const struct dsa_switch *ds = dp->ds;
 
        slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
        slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
@@ -1819,6 +1820,14 @@ void dsa_slave_setup_tagger(struct net_device *slave)
        slave->needed_tailroom += master->needed_tailroom;
 
        p->xmit = cpu_dp->tag_ops->xmit;
+
+       slave->features = master->vlan_features | NETIF_F_HW_TC;
+       if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
+               slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       slave->hw_features |= NETIF_F_HW_TC;
+       slave->features |= NETIF_F_LLTX;
+       if (slave->needed_tailroom)
+               slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
 }
 
 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
@@ -1881,11 +1890,6 @@ int dsa_slave_create(struct dsa_port *port)
        if (slave_dev == NULL)
                return -ENOMEM;
 
-       slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
-       if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
-               slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-       slave_dev->hw_features |= NETIF_F_HW_TC;
-       slave_dev->features |= NETIF_F_LLTX;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        if (!is_zero_ether_addr(port->mac))
                ether_addr_copy(slave_dev->dev_addr, port->mac);
index 53565f48934c0f4814151e6d3e08aec2df4c6de2..a201ccf2435d8dee44a042351a36337309cdbff1 100644 (file)
@@ -53,6 +53,9 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 *tag;
        u8 *addr;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
        addr = skb_mac_header(skb);
@@ -114,6 +117,9 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
        u8 *addr;
        u16 val;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
        addr = skb_mac_header(skb);
@@ -164,6 +170,9 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
        u8 *addr;
        u8 *tag;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
        addr = skb_mac_header(skb);
index f26916a62f25696950707b235283136c2fea5b9e..d3e9386b493eb3da85fe997ab93743e93fa1d049 100644 (file)
@@ -503,7 +503,7 @@ static int __init tcp_bpf_v4_build_proto(void)
        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
        return 0;
 }
-core_initcall(tcp_bpf_v4_build_proto);
+late_initcall(tcp_bpf_v4_build_proto);
 
 static int tcp_bpf_assert_proto_ops(struct proto *ops)
 {
index 47c32604d38fca960d2cd56f3588bfd2e390b789..25fa4c01a17f63d33444287b62d39a3d57917cc4 100644 (file)
@@ -507,8 +507,18 @@ void tcp_fastopen_active_disable(struct sock *sk)
 {
        struct net *net = sock_net(sk);
 
+       if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
+               return;
+
+       /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
+       WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);
+
+       /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
+        * We want net->ipv4.tfo_active_disable_stamp to be updated first.
+        */
+       smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);
-       net->ipv4.tfo_active_disable_stamp = jiffies;
+
        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
 }
 
@@ -519,17 +529,27 @@ void tcp_fastopen_active_disable(struct sock *sk)
 bool tcp_fastopen_active_should_disable(struct sock *sk)
 {
        unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
-       int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        unsigned long timeout;
+       int tfo_da_times;
        int multiplier;
 
+       if (!tfo_bh_timeout)
+               return false;
+
+       tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        if (!tfo_da_times)
                return false;
 
+       /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
+       smp_rmb();
+
        /* Limit timeout to max: 2^6 * initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);
-       timeout = multiplier * tfo_bh_timeout * HZ;
-       if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
+
+       /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
+       timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
+                 multiplier * tfo_bh_timeout * HZ;
+       if (time_before(jiffies, timeout))
                return true;
 
        /* Mark check bit so we can check for successful active TFO
index b9dc2d6197be8b8b03a4d052ad1c87987c7a62aa..a692626c19e443343027b09d04eaa6f8ce24cca2 100644 (file)
@@ -2965,7 +2965,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_comp_sack_nr = 44;
        net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
        spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
-       net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
+       net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
        atomic_set(&net->ipv4.tfo_active_disable_times, 0);
 
        /* Reno is always built in */
index 62cd4cd52e8481599f5e98c6022c96067e800f2d..1a742b710e543e68e4ef4cb56c0c28d9597057a4 100644 (file)
@@ -645,10 +645,12 @@ static struct sock *__udp4_lib_err_encap(struct net *net,
                                         const struct iphdr *iph,
                                         struct udphdr *uh,
                                         struct udp_table *udptable,
+                                        struct sock *sk,
                                         struct sk_buff *skb, u32 info)
 {
+       int (*lookup)(struct sock *sk, struct sk_buff *skb);
        int network_offset, transport_offset;
-       struct sock *sk;
+       struct udp_sock *up;
 
        network_offset = skb_network_offset(skb);
        transport_offset = skb_transport_offset(skb);
@@ -659,18 +661,28 @@ static struct sock *__udp4_lib_err_encap(struct net *net,
        /* Transport header needs to point to the UDP header */
        skb_set_transport_header(skb, iph->ihl << 2);
 
+       if (sk) {
+               up = udp_sk(sk);
+
+               lookup = READ_ONCE(up->encap_err_lookup);
+               if (lookup && lookup(sk, skb))
+                       sk = NULL;
+
+               goto out;
+       }
+
        sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
                               iph->saddr, uh->dest, skb->dev->ifindex, 0,
                               udptable, NULL);
        if (sk) {
-               int (*lookup)(struct sock *sk, struct sk_buff *skb);
-               struct udp_sock *up = udp_sk(sk);
+               up = udp_sk(sk);
 
                lookup = READ_ONCE(up->encap_err_lookup);
                if (!lookup || lookup(sk, skb))
                        sk = NULL;
        }
 
+out:
        if (!sk)
                sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
 
@@ -707,15 +719,16 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                               iph->saddr, uh->source, skb->dev->ifindex,
                               inet_sdif(skb), udptable, NULL);
+
        if (!sk || udp_sk(sk)->encap_type) {
                /* No socket for error: try tunnels before discarding */
-               sk = ERR_PTR(-ENOENT);
                if (static_branch_unlikely(&udp_encap_needed_key)) {
-                       sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
+                       sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
                                                  info);
                        if (!sk)
                                return 0;
-               }
+               } else
+                       sk = ERR_PTR(-ENOENT);
 
                if (IS_ERR(sk)) {
                        __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
index 45b8782aec0cc151817f7e9acd80e15f24a16cf7..9f5a5cdc38e64606502ece689ec8f1791b880c49 100644 (file)
@@ -134,7 +134,7 @@ static int __init udp_bpf_v4_build_proto(void)
        udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot);
        return 0;
 }
-core_initcall(udp_bpf_v4_build_proto);
+late_initcall(udp_bpf_v4_build_proto);
 
 int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
 {
index 01bea76e3891c2ee8667ee83fdbff5b26fe8a23f..e1b9f7ac8bade5b3aab7f43496791ef5d8663485 100644 (file)
@@ -74,7 +74,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 
                        if (likely(nskb)) {
                                if (skb->sk)
-                                       skb_set_owner_w(skb, skb->sk);
+                                       skb_set_owner_w(nskb, skb->sk);
                                consume_skb(skb);
                        } else {
                                kfree_skb(skb);
index 7b756a7dc03636c63d92491a72643e9a3a3d39c0..b6ddf23d38330ded88509b8507998ce82a72799b 100644 (file)
@@ -3769,7 +3769,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
                err = PTR_ERR(rt->fib6_metrics);
                /* Do not leave garbage there. */
                rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
-               goto out;
+               goto out_free;
        }
 
        if (cfg->fc_flags & RTF_ADDRCONF)
index 0cc7ba531b34157feca74b876b3aa5a4013fec04..c5e15e94bb004244f4d3a42426ec9ccaef66778f 100644 (file)
@@ -502,12 +502,14 @@ static struct sock *__udp6_lib_err_encap(struct net *net,
                                         const struct ipv6hdr *hdr, int offset,
                                         struct udphdr *uh,
                                         struct udp_table *udptable,
+                                        struct sock *sk,
                                         struct sk_buff *skb,
                                         struct inet6_skb_parm *opt,
                                         u8 type, u8 code, __be32 info)
 {
+       int (*lookup)(struct sock *sk, struct sk_buff *skb);
        int network_offset, transport_offset;
-       struct sock *sk;
+       struct udp_sock *up;
 
        network_offset = skb_network_offset(skb);
        transport_offset = skb_transport_offset(skb);
@@ -518,18 +520,28 @@ static struct sock *__udp6_lib_err_encap(struct net *net,
        /* Transport header needs to point to the UDP header */
        skb_set_transport_header(skb, offset);
 
+       if (sk) {
+               up = udp_sk(sk);
+
+               lookup = READ_ONCE(up->encap_err_lookup);
+               if (lookup && lookup(sk, skb))
+                       sk = NULL;
+
+               goto out;
+       }
+
        sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
                               &hdr->saddr, uh->dest,
                               inet6_iif(skb), 0, udptable, skb);
        if (sk) {
-               int (*lookup)(struct sock *sk, struct sk_buff *skb);
-               struct udp_sock *up = udp_sk(sk);
+               up = udp_sk(sk);
 
                lookup = READ_ONCE(up->encap_err_lookup);
                if (!lookup || lookup(sk, skb))
                        sk = NULL;
        }
 
+out:
        if (!sk) {
                sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
                                                        offset, info));
@@ -558,16 +570,17 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
                               inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
+
        if (!sk || udp_sk(sk)->encap_type) {
                /* No socket for error: try tunnels before discarding */
-               sk = ERR_PTR(-ENOENT);
                if (static_branch_unlikely(&udpv6_encap_needed_key)) {
                        sk = __udp6_lib_err_encap(net, hdr, offset, uh,
-                                                 udptable, skb,
+                                                 udptable, sk, skb,
                                                  opt, type, code, info);
                        if (!sk)
                                return 0;
-               }
+               } else
+                       sk = ERR_PTR(-ENOENT);
 
                if (IS_ERR(sk)) {
                        __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
index 9115f8a7dd45b5ee7f8b3efc9d771f183b747013..a8da88db7893fcdc445ed74fd9c25c24ef24d47f 100644 (file)
@@ -121,11 +121,9 @@ static void nr_heartbeat_expiry(struct timer_list *t)
                   is accepted() it isn't 'dead' so doesn't get removed. */
                if (sock_flag(sk, SOCK_DESTROY) ||
                    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
-                       sock_hold(sk);
                        bh_unlock_sock(sk);
                        nr_destroy_socket(sk);
-                       sock_put(sk);
-                       return;
+                       goto out;
                }
                break;
 
@@ -146,6 +144,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
 
        nr_start_heartbeat(sk);
        bh_unlock_sock(sk);
+out:
+       sock_put(sk);
 }
 
 static void nr_t2timer_expiry(struct timer_list *t)
@@ -159,6 +159,7 @@ static void nr_t2timer_expiry(struct timer_list *t)
                nr_enquiry_response(sk);
        }
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void nr_t4timer_expiry(struct timer_list *t)
@@ -169,6 +170,7 @@ static void nr_t4timer_expiry(struct timer_list *t)
        bh_lock_sock(sk);
        nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void nr_idletimer_expiry(struct timer_list *t)
@@ -197,6 +199,7 @@ static void nr_idletimer_expiry(struct timer_list *t)
                sock_set_flag(sk, SOCK_DEAD);
        }
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void nr_t1timer_expiry(struct timer_list *t)
@@ -209,8 +212,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        case NR_STATE_1:
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
-                       bh_unlock_sock(sk);
-                       return;
+                       goto out;
                } else {
                        nr->n2count++;
                        nr_write_internal(sk, NR_CONNREQ);
@@ -220,8 +222,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        case NR_STATE_2:
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
-                       bh_unlock_sock(sk);
-                       return;
+                       goto out;
                } else {
                        nr->n2count++;
                        nr_write_internal(sk, NR_DISCREQ);
@@ -231,8 +232,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        case NR_STATE_3:
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
-                       bh_unlock_sock(sk);
-                       return;
+                       goto out;
                } else {
                        nr->n2count++;
                        nr_requeue_frames(sk);
@@ -241,5 +241,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        }
 
        nr_start_t1timer(sk);
+out:
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
index 81a1c67335be62d04a468711c353efc90df9a26f..8d17a543cc9fef921dfba1bb5fe10a6d4494f63b 100644 (file)
@@ -6,6 +6,7 @@
 */
 
 #include <linux/module.h>
+#include <linux/if_arp.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
@@ -33,6 +34,13 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&d->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
+       action = READ_ONCE(d->tcf_action);
+       if (unlikely(action == TC_ACT_SHOT))
+               goto drop;
+
+       if (!skb->dev || skb->dev->type != ARPHRD_ETHER)
+               return action;
+
        /* XXX: if you are going to edit more fields beyond ethernet header
         * (example when you add IP header replacement or vlan swap)
         * then MAX_EDIT_LEN needs to change appropriately
@@ -41,10 +49,6 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
        if (unlikely(err)) /* best policy is to drop on the floor */
                goto drop;
 
-       action = READ_ONCE(d->tcf_action);
-       if (unlikely(action == TC_ACT_SHOT))
-               goto drop;
-
        p = rcu_dereference_bh(d->skbmod_p);
        flags = p->flags;
        if (flags & SKBMOD_F_DMAC)
index d73b5c5514a9fa149e3c532c7f4f52d6d376acb8..e3e79e9bd7067da289f6458076020749c4250030 100644 (file)
@@ -2904,7 +2904,7 @@ replay:
                break;
        case RTM_GETCHAIN:
                err = tc_chain_notify(chain, skb, n->nlmsg_seq,
-                                     n->nlmsg_seq, n->nlmsg_type, true);
+                                     n->nlmsg_flags, n->nlmsg_type, true);
                if (err < 0)
                        NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
                break;
index 5b274534264c2de4c1325efa3f388099f32cd7ee..e9a8a2c86bbdd033b3dca556fcdb1e79b835b5cd 100644 (file)
@@ -278,6 +278,8 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r,
                             TCA_TCINDEX_POLICE);
 }
 
+static void tcindex_free_perfect_hash(struct tcindex_data *cp);
+
 static void tcindex_partial_destroy_work(struct work_struct *work)
 {
        struct tcindex_data *p = container_of(to_rcu_work(work),
@@ -285,7 +287,8 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
                                              rwork);
 
        rtnl_lock();
-       kfree(p->perfect);
+       if (p->perfect)
+               tcindex_free_perfect_hash(p);
        kfree(p);
        rtnl_unlock();
 }
index 6f8319b828b0d13c4f347acb858e70337d4c4519..fe74c5f956303f6d4840c72a0756e8e2b6875fd6 100644 (file)
@@ -860,6 +860,8 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
        if (replace) {
                list_del_init(&shkey->key_list);
                sctp_auth_shkey_release(shkey);
+               if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
+                       sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
        }
        list_add(&cur_key->key_list, sh_keys);
 
index 9032ce60d50e81be8c6adfbf72df32f968a6a2fc..4dfb5ea82b05b08386a2d7018cd6417c0ec6ba5a 100644 (file)
@@ -104,8 +104,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
                if (asoc->param_flags & SPP_PMTUD_ENABLE)
                        sctp_assoc_sync_pmtu(asoc);
        } else if (!sctp_transport_pl_enabled(tp) &&
-                  !sctp_transport_pmtu_check(tp)) {
-               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                  asoc->param_flags & SPP_PMTUD_ENABLE) {
+               if (!sctp_transport_pmtu_check(tp))
                        sctp_assoc_sync_pmtu(asoc);
        }
 
index e64e01f61b117b8befe206d1eb4807d96aad09b4..6b937bfd475159df6b15e6a30023e30f09ff0a07 100644 (file)
@@ -4577,6 +4577,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        }
 
        if (optlen > 0) {
+               /* Trim it to the biggest size sctp sockopt may need if necessary */
+               optlen = min_t(unsigned int, optlen,
+                              PAGE_ALIGN(USHRT_MAX +
+                                         sizeof(__u16) * sizeof(struct sctp_reset_streams)));
                kopt = memdup_sockptr(optval, optlen);
                if (IS_ERR(kopt))
                        return PTR_ERR(kopt);
index 14e32825c3395fd7fa28fd97266479cd38a42836..6a2971a7e6a1c6f0b8bac91748b9ccd02d5ed788 100644 (file)
@@ -246,12 +246,18 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
                return false;
 
-       if (substream->ops->mmap ||
-           (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
-            substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
+       if (substream->ops->mmap)
                return true;
 
-       return dma_can_mmap(substream->dma_buffer.dev.dev);
+       switch (substream->dma_buffer.dev.type) {
+       case SNDRV_DMA_TYPE_UNKNOWN:
+               return false;
+       case SNDRV_DMA_TYPE_CONTINUOUS:
+       case SNDRV_DMA_TYPE_VMALLOC:
+               return true;
+       default:
+               return dma_can_mmap(substream->dma_buffer.dev.dev);
+       }
 }
 
 static int constrain_mask_params(struct snd_pcm_substream *substream,
@@ -3063,9 +3069,14 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
                boundary = 0x7fffffff;
        snd_pcm_stream_lock_irq(substream);
        /* FIXME: we should consider the boundary for the sync from app */
-       if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
-               control->appl_ptr = scontrol.appl_ptr;
-       else
+       if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
+               err = pcm_lib_apply_appl_ptr(substream,
+                               scontrol.appl_ptr);
+               if (err < 0) {
+                       snd_pcm_stream_unlock_irq(substream);
+                       return err;
+               }
+       } else
                scontrol.appl_ptr = control->appl_ptr % boundary;
        if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
                control->avail_min = scontrol.avail_min;
@@ -3664,6 +3675,8 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        if (substream->ops->page)
                page = substream->ops->page(substream, offset);
+       else if (!snd_pcm_get_dma_buf(substream))
+               page = virt_to_page(runtime->dma_area + offset);
        else
                page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
        if (!page)
index d8be146793eee2d513a53539a0c1197904bdedd0..c9d0ba353463bda2109d45474bc1c88155e17bdf 100644 (file)
@@ -319,6 +319,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
                .device = 0x4b55,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+               .device = 0x4b58,
+       },
 #endif
 
 /* Alder Lake */
index 5bbe6695689d753cc16bcc6448da3fe745953e7e..7ad8c5f7b664b45038e117a53581ca48c2f2263c 100644 (file)
@@ -816,6 +816,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
        mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+       spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
 
        spin_lock(&p->chip->reg_lock);
        set_mode_register(p->chip, 0xc0);       /* c0 = STOP */
@@ -855,6 +856,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
        spin_unlock(&p->chip->reg_lock);
 
        /* restore PCM volume */
+       spin_lock_irqsave(&p->chip->mixer_lock, flags);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
        spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
@@ -880,6 +882,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
        mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+       spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
 
        spin_lock(&p->chip->reg_lock);
        if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
@@ -894,6 +897,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
        spin_unlock(&p->chip->reg_lock);
 
        /* restore PCM volume */
+       spin_lock_irqsave(&p->chip->mixer_lock, flags);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
        spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
index 4b2cc8cb55c49c643e119f65476a1412225f5d0e..e143e69d8184f3e4b21313d36931759f91f9f578 100644 (file)
@@ -1940,6 +1940,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
 static const struct snd_pci_quirk force_connect_list[] = {
        SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+       SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+       SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
        {}
 };
 
index 1389cfd5e0dbb6b423ae4ef8798abeb548176abd..caaf0e8aac111ff6ea1764e60270d4600e28af01 100644 (file)
@@ -8626,6 +8626,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
        SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
        SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
index 84e3906abd4f33050b0adaf4535d06fcdffd2593..9449fb40a956bd064462b4f4386a8878e0b1be7e 100644 (file)
@@ -576,6 +576,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                                | SND_SOC_DAIFMT_CBM_CFM,
                .init = cz_rt5682_init,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_play_ops,
                SND_SOC_DAILINK_REG(designware1, rt5682, platform),
        },
@@ -585,6 +586,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_cap_ops,
                SND_SOC_DAILINK_REG(designware2, rt5682, platform),
        },
@@ -594,6 +596,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_max_play_ops,
                SND_SOC_DAILINK_REG(designware3, mx, platform),
        },
@@ -604,6 +607,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_dmic0_cap_ops,
                SND_SOC_DAILINK_REG(designware3, adau, platform),
        },
@@ -614,6 +618,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_dmic1_cap_ops,
                SND_SOC_DAILINK_REG(designware2, adau, platform),
        },
index 7ebae3f09435c0c83ddb67de36b65d716f32bde3..a3b784ed4f70a28c6c158942da927c5ed5674be6 100644 (file)
@@ -1325,7 +1325,7 @@ config SND_SOC_SSM2305
          high-efficiency mono Class-D audio power amplifiers.
 
 config SND_SOC_SSM2518
-       tristate
+       tristate "Analog Devices SSM2518 Class-D Amplifier"
        depends on I2C
 
 config SND_SOC_SSM2602
@@ -1557,6 +1557,7 @@ config SND_SOC_WCD934X
          Qualcomm SoCs like SDM845.
 
 config SND_SOC_WCD938X
+       depends on SND_SOC_WCD938X_SDW
        tristate
 
 config SND_SOC_WCD938X_SDW
@@ -1813,11 +1814,6 @@ config SND_SOC_ZL38060
          which consists of a Digital Signal Processor (DSP), several Digital
          Audio Interfaces (DAIs), analog outputs, and a block of 14 GPIOs.
 
-config SND_SOC_ZX_AUD96P22
-       tristate "ZTE ZX AUD96P22 CODEC"
-       depends on I2C
-       select REGMAP_I2C
-
 # Amp
 config SND_SOC_LM4857
        tristate
index 3000bc128b5bcbed1967cd3765b4fa45393c244e..38356ea2bd6ef363f05bb962d4041536c0645c05 100644 (file)
@@ -1695,6 +1695,8 @@ static const struct regmap_config rt5631_regmap_config = {
        .reg_defaults = rt5631_reg,
        .num_reg_defaults = ARRAY_SIZE(rt5631_reg),
        .cache_type = REGCACHE_RBTREE,
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int rt5631_i2c_probe(struct i2c_client *i2c,
index e4c91571abaefba346fa095c37a982585b145c86..abcd6f48378880651f7e4e761ede1a934589277d 100644 (file)
@@ -973,10 +973,14 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
-               if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS"))
+               if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
                        snd_soc_component_update_bits(component,
                                RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0);
-               if (!snd_soc_dapm_get_pin_status(dapm, "Vref2"))
+               if (!snd_soc_dapm_get_pin_status(dapm, "Vref2") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
                        snd_soc_component_update_bits(component,
                                RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0);
                snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
index 51870d50f4195e348e600905602ee3cc7620e5ff..b504d63385b38b3e3c074b87dc11d3d48e07bc18 100644 (file)
@@ -1604,6 +1604,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
                        ret);
                return ret;
        }
+       regcache_cache_only(aic31xx->regmap, true);
+
        aic31xx->dev = &i2c->dev;
        aic31xx->irq = i2c->irq;
 
index 81952984613d2f3f984b184c5019507f1437f21a..2513922a0292314881d6bb2300ee592bd30595e0 100644 (file)
@@ -151,8 +151,8 @@ struct aic31xx_pdata {
 #define AIC31XX_WORD_LEN_24BITS                0x02
 #define AIC31XX_WORD_LEN_32BITS                0x03
 #define AIC31XX_IFACE1_MASTER_MASK     GENMASK(3, 2)
-#define AIC31XX_BCLK_MASTER            BIT(2)
-#define AIC31XX_WCLK_MASTER            BIT(3)
+#define AIC31XX_BCLK_MASTER            BIT(3)
+#define AIC31XX_WCLK_MASTER            BIT(2)
 
 /* AIC31XX_DATA_OFFSET */
 #define AIC31XX_DATA_OFFSET_MASK       GENMASK(7, 0)
index c63b717040ed25c57a5d68f52e9fe7e0eb3996ad..dcd8aeb45cb317af4199a2f9084870df43897b5e 100644 (file)
@@ -250,8 +250,8 @@ static DECLARE_TLV_DB_SCALE(tlv_pcm, -6350, 50, 0);
 static DECLARE_TLV_DB_SCALE(tlv_driver_gain, -600, 100, 0);
 /* -12dB min, 0.5dB steps */
 static DECLARE_TLV_DB_SCALE(tlv_adc_vol, -1200, 50, 0);
-
-static DECLARE_TLV_DB_LINEAR(tlv_spk_vol, TLV_DB_GAIN_MUTE, 0);
+/* -6dB min, 1dB steps */
+static DECLARE_TLV_DB_SCALE(tlv_tas_driver_gain, -5850, 50, 0);
 static DECLARE_TLV_DB_SCALE(tlv_amp_vol, 0, 600, 1);
 
 static const char * const lo_cm_text[] = {
@@ -1063,21 +1063,20 @@ static const struct snd_soc_component_driver soc_component_dev_aic32x4 = {
 };
 
 static const struct snd_kcontrol_new aic32x4_tas2505_snd_controls[] = {
-       SOC_DOUBLE_R_S_TLV("PCM Playback Volume", AIC32X4_LDACVOL,
-                       AIC32X4_LDACVOL, 0, -0x7f, 0x30, 7, 0, tlv_pcm),
+       SOC_SINGLE_S8_TLV("PCM Playback Volume",
+                         AIC32X4_LDACVOL, -0x7f, 0x30, tlv_pcm),
        SOC_ENUM("DAC Playback PowerTune Switch", l_ptm_enum),
-       SOC_DOUBLE_R_S_TLV("HP Driver Playback Volume", AIC32X4_HPLGAIN,
-                       AIC32X4_HPLGAIN, 0, -0x6, 0x1d, 5, 0,
-                       tlv_driver_gain),
-       SOC_DOUBLE_R("HP DAC Playback Switch", AIC32X4_HPLGAIN,
-                       AIC32X4_HPLGAIN, 6, 0x01, 1),
 
-       SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
+       SOC_SINGLE_TLV("HP Driver Gain Volume",
+                       AIC32X4_HPLGAIN, 0, 0x74, 1, tlv_tas_driver_gain),
+       SOC_SINGLE("HP DAC Playback Switch", AIC32X4_HPLGAIN, 6, 1, 1),
 
-       SOC_SINGLE_RANGE_TLV("Speaker Driver Playback Volume", TAS2505_SPKVOL1,
-                       0, 0, 117, 1, tlv_spk_vol),
-       SOC_SINGLE_TLV("Speaker Amplifier Playback Volume", TAS2505_SPKVOL2,
-                       4, 5, 0, tlv_amp_vol),
+       SOC_SINGLE_TLV("Speaker Driver Playback Volume",
+                       TAS2505_SPKVOL1, 0, 0x74, 1, tlv_tas_driver_gain),
+       SOC_SINGLE_TLV("Speaker Amplifier Playback Volume",
+                       TAS2505_SPKVOL2, 4, 5, 0, tlv_amp_vol),
+
+       SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
 };
 
 static const struct snd_kcontrol_new hp_output_mixer_controls[] = {
index 78b76eceff8fa2fcadb5d455104275fe4594695a..2fcc97370be2bba8284d0dd42642f9cfe11d28c5 100644 (file)
@@ -3317,13 +3317,6 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
                             (WCD938X_DIGITAL_INTR_LEVEL_0 + i), 0);
        }
 
-       ret = wcd938x_irq_init(wcd938x, component->dev);
-       if (ret) {
-               dev_err(component->dev, "%s: IRQ init failed: %d\n",
-                       __func__, ret);
-               return ret;
-       }
-
        wcd938x->hphr_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
                                                       WCD938X_IRQ_HPHR_PDM_WD_INT);
        wcd938x->hphl_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
@@ -3553,7 +3546,6 @@ static int wcd938x_bind(struct device *dev)
        }
        wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
        wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
-       wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
 
        wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
        if (!wcd938x->txdev) {
@@ -3562,7 +3554,6 @@ static int wcd938x_bind(struct device *dev)
        }
        wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
        wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
-       wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq;
        wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
        if (!wcd938x->tx_sdw_dev) {
                dev_err(dev, "could not get txslave with matching of dev\n");
@@ -3595,6 +3586,15 @@ static int wcd938x_bind(struct device *dev)
                return PTR_ERR(wcd938x->regmap);
        }
 
+       ret = wcd938x_irq_init(wcd938x, dev);
+       if (ret) {
+               dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
+               return ret;
+       }
+
+       wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
+       wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq;
+
        ret = wcd938x_set_micbias_data(wcd938x);
        if (ret < 0) {
                dev_err(dev, "%s: bad micbias pdata\n", __func__);
index 37aa020f23f631c3635c0a925de1124db07b2177..549d98241daec1ce6e6c22e19772ac6209b57613 100644 (file)
 /*
  * HALO_CCM_CORE_CONTROL
  */
+#define HALO_CORE_RESET                     0x00000200
 #define HALO_CORE_EN                        0x00000001
 
 /*
@@ -1213,7 +1214,7 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
 
        mutex_lock(&ctl->dsp->pwr_lock);
 
-       ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, size);
+       ret = wm_coeff_read_ctrl(ctl, ctl->cache, size);
 
        if (!ret && copy_to_user(bytes, ctl->cache, size))
                ret = -EFAULT;
@@ -3333,7 +3334,8 @@ static int wm_halo_start_core(struct wm_adsp *dsp)
 {
        return regmap_update_bits(dsp->regmap,
                                  dsp->base + HALO_CCM_CORE_CONTROL,
-                                 HALO_CORE_EN, HALO_CORE_EN);
+                                 HALO_CORE_RESET | HALO_CORE_EN,
+                                 HALO_CORE_RESET | HALO_CORE_EN);
 }
 
 static void wm_halo_stop_core(struct wm_adsp *dsp)
index 0e7ed906b34177715caa17c855b31ed7a11a99df..25daef910aee184fea5abbb3b68c385f8f613005 100644 (file)
@@ -55,43 +55,68 @@ static int spk_init(struct snd_soc_pcm_runtime *rtd)
        return ret;
 }
 
-static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd)
+static int mx8373_enable_spk_pin(struct snd_pcm_substream *substream, bool enable)
 {
+       struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+       struct snd_soc_dai *codec_dai;
+       struct snd_soc_dai *cpu_dai;
        int ret;
+       int j;
 
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               /* enable max98373 first */
-               ret = max_98373_trigger(substream, cmd);
-               if (ret < 0)
-                       break;
-
-               ret = sdw_trigger(substream, cmd);
-               break;
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ret = sdw_trigger(substream, cmd);
-               if (ret < 0)
-                       break;
-
-               ret = max_98373_trigger(substream, cmd);
-               break;
-       default:
-               ret = -EINVAL;
-               break;
+       /* set spk pin by playback only */
+       if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+               return 0;
+
+       cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       for_each_rtd_codec_dais(rtd, j, codec_dai) {
+               struct snd_soc_dapm_context *dapm =
+                               snd_soc_component_get_dapm(cpu_dai->component);
+               char pin_name[16];
+
+               snprintf(pin_name, ARRAY_SIZE(pin_name), "%s Spk",
+                        codec_dai->component->name_prefix);
+
+               if (enable)
+                       ret = snd_soc_dapm_enable_pin(dapm, pin_name);
+               else
+                       ret = snd_soc_dapm_disable_pin(dapm, pin_name);
+
+               if (!ret)
+                       snd_soc_dapm_sync(dapm);
        }
 
-       return ret;
+       return 0;
+}
+
+static int mx8373_sdw_prepare(struct snd_pcm_substream *substream)
+{
+       int ret = 0;
+
+       /* according to soc_pcm_prepare dai link prepare is called first */
+       ret = sdw_prepare(substream);
+       if (ret < 0)
+               return ret;
+
+       return mx8373_enable_spk_pin(substream, true);
+}
+
+static int mx8373_sdw_hw_free(struct snd_pcm_substream *substream)
+{
+       int ret = 0;
+
+       /* according to soc_pcm_hw_free dai link free is called first */
+       ret = sdw_hw_free(substream);
+       if (ret < 0)
+               return ret;
+
+       return mx8373_enable_spk_pin(substream, false);
 }
 
 static const struct snd_soc_ops max_98373_sdw_ops = {
        .startup = sdw_startup,
-       .prepare = sdw_prepare,
-       .trigger = max98373_sdw_trigger,
-       .hw_free = sdw_hw_free,
+       .prepare = mx8373_sdw_prepare,
+       .trigger = sdw_trigger,
+       .hw_free = mx8373_sdw_hw_free,
        .shutdown = sdw_shutdown,
 };
 
index 46513bb97904473cf6f4baeddc04a1c9c802f0f9..d1c570ca21ea781f84ff8c5bc6db6d8a9ea0ddc5 100644 (file)
@@ -1015,6 +1015,7 @@ out:
 
 static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
+       struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
        int ret = -EINVAL, _ret = 0;
        int rollback = 0;
 
@@ -1055,14 +1056,23 @@ start_err:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
-               if (ret < 0)
-                       break;
+               if (rtd->dai_link->stop_dma_first) {
+                       ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
 
-               ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
-               if (ret < 0)
-                       break;
+                       ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
+               } else {
+                       ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
 
+                       ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
+               }
                ret = snd_soc_link_trigger(substream, cmd, rollback);
                break;
        }
index a00262184efab4da818ac0b45a38c3b9956fbbd7..d04ce84fe7cc2a4ead4540c651595b8fe75ad1b5 100644 (file)
@@ -89,6 +89,7 @@ static const struct sof_dev_desc adls_desc = {
 static const struct sof_dev_desc adl_desc = {
        .machines               = snd_soc_acpi_intel_adl_machines,
        .alt_machines           = snd_soc_acpi_intel_adl_sdw_machines,
+       .use_acpi_target_states = true,
        .resindex_lpe_base      = 0,
        .resindex_pcicfg_base   = -1,
        .resindex_imr_base      = -1,
index 573374b89b100a9ae296a437e27bc5e12a30f4e1..d3276b4595affb18b33083ccf1156fb717fc06bf 100644 (file)
@@ -213,19 +213,19 @@ snd_pcm_uframes_t tegra_pcm_pointer(struct snd_soc_component *component,
 }
 EXPORT_SYMBOL_GPL(tegra_pcm_pointer);
 
-static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
+static int tegra_pcm_preallocate_dma_buffer(struct device *dev, struct snd_pcm *pcm, int stream,
                                            size_t size)
 {
        struct snd_pcm_substream *substream = pcm->streams[stream].substream;
        struct snd_dma_buffer *buf = &substream->dma_buffer;
 
-       buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
+       buf->area = dma_alloc_wc(dev, size, &buf->addr, GFP_KERNEL);
        if (!buf->area)
                return -ENOMEM;
 
        buf->private_data = NULL;
        buf->dev.type = SNDRV_DMA_TYPE_DEV;
-       buf->dev.dev = pcm->card->dev;
+       buf->dev.dev = dev;
        buf->bytes = size;
 
        return 0;
@@ -244,31 +244,28 @@ static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
        if (!buf->area)
                return;
 
-       dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
+       dma_free_wc(buf->dev.dev, buf->bytes, buf->area, buf->addr);
        buf->area = NULL;
 }
 
-static int tegra_pcm_dma_allocate(struct snd_soc_pcm_runtime *rtd,
+static int tegra_pcm_dma_allocate(struct device *dev, struct snd_soc_pcm_runtime *rtd,
                                  size_t size)
 {
-       struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
 
-       ret = dma_set_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret < 0)
                return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
-               ret = tegra_pcm_preallocate_dma_buffer(pcm,
-                       SNDRV_PCM_STREAM_PLAYBACK, size);
+               ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_PLAYBACK, size);
                if (ret)
                        goto err;
        }
 
        if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
-               ret = tegra_pcm_preallocate_dma_buffer(pcm,
-                       SNDRV_PCM_STREAM_CAPTURE, size);
+               ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_CAPTURE, size);
                if (ret)
                        goto err_free_play;
        }
@@ -284,7 +281,16 @@ err:
 int tegra_pcm_construct(struct snd_soc_component *component,
                        struct snd_soc_pcm_runtime *rtd)
 {
-       return tegra_pcm_dma_allocate(rtd, tegra_pcm_hardware.buffer_bytes_max);
+       struct device *dev = component->dev;
+
+       /*
+        * Fallback for backwards-compatibility with older device trees that
+        * have the iommus property in the virtual, top-level "sound" node.
+        */
+       if (!of_get_property(dev->of_node, "iommus", NULL))
+               dev = rtd->card->snd_card->dev;
+
+       return tegra_pcm_dma_allocate(dev, rtd, tegra_pcm_hardware.buffer_bytes_max);
 }
 EXPORT_SYMBOL_GPL(tegra_pcm_construct);
 
index a7c0484d44ec79dbf74d2a02aa96f2d3958e7da9..265bbc5a2f96a8eba48febe8e7f42c965ba0a49c 100644 (file)
@@ -197,7 +197,7 @@ static int j721e_configure_refclk(struct j721e_priv *priv,
                return ret;
        }
 
-       if (priv->hsdiv_rates[domain->parent_clk_id] != scki) {
+       if (domain->parent_clk_id == -1 || priv->hsdiv_rates[domain->parent_clk_id] != scki) {
                dev_dbg(priv->dev,
                        "%s configuration for %u Hz: %s, %dxFS (SCKI: %u Hz)\n",
                        audio_domain == J721E_AUDIO_DOMAIN_CPB ? "CPB" : "IVI",
@@ -278,23 +278,29 @@ static int j721e_audio_startup(struct snd_pcm_substream *substream)
                                          j721e_rule_rate, &priv->rate_range,
                                          SNDRV_PCM_HW_PARAM_RATE, -1);
 
-       mutex_unlock(&priv->mutex);
 
        if (ret)
-               return ret;
+               goto out;
 
        /* Reset TDM slots to 32 */
        ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
        if (ret && ret != -ENOTSUPP)
-               return ret;
+               goto out;
 
        for_each_rtd_codec_dais(rtd, i, codec_dai) {
                ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 32);
                if (ret && ret != -ENOTSUPP)
-                       return ret;
+                       goto out;
        }
 
-       return 0;
+       if (ret == -ENOTSUPP)
+               ret = 0;
+out:
+       if (ret)
+               domain->active--;
+       mutex_unlock(&priv->mutex);
+
+       return ret;
 }
 
 static int j721e_audio_hw_params(struct snd_pcm_substream *substream,
index 30b3e128e28d86865a39b0333314a0588f8b2d30..f4cdaf1ba44ace3dfc31c231aabd27ee23d1e5d7 100644 (file)
@@ -3295,7 +3295,15 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
 {
        struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
        static const char * const val_types[] = {
-               "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
+               [USB_MIXER_BOOLEAN] = "BOOLEAN",
+               [USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN",
+               [USB_MIXER_S8] = "S8",
+               [USB_MIXER_U8] = "U8",
+               [USB_MIXER_S16] = "S16",
+               [USB_MIXER_U16] = "U16",
+               [USB_MIXER_S32] = "S32",
+               [USB_MIXER_U32] = "U32",
+               [USB_MIXER_BESPOKEN] = "BESPOKEN",
        };
        snd_iprintf(buffer, "    Info: id=%i, control=%i, cmask=0x%x, "
                            "channels=%i, type=\"%s\"\n", cval->head.id,
index 8b8bee3c3dd635aa50ec87df83c54fe628d95acb..e7accd87e0632d8f3ccd18a415d6a19a511f8233 100644 (file)
@@ -1897,6 +1897,9 @@ static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16d8, 2),     /* Kingston HyperX AMP */
        REG_QUIRK_ENTRY(0x0951, 0x16ed, 2),     /* Kingston HyperX Cloud Alpha S */
        REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
+       REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2),     /* JBL Quantum 600 */
+       REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2),     /* JBL Quantum 400 */
+       REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2),     /* JBL Quantum 800 */
        { 0 }                                   /* terminator */
 };
 
index 1828bba19020d6cce0df684c8d1c3c4e9dc73cc5..dc6daa193557a9c6647a18ed363fafcfb7026b8a 100644 (file)
@@ -222,6 +222,11 @@ int mount_bpffs_for_pin(const char *name)
        int err = 0;
 
        file = malloc(strlen(name) + 1);
+       if (!file) {
+               p_err("mem alloc failed");
+               return -1;
+       }
+
        strcpy(file, name);
        dir = dirname(file);
 
index 6365c7fd1262ad0868c67a854a74fa0ea56e5d29..bd6288302094b5aac09ca26f514cc5b85ec46086 100644 (file)
 #include <sys/socket.h>
 #include <sys/wait.h>
 #include <linux/tcp.h>
+#include <linux/udp.h>
 #include <arpa/inet.h>
 #include <net/if.h>
 #include <netinet/in.h>
+#include <netinet/ip.h>
 #include <netdb.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <time.h>
 #include <errno.h>
 
+#include <linux/xfrm.h>
+#include <linux/ipsec.h>
+#include <linux/pfkeyv2.h>
+
 #ifndef IPV6_UNICAST_IF
 #define IPV6_UNICAST_IF         76
 #endif
@@ -114,6 +120,9 @@ struct sock_args {
                struct in_addr  in;
                struct in6_addr in6;
        } expected_raddr;
+
+       /* ESP in UDP encap test */
+       int use_xfrm;
 };
 
 static int server_mode;
@@ -1346,6 +1355,41 @@ static int bind_socket(int sd, struct sock_args *args)
        return 0;
 }
 
+static int config_xfrm_policy(int sd, struct sock_args *args)
+{
+       struct xfrm_userpolicy_info policy = {};
+       int type = UDP_ENCAP_ESPINUDP;
+       int xfrm_af = IP_XFRM_POLICY;
+       int level = SOL_IP;
+
+       if (args->type != SOCK_DGRAM) {
+               log_error("Invalid socket type. Only DGRAM could be used for XFRM\n");
+               return 1;
+       }
+
+       policy.action = XFRM_POLICY_ALLOW;
+       policy.sel.family = args->version;
+       if (args->version == AF_INET6) {
+               xfrm_af = IPV6_XFRM_POLICY;
+               level = SOL_IPV6;
+       }
+
+       policy.dir = XFRM_POLICY_OUT;
+       if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0)
+               return 1;
+
+       policy.dir = XFRM_POLICY_IN;
+       if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0)
+               return 1;
+
+       if (setsockopt(sd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type)) < 0) {
+               log_err_errno("Failed to set xfrm encap");
+               return 1;
+       }
+
+       return 0;
+}
+
 static int lsock_init(struct sock_args *args)
 {
        long flags;
@@ -1389,6 +1433,11 @@ static int lsock_init(struct sock_args *args)
        if (fcntl(sd, F_SETFD, FD_CLOEXEC) < 0)
                log_err_errno("Failed to set close-on-exec flag");
 
+       if (args->use_xfrm && config_xfrm_policy(sd, args)) {
+               log_err_errno("Failed to set xfrm policy");
+               goto err;
+       }
+
 out:
        return sd;
 
@@ -1772,7 +1821,7 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
        return client_status;
 }
 
-#define GETOPT_STR  "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6L:0:1:2:3:Fbq"
+#define GETOPT_STR  "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
 
 static void print_usage(char *prog)
 {
@@ -1795,6 +1844,7 @@ static void print_usage(char *prog)
        "    -D|R          datagram (D) / raw (R) socket (default stream)\n"
        "    -l addr       local address to bind to in server mode\n"
        "    -c addr       local address to bind to in client mode\n"
+       "    -x            configure XFRM policy on socket\n"
        "\n"
        "    -d dev        bind socket to given device name\n"
        "    -I dev        bind socket to given device name - server mode\n"
@@ -1966,6 +2016,9 @@ int main(int argc, char *argv[])
                case 'q':
                        quiet = 1;
                        break;
+               case 'x':
+                       args.use_xfrm = 1;
+                       break;
                default:
                        print_usage(argv[0]);
                        return 1;
index 64cd2e23c5687a80a9b9afb32d5207102d38f122..543ad7513a8e9feeb705e3a22bdd0e2784119e39 100755 (executable)
 #      below for IPv6 doesn't apply here, because, on IPv4, administrative MTU
 #      changes alone won't affect PMTU
 #
+# - pmtu_vti4_udp_exception
+#       Same as pmtu_vti4_exception, but using ESP-in-UDP
+#
+# - pmtu_vti4_udp_routed_exception
+#       Set up vti tunnel on top of veth connected through routing namespace and
+#      add xfrm states and policies with ESP-in-UDP encapsulation. Check that
+#      route exception is not created if link layer MTU is not exceeded, then
+#      lower MTU on second part of routed environment and check that exception
+#      is created with the expected PMTU.
+#
 # - pmtu_vti6_exception
 #      Set up vti6 tunnel on top of veth, with xfrm states and policies, in two
 #      namespaces with matching endpoints. Check that route exception is
 #      decrease and increase MTU of tunnel, checking that route exception PMTU
 #      changes accordingly
 #
+# - pmtu_vti6_udp_exception
+#       Same as pmtu_vti6_exception, but using ESP-in-UDP
+#
+# - pmtu_vti6_udp_routed_exception
+#      Same as pmtu_vti6_udp_exception, but with routing between vti
+#      endpoints
+#
 # - pmtu_vti4_default_mtu
 #      Set up vti4 tunnel on top of veth, in two namespaces with matching
 #      endpoints. Check that MTU assigned to vti interface is the MTU of the
@@ -224,6 +241,10 @@ tests="
        pmtu_ipv6_ipv6_exception        IPv6 over IPv6: PMTU exceptions         1
        pmtu_vti6_exception             vti6: PMTU exceptions                   0
        pmtu_vti4_exception             vti4: PMTU exceptions                   0
+       pmtu_vti6_udp_exception         vti6: PMTU exceptions (ESP-in-UDP)      0
+       pmtu_vti4_udp_exception         vti4: PMTU exceptions (ESP-in-UDP)      0
+       pmtu_vti6_udp_routed_exception  vti6: PMTU exceptions, routed (ESP-in-UDP)      0
+       pmtu_vti4_udp_routed_exception  vti4: PMTU exceptions, routed (ESP-in-UDP)      0
        pmtu_vti4_default_mtu           vti4: default MTU assignment            0
        pmtu_vti6_default_mtu           vti6: default MTU assignment            0
        pmtu_vti4_link_add_mtu          vti4: MTU setting on link creation      0
@@ -246,7 +267,6 @@ ns_b="ip netns exec ${NS_B}"
 ns_c="ip netns exec ${NS_C}"
 ns_r1="ip netns exec ${NS_R1}"
 ns_r2="ip netns exec ${NS_R2}"
-
 # Addressing and routing for tests with routers: four network segments, with
 # index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
 # identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2).
@@ -279,7 +299,6 @@ routes="
        A       ${prefix6}:${b_r2}::1   ${prefix6}:${a_r2}::2
        B       default                 ${prefix6}:${b_r1}::2
 "
-
 USE_NH="no"
 #      ns      family  nh id      destination          gateway
 nexthops="
@@ -326,6 +345,7 @@ dummy6_mask="64"
 
 err_buf=
 tcpdump_pids=
+nettest_pids=
 
 err() {
        err_buf="${err_buf}${1}
@@ -548,6 +568,14 @@ setup_vti6() {
        setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
 }
 
+setup_vti4routed() {
+       setup_vti 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 ${tunnel4_a_addr} ${tunnel4_b_addr} ${tunnel4_mask}
+}
+
+setup_vti6routed() {
+       setup_vti 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
+}
+
 setup_vxlan_or_geneve() {
        type="${1}"
        a_addr="${2}"
@@ -619,18 +647,36 @@ setup_xfrm() {
        proto=${1}
        veth_a_addr="${2}"
        veth_b_addr="${3}"
+       encap=${4}
 
-       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1
-       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap} || return 1
+       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
        run_cmd ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
        run_cmd ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
 
-       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
-       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
+       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
        run_cmd ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
        run_cmd ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
 }
 
+setup_nettest_xfrm() {
+       which nettest >/dev/null
+       if [ $? -ne 0 ]; then
+               echo "'nettest' command not found; skipping tests"
+               return 1
+       fi
+
+       [ ${1} -eq 6 ] && proto="-6" || proto=""
+       port=${2}
+
+       run_cmd ${ns_a} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+       nettest_pids="${nettest_pids} $!"
+
+       run_cmd ${ns_b} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+       nettest_pids="${nettest_pids} $!"
+}
+
 setup_xfrm4() {
        setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr}
 }
@@ -639,6 +685,26 @@ setup_xfrm6() {
        setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr}
 }
 
+setup_xfrm4udp() {
+       setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 4 4500
+}
+
+setup_xfrm6udp() {
+       setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 6 4500
+}
+
+setup_xfrm4udprouted() {
+       setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 4 4500
+}
+
+setup_xfrm6udprouted() {
+       setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 6 4500
+}
+
 setup_routing_old() {
        for i in ${routes}; do
                [ "${ns}" = "" ]        && ns="${i}"            && continue
@@ -823,6 +889,11 @@ cleanup() {
        done
        tcpdump_pids=
 
+       for pid in ${nettest_pids}; do
+               kill ${pid}
+       done
+       nettest_pids=
+
        for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do
                ip netns del ${n} 2> /dev/null
        done
@@ -1432,6 +1503,135 @@ test_pmtu_vti6_exception() {
        return ${fail}
 }
 
+test_pmtu_vti4_udp_exception() {
+       setup namespaces veth vti4 xfrm4udp || return $ksft_skip
+       trace "${ns_a}" veth_a    "${ns_b}" veth_b \
+             "${ns_a}" vti4_a    "${ns_b}" vti4_b
+
+       veth_mtu=1500
+       vti_mtu=$((veth_mtu - 20))
+
+       #                                UDP   SPI   SN   IV  ICV   pad length   next header
+       esp_payload_rfc4106=$((vti_mtu - 8   - 4   - 4  - 8 - 16  - 1          - 1))
+       ping_payload=$((esp_payload_rfc4106 - 28))
+
+       mtu "${ns_a}" veth_a ${veth_mtu}
+       mtu "${ns_b}" veth_b ${veth_mtu}
+       mtu "${ns_a}" vti4_a ${vti_mtu}
+       mtu "${ns_b}" vti4_b ${vti_mtu}
+
+       # Send DF packet without exceeding link layer MTU, check that no
+       # exception is created
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+       # Now exceed link layer MTU by one byte, check that exception is created
+       # with the right PMTU value
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
+}
+
+test_pmtu_vti6_udp_exception() {
+       setup namespaces veth vti6 xfrm6udp || return $ksft_skip
+       trace "${ns_a}" veth_a    "${ns_b}" veth_b \
+             "${ns_a}" vti6_a    "${ns_b}" vti6_b
+       fail=0
+
+       # Create route exception by exceeding link layer MTU
+       mtu "${ns_a}" veth_a 4000
+       mtu "${ns_b}" veth_b 4000
+       mtu "${ns_a}" vti6_a 5000
+       mtu "${ns_b}" vti6_b 5000
+       run_cmd ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr}
+
+       # Check that exception was created
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1
+
+       # Decrease tunnel MTU, check for PMTU decrease in route exception
+       mtu "${ns_a}" vti6_a 3000
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1
+
+       # Increase tunnel MTU, check for PMTU increase in route exception
+       mtu "${ns_a}" vti6_a 9000
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1
+
+       return ${fail}
+}
+
+test_pmtu_vti4_udp_routed_exception() {
+       setup namespaces routing vti4routed xfrm4udprouted || return $ksft_skip
+       trace "${ns_a}" veth_A-R1    "${ns_b}" veth_B-R1 \
+             "${ns_a}" vti4_a       "${ns_b}" vti4_b
+
+       veth_mtu=1500
+       vti_mtu=$((veth_mtu - 20))
+
+       #                                UDP   SPI   SN   IV  ICV   pad length   next header
+       esp_payload_rfc4106=$((vti_mtu - 8   - 4   - 4  - 8 - 16  - 1          - 1))
+       ping_payload=$((esp_payload_rfc4106 - 28))
+
+        mtu "${ns_a}"  veth_A-R1 ${veth_mtu}
+        mtu "${ns_r1}" veth_R1-A ${veth_mtu}
+        mtu "${ns_b}"  veth_B-R1 ${veth_mtu}
+        mtu "${ns_r1}" veth_R1-B ${veth_mtu}
+
+       mtu "${ns_a}" vti4_a ${vti_mtu}
+       mtu "${ns_b}" vti4_b ${vti_mtu}
+
+       # Send DF packet without exceeding link layer MTU, check that no
+       # exception is created
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+       # Now decrease link layer MTU by 8 bytes on R1, check that exception is created
+       # with the right PMTU value
+        mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8))
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "$((esp_payload_rfc4106 - 8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))"
+}
+
+test_pmtu_vti6_udp_routed_exception() {
+       setup namespaces routing vti6routed xfrm6udprouted || return $ksft_skip
+       trace "${ns_a}" veth_A-R1    "${ns_b}" veth_B-R1 \
+             "${ns_a}" vti6_a       "${ns_b}" vti6_b
+
+       veth_mtu=1500
+       vti_mtu=$((veth_mtu - 40))
+
+       #                                UDP   SPI   SN   IV  ICV   pad length   next header
+       esp_payload_rfc4106=$((vti_mtu - 8   - 4   - 4  - 8 - 16  - 1          - 1))
+       ping_payload=$((esp_payload_rfc4106 - 48))
+
+        mtu "${ns_a}"  veth_A-R1 ${veth_mtu}
+        mtu "${ns_r1}" veth_R1-A ${veth_mtu}
+        mtu "${ns_b}"  veth_B-R1 ${veth_mtu}
+        mtu "${ns_r1}" veth_R1-B ${veth_mtu}
+
+       # mtu "${ns_a}" vti6_a ${vti_mtu}
+       # mtu "${ns_b}" vti6_b ${vti_mtu}
+
+       run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel6_b_addr}
+
+       # Check that exception was not created
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+       # Now decrease link layer MTU by 8 bytes on R1, check that exception is created
+       # with the right PMTU value
+        mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8))
+       run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel6_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "$((esp_payload_rfc4106 - 8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))"
+
+}
+
 test_pmtu_vti4_default_mtu() {
        setup namespaces veth vti4 || return $ksft_skip
 
index e363bdaff59d4e0efdc1155b287c2119ec5b0ccb..2ea438e6b8b1f868174c288bd31e01de907b9765 100644 (file)
@@ -210,8 +210,10 @@ static void anon_release_pages(char *rel_area)
 
 static void anon_allocate_area(void **alloc_area)
 {
-       if (posix_memalign(alloc_area, page_size, nr_pages * page_size))
-               err("posix_memalign() failed");
+       *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+       if (*alloc_area == MAP_FAILED)
+               err("mmap of anonymous memory failed");
 }
 
 static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)