Merge branch 'i2c/for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 29 Sep 2019 17:20:16 +0000 (10:20 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 29 Sep 2019 17:20:16 +0000 (10:20 -0700)
Pull more i2c updates from Wolfram Sang:

 - make Lenovo Yoga C630 boot now that the dependencies are merged

 - restore BlockProcessCall for i801, accidentally removed in this merge
   window

 - a bugfix for the riic driver

 - an improvement to the slave-eeprom driver which should have been in
   the first pull request but sadly got lost in the process

* 'i2c/for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux:
  i2c: slave-eeprom: Add read only mode
  i2c: i801: Bring back Block Process Call support for certain platforms
  i2c: riic: Clear NACK in tend isr
  i2c: qcom-geni: Disable DMA processing on the Lenovo Yoga C630

223 files changed:
Documentation/devicetree/bindings/net/adi,adin.yaml
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
Documentation/devicetree/bindings/net/renesas,ravb.txt
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/networking/devlink-trap.rst
MAINTAINERS
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/dbell.c
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_nested.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rm_xics.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/mm/book3s64/hash_native.c
arch/powerpc/mm/book3s64/mmu_context.c
arch/powerpc/mm/book3s64/radix_tlb.c
arch/powerpc/mm/kasan/kasan_init_32.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/papr_scm.c
arch/powerpc/platforms/pseries/pseries.h
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/xics/icp-native.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/x86/purgatory/Makefile
drivers/atm/he.c
drivers/infiniband/core/addr.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_types.h
drivers/isdn/mISDN/socket.c
drivers/net/Kconfig
drivers/net/arcnet/Kconfig
drivers/net/arcnet/arcnet.c
drivers/net/can/usb/Kconfig
drivers/net/dsa/b53/b53_serdes.h
drivers/net/dsa/lantiq_pce.h
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/qca8k.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/allwinner/Kconfig
drivers/net/ethernet/amazon/Kconfig
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/emulex/benet/Kconfig
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/netronome/nfp/abm/cls.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netx-eth.c [deleted file]
drivers/net/ethernet/ni/nixge.c
drivers/net/ethernet/nxp/Kconfig
drivers/net/ethernet/pensando/Kconfig
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/macsec.c
drivers/net/phy/Kconfig
drivers/net/phy/micrel.c
drivers/net/phy/national.c
drivers/net/ppp/ppp_generic.c
drivers/net/tap.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/usbnet.c
drivers/net/vrf.c
drivers/net/wireless/ath/Kconfig
drivers/net/wireless/ath/ar5523/Kconfig
drivers/net/wireless/ath/ath6kl/Kconfig
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/carl9170/Kconfig
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/atmel/Kconfig
drivers/net/wireless/intel/ipw2x00/Kconfig
drivers/net/wireless/intel/iwlegacy/Kconfig
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
drivers/net/wireless/ralink/rt2x00/Kconfig
drivers/net/wireless/realtek/rtw88/mac.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/pci.c
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
drivers/nfc/st95hf/core.c
drivers/of/of_mdio.c
drivers/ptp/ptp_chardev.c
fs/exec.c
include/linux/gfp.h
include/linux/mempolicy.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm_types.h
include/linux/platform_data/eth-netx.h [deleted file]
include/linux/rcuwait.h
include/linux/sched.h
include/linux/sched/mm.h
include/linux/sched/task.h
include/linux/skbuff.h
include/net/inet_timewait_sock.h
include/net/ipv6.h
include/net/netfilter/nf_tables.h
include/net/route.h
include/net/sch_generic.h
include/uapi/linux/btf.h
include/uapi/linux/netfilter_bridge/ebtables.h
include/uapi/linux/ptp_clock.h
kernel/bpf/btf.c
kernel/bpf/xskmap.c
kernel/exit.c
kernel/fork.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/membarrier.c
kernel/sched/sched.h
kernel/trace/bpf_trace.c
lib/Kconfig
mm/huge_memory.c
mm/mempolicy.c
mm/page_alloc.c
mm/shmem.c
net/appletalk/ddp.c
net/ax25/af_ax25.c
net/batman-adv/Kconfig
net/core/dev.c
net/core/dst.c
net/core/skbuff.c
net/dccp/ipv6.c
net/ieee802154/socket.c
net/ife/Kconfig
net/ipv4/Kconfig
net/ipv4/inet_connection_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_output.c
net/ipv4/route.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_timer.c
net/ipv4/xfrm4_policy.c
net/ipv6/fib6_rules.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/Kconfig
net/ipv6/tcp_ipv6.c
net/kcm/kcmsock.c
net/ncsi/internal.h
net/ncsi/ncsi-manage.c
net/netfilter/Kconfig
net/netfilter/ipvs/Kconfig
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_lookup.c
net/nfc/llcp_sock.c
net/openvswitch/datapath.c
net/qrtr/qrtr.c
net/rds/Kconfig
net/rds/bind.c
net/sched/Kconfig
net/sched/act_api.c
net/sched/act_sample.c
net/sched/cls_api.c
net/sched/sch_api.c
net/sched/sch_cbs.c
net/sched/sch_htb.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_sfb.c
net/sctp/ipv6.c
net/xdp/xdp_umem.c
tools/lib/bpf/btf_dump.c
tools/lib/bpf/xsk.c
tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
tools/testing/selftests/bpf/progs/strobemeta.h
tools/testing/selftests/bpf/test_sysctl.c
tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
tools/testing/selftests/membarrier/.gitignore
tools/testing/selftests/membarrier/Makefile
tools/testing/selftests/membarrier/membarrier_test_impl.h [moved from tools/testing/selftests/membarrier/membarrier_test.c with 95% similarity]
tools/testing/selftests/membarrier/membarrier_test_multi_thread.c [new file with mode: 0644]
tools/testing/selftests/membarrier/membarrier_test_single_thread.c [new file with mode: 0644]
tools/testing/selftests/net/fib_nexthop_multiprefix.sh
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/powerpc/mm/Makefile
tools/testing/selftests/powerpc/mm/tlbie_test.c [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/.gitignore
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/tm/tm-poison.c [new file with mode: 0644]
usr/include/Makefile

index 69375cb28e92074022bc3867a3244217c5aa45f7..d95cc691a65f9e013839677c8e5017bbe3e34df1 100644 (file)
@@ -36,12 +36,6 @@ properties:
     enum: [ 4, 8, 12, 16, 20, 24 ]
     default: 8
 
-  adi,disable-energy-detect:
-    description: |
-      Disables Energy Detect Powerdown Mode (default disabled, i.e energy detect
-      is enabled if this property is unspecified)
-    type: boolean
-
 examples:
   - |
     ethernet {
@@ -68,6 +62,5 @@ examples:
             reg = <1>;
 
             adi,fifo-depth-bits = <16>;
-            adi,disable-energy-detect;
         };
     };
index 5100358177c98127c169b046393cffda0f23dec6..b921731cd970e35a9221cae0f33fb982209949df 100644 (file)
@@ -12,8 +12,36 @@ and therefore may overwrite them.
 KSZ9021:
 
   All skew control options are specified in picoseconds. The minimum
-  value is 0, the maximum value is 3000, and it is incremented by 200ps
-  steps.
+  value is 0, the maximum value is 3000, and it can be specified in 200ps
+  steps, *but* these values are not in fact what you get because this chip's
+  skew values actually increase in 120ps steps, starting from -840ps. The
+  incorrect values came from an error in the original KSZ9021 datasheet
+  before it was corrected in revision 1.2 (Feb 2014), but it is too late to
+  change the driver now because of the many existing device trees that have
+  been created using values that go up in increments of 200.
+
+  The following table shows the actual skew delay you will get for each of the
+  possible devicetree values, and the number that will be programmed into the
+  corresponding pad skew register:
+
+  Device Tree Value    Delay   Pad Skew Register Value
+  -----------------------------------------------------
+       0               -840ps          0000
+       200             -720ps          0001
+       400             -600ps          0010
+       600             -480ps          0011
+       800             -360ps          0100
+       1000            -240ps          0101
+       1200            -120ps          0110
+       1400               0ps          0111
+       1600             120ps          1000
+       1800             240ps          1001
+       2000             360ps          1010
+       2200             480ps          1011
+       2400             600ps          1100
+       2600             720ps          1101
+       2800             840ps          1110
+       3000             960ps          1111
 
   Optional properties:
 
index 7ad36213093ed4b899c9bc1e74b1d5871f2680cc..5df4aa7f681154ee505b531f71c00b0bffcac626 100644 (file)
@@ -18,6 +18,7 @@ Required properties:
                R-Car Gen2 and RZ/G1 devices.
 
       - "renesas,etheravb-r8a774a1" for the R8A774A1 SoC.
+      - "renesas,etheravb-r8a774b1" for the R8A774B1 SoC.
       - "renesas,etheravb-r8a774c0" for the R8A774C0 SoC.
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
index ebe4537a7cce43589c10e8c08cbde29b4295dcc1..4845e29411e460151458a97a39256cff9deff8e7 100644 (file)
@@ -113,7 +113,7 @@ properties:
     const: stmmaceth
 
   mac-mode:
-    maxItems: 1
+    $ref: ethernet-controller.yaml#/properties/phy-connection-type
     description:
       The property is identical to 'phy-mode', and assumes that there is mode
       converter in-between the MAC & PHY (e.g. GMII-to-RGMII). This converter
index c20c7c4836640fa3f0cfe943b4d4d89367cb8ea7..8e90a85f3bd53ab0d4eefffac614469c41928c66 100644 (file)
@@ -143,7 +143,8 @@ be added to the following table:
    * - ``port_list_is_empty``
      - ``drop``
      - Traps packets that the device decided to drop in case they need to be
-       flooded and the flood list is empty
+       flooded (e.g., unknown unicast, unregistered multicast) and there are
+       no ports the packets should be flooded to
    * - ``port_loopback_filter``
      - ``drop``
      - Traps packets that the device decided to drop in case after layer 2
index 857611c746c4cd9af385d57a814fd4b6070c39c7..296de2b51c832ecc1869a77624c964ce398a5236 100644 (file)
@@ -643,6 +643,7 @@ F:  drivers/net/ethernet/alacritech/*
 
 FORCEDETH GIGABIT ETHERNET DRIVER
 M:     Rain River <rain.1986.08.12@gmail.com>
+M:     Zhu Yanjun <yanjun.zhu@oracle.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/nvidia/*
index a1ebcbc3931f2afe7dd6667e353d809303b1bfb0..cf00ff0d121def10d7afb75b9d5d6619a32667b7 100644 (file)
@@ -209,8 +209,9 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_POWER9_DD2_1           LONG_ASM_CONST(0x0000080000000000)
 #define CPU_FTR_P9_TM_HV_ASSIST                LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_P9_TM_XER_SO_BUG       LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_P9_TLBIE_BUG           LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TLBIE_STQ_BUG       LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_P9_TIDR                        LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_P9_TLBIE_ERAT_BUG      LONG_ASM_CONST(0x0001000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -457,7 +458,7 @@ static inline void cpu_feature_keys_init(void) { }
            CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
            CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
-           CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
+           CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
 #define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
 #define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
 #define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
index 8e8514efb124394813e2a6ef09505f6491e86d76..ee62776e5433cc72316bf323f43d3c5df99d97f2 100644 (file)
@@ -452,9 +452,100 @@ static inline u32 kvmppc_get_xics_latch(void)
        return xirr;
 }
 
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+/*
+ * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
+ * a CPU thread that's running/napping inside of a guest is by default regarded
+ * as a request to wake the CPU (if needed) and continue execution within the
+ * guest, potentially to process new state like externally-generated
+ * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
+ *
+ * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
+ * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
+ * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
+ * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
+ * the receiving side prior to processing the IPI work.
+ *
+ * NOTE:
+ *
+ * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
+ * This is to guard against sequences such as the following:
+ *
+ *      CPU
+ *        X: smp_muxed_ipi_set_message():
+ *        X:   smp_mb()
+ *        X:   message[RESCHEDULE] = 1
+ *        X: doorbell_global_ipi(42):
+ *        X:   kvmppc_set_host_ipi(42)
+ *        X:   ppc_msgsnd_sync()/smp_mb()
+ *        X:   ppc_msgsnd() -> 42
+ *       42: doorbell_exception(): // from CPU X
+ *       42:   ppc_msgsync()
+ *      105: smp_muxed_ipi_set_message():
+ *      105:   smp_mb()
+ *           // STORE DEFERRED DUE TO RE-ORDERING
+ *    --105:   message[CALL_FUNCTION] = 1
+ *    | 105: doorbell_global_ipi(42):
+ *    | 105:   kvmppc_set_host_ipi(42)
+ *    |  42:   kvmppc_clear_host_ipi(42)
+ *    |  42: smp_ipi_demux_relaxed()
+ *    |  42: // returns to executing guest
+ *    |      // RE-ORDERED STORE COMPLETES
+ *    ->105:   message[CALL_FUNCTION] = 1
+ *      105:   ppc_msgsnd_sync()/smp_mb()
+ *      105:   ppc_msgsnd() -> 42
+ *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ *      105: // hangs waiting on 42 to process messages/call_single_queue
+ *
+ * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
+ * to guard against sequences such as the following (as well as to create
+ * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
+ *
+ *      CPU
+ *        X: smp_muxed_ipi_set_message():
+ *        X:   smp_mb()
+ *        X:   message[RESCHEDULE] = 1
+ *        X: doorbell_global_ipi(42):
+ *        X:   kvmppc_set_host_ipi(42)
+ *        X:   ppc_msgsnd_sync()/smp_mb()
+ *        X:   ppc_msgsnd() -> 42
+ *       42: doorbell_exception(): // from CPU X
+ *       42:   ppc_msgsync()
+ *           // STORE DEFERRED DUE TO RE-ORDERING
+ *    -- 42:   kvmppc_clear_host_ipi(42)
+ *    |  42: smp_ipi_demux_relaxed()
+ *    | 105: smp_muxed_ipi_set_message():
+ *    | 105:   smp_mb()
+ *    | 105:   message[CALL_FUNCTION] = 1
+ *    | 105: doorbell_global_ipi(42):
+ *    | 105:   kvmppc_set_host_ipi(42)
+ *    |      // RE-ORDERED STORE COMPLETES
+ *    -> 42:   kvmppc_clear_host_ipi(42)
+ *       42: // returns to executing guest
+ *      105:   ppc_msgsnd_sync()/smp_mb()
+ *      105:   ppc_msgsnd() -> 42
+ *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ *      105: // hangs waiting on 42 to process messages/call_single_queue
+ */
+static inline void kvmppc_set_host_ipi(int cpu)
 {
-       paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
+       /*
+        * order stores of IPI messages vs. setting of host_ipi flag
+        *
+        * pairs with the barrier in kvmppc_clear_host_ipi()
+        */
+       smp_mb();
+       paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
+}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
+{
+       paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
+       /*
+        * order clearing of host_ipi flag vs. processing of IPI messages
+        *
+        * pairs with the barrier in kvmppc_set_host_ipi()
+        */
+       smp_mb();
 }
 
 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -486,7 +577,10 @@ static inline u32 kvmppc_get_xics_latch(void)
        return 0;
 }
 
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+static inline void kvmppc_set_host_ipi(int cpu)
+{}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
 {}
 
 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
index ec3714cf0989bfa1684ab4f34fde4aa46e804351..b3cbb1136bce0878148eb967dbd966755936dc76 100644 (file)
 #define   HMER_DEBUG_TRIG      (1ul << (63 - 17)) /* Debug trigger */
 #define        SPRN_HMEER      0x151   /* Hyp maintenance exception enable reg */
 #define SPRN_PCR       0x152   /* Processor compatibility register */
-#define   PCR_VEC_DIS  (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
-#define   PCR_VSX_DIS  (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
-#define   PCR_TM_DIS   (1ul << (63-2)) /* Trans. memory disable (POWER8) */
+#define   PCR_VEC_DIS  (__MASK(63-0))  /* Vec. disable (bit NA since POWER8) */
+#define   PCR_VSX_DIS  (__MASK(63-1))  /* VSX disable (bit NA since POWER8) */
+#define   PCR_TM_DIS   (__MASK(63-2))  /* Trans. memory disable (POWER8) */
+#define   PCR_HIGH_BITS        (PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
 /*
  * These bits are used in the function kvmppc_set_arch_compat() to specify and
  * determine both the compatibility level which we want to emulate and the
 #define   PCR_ARCH_207 0x8             /* Architecture 2.07 */
 #define   PCR_ARCH_206 0x4             /* Architecture 2.06 */
 #define   PCR_ARCH_205 0x2             /* Architecture 2.05 */
+#define   PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205)
+#define   PCR_MASK     ~(PCR_HIGH_BITS | PCR_LOW_BITS) /* PCR Reserved Bits */
 #define        SPRN_HEIR       0x153   /* Hypervisor Emulated Instruction Register */
 #define SPRN_TLBINDEXR 0x154   /* P7 TLB control register */
 #define SPRN_TLBVPNR   0x155   /* P7 TLB control register */
index 3239a9fe6c1cf90c43033ece68c1e317053c735d..a460298c7ddb473b5a7978b64eb6e8a94701fc27 100644 (file)
@@ -23,6 +23,7 @@ _GLOBAL(__setup_cpu_power7)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       LOAD_REG_IMMEDIATE(r0, PCR_MASK)
        mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
@@ -37,6 +38,7 @@ _GLOBAL(__restore_cpu_power7)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       LOAD_REG_IMMEDIATE(r0, PCR_MASK)
        mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
@@ -54,6 +56,7 @@ _GLOBAL(__setup_cpu_power8)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       LOAD_REG_IMMEDIATE(r0, PCR_MASK)
        mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        ori     r3, r3, LPCR_PECEDH
@@ -76,6 +79,7 @@ _GLOBAL(__restore_cpu_power8)
        beqlr
        li      r0,0
        mtspr   SPRN_LPID,r0
+       LOAD_REG_IMMEDIATE(r0, PCR_MASK)
        mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        ori     r3, r3, LPCR_PECEDH
@@ -98,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
        mtspr   SPRN_PID,r0
+       LOAD_REG_IMMEDIATE(r0, PCR_MASK)
        mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE  | LPCR_HEIC)
@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
        mtspr   SPRN_PID,r0
+       LOAD_REG_IMMEDIATE(r0, PCR_MASK)
        mtspr   SPRN_PCR,r0
        mfspr   r3,SPRN_LPCR
        LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
index 804b1a6196fac7f462c54c602c768d43ded64cdc..f17ff1200eaaefa77b22f9c20f0a1d0559a47e70 100644 (file)
@@ -33,7 +33,7 @@ void doorbell_global_ipi(int cpu)
 {
        u32 tag = get_hard_smp_processor_id(cpu);
 
-       kvmppc_set_host_ipi(cpu, 1);
+       kvmppc_set_host_ipi(cpu);
        /* Order previous accesses vs. msgsnd, which is treated as a store */
        ppc_msgsnd_sync();
        ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -48,7 +48,7 @@ void doorbell_core_ipi(int cpu)
 {
        u32 tag = cpu_thread_in_core(cpu);
 
-       kvmppc_set_host_ipi(cpu, 1);
+       kvmppc_set_host_ipi(cpu);
        /* Order previous accesses vs. msgsnd, which is treated as a store */
        ppc_msgsnd_sync();
        ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -84,7 +84,7 @@ void doorbell_exception(struct pt_regs *regs)
 
        may_hard_irq_enable();
 
-       kvmppc_set_host_ipi(smp_processor_id(), 0);
+       kvmppc_clear_host_ipi(smp_processor_id());
        __this_cpu_inc(irq_stat.doorbell_irqs);
 
        smp_ipi_demux_relaxed(); /* already performed the barrier */
index bd95318d2202c114e649633d8b06a932617f289e..180b3a5d1001a46af83ea26ca7a8d686baf64f8b 100644 (file)
@@ -101,7 +101,7 @@ static void __restore_cpu_cpufeatures(void)
        if (hv_mode) {
                mtspr(SPRN_LPID, 0);
                mtspr(SPRN_HFSCR, system_registers.hfscr);
-               mtspr(SPRN_PCR, 0);
+               mtspr(SPRN_PCR, PCR_MASK);
        }
        mtspr(SPRN_FSCR, system_registers.fscr);
 
@@ -144,6 +144,7 @@ static void __init cpufeatures_setup_cpu(void)
                mtspr(SPRN_HFSCR, 0);
        }
        mtspr(SPRN_FSCR, 0);
+       mtspr(SPRN_PCR, PCR_MASK);
 
        /*
         * LPCR does not get cleared, to match behaviour with secondaries
@@ -691,9 +692,37 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
        return true;
 }
 
+/*
+ * Handle POWER9 broadcast tlbie invalidation issue using
+ * cpu feature flag.
+ */
+static __init void update_tlbie_feature_flag(unsigned long pvr)
+{
+       if (PVR_VER(pvr) == PVR_POWER9) {
+               /*
+                * Set the tlbie feature flag for anything below
+                * Nimbus DD 2.3 and Cumulus DD 1.3
+                */
+               if ((pvr & 0xe000) == 0) {
+                       /* Nimbus */
+                       if ((pvr & 0xfff) < 0x203)
+                               cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+               } else if ((pvr & 0xc000) == 0) {
+                       /* Cumulus */
+                       if ((pvr & 0xfff) < 0x103)
+                               cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+               } else {
+                       WARN_ONCE(1, "Unknown PVR");
+                       cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
+               }
+
+               cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
+       }
+}
+
 static __init void cpufeatures_cpu_quirks(void)
 {
-       int version = mfspr(SPRN_PVR);
+       unsigned long version = mfspr(SPRN_PVR);
 
        /*
         * Not all quirks can be derived from the cpufeatures device tree.
@@ -712,10 +741,10 @@ static __init void cpufeatures_cpu_quirks(void)
 
        if ((version & 0xffff0000) == 0x004e0000) {
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
-               cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
        }
 
+       update_tlbie_feature_flag(version);
        /*
         * PKEY was not in the initial base or feature node
         * specification, but it should become optional in the next
index 0a91dee51245fca222973a486cf8b3d0ced19feb..bc8a551013be934065a9246ae55b639f0b15b8c9 100644 (file)
@@ -1960,7 +1960,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
        pci_err(pdev, "Going to break: %pR\n", bar);
 
        if (pdev->is_virtfn) {
-#ifndef CONFIG_IOV
+#ifndef CONFIG_PCI_IOV
                return -ENXIO;
 #else
                /*
@@ -1980,7 +1980,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
                pos  = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
                pos += PCI_SRIOV_CTRL;
                bit  = PCI_SRIOV_CTRL_MSE;
-#endif /* !CONFIG_IOV */
+#endif /* !CONFIG_PCI_IOV */
        } else {
                bit = PCI_COMMAND_MEMORY;
                pos = PCI_COMMAND;
index efd8f93bc9dc1272686055f94d1cdbcee8f61d65..709cf1fd4cf466773da3a0021cf03777e4a58ef6 100644 (file)
@@ -401,8 +401,11 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 
        spin_lock(&vc->lock);
        vc->arch_compat = arch_compat;
-       /* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
-       vc->pcr = host_pcr_bit - guest_pcr_bit;
+       /*
+        * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
+        * Also set all reserved PCR bits
+        */
+       vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
        spin_unlock(&vc->lock);
 
        return 0;
@@ -3410,7 +3413,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
        }
 
        if (vc->pcr)
-               mtspr(SPRN_PCR, vc->pcr);
+               mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
        mtspr(SPRN_DPDES, vc->dpdes);
        mtspr(SPRN_VTB, vc->vtb);
 
@@ -3490,7 +3493,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
        vc->vtb = mfspr(SPRN_VTB);
        mtspr(SPRN_DPDES, 0);
        if (vc->pcr)
-               mtspr(SPRN_PCR, 0);
+               mtspr(SPRN_PCR, PCR_MASK);
 
        if (vc->tb_offset_applied) {
                u64 new_tb = mftb() - vc->tb_offset_applied;
index fff90f2c3de2100f67c627f0dfbc39192d501f5d..cdf30c6eaf54268d7258490f8a76d951ad7aa9fe 100644 (file)
@@ -29,7 +29,7 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
-       hr->pcr = vc->pcr;
+       hr->pcr = vc->pcr | PCR_MASK;
        hr->dpdes = vc->dpdes;
        hr->hfscr = vcpu->arch.hfscr;
        hr->tb_offset = vc->tb_offset;
@@ -65,7 +65,7 @@ static void byteswap_hv_regs(struct hv_guest_state *hr)
        hr->lpid = swab32(hr->lpid);
        hr->vcpu_token = swab32(hr->vcpu_token);
        hr->lpcr = swab64(hr->lpcr);
-       hr->pcr = swab64(hr->pcr);
+       hr->pcr = swab64(hr->pcr) | PCR_MASK;
        hr->amor = swab64(hr->amor);
        hr->dpdes = swab64(hr->dpdes);
        hr->hfscr = swab64(hr->hfscr);
@@ -148,7 +148,7 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
-       vc->pcr = hr->pcr;
+       vc->pcr = hr->pcr | PCR_MASK;
        vc->dpdes = hr->dpdes;
        vcpu->arch.hfscr = hr->hfscr;
        vcpu->arch.dawr = hr->dawr0;
index 7186c65c61c9445608ab1e36bd8efde28382fc8b..220305454c23c9e8e8a2b3d975682de53615c93f 100644 (file)
@@ -433,6 +433,37 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r)
                (HPTE_R_KEY_HI | HPTE_R_KEY_LO));
 }
 
+static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid)
+{
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+               /* Radix flush for a hash guest */
+
+               unsigned long rb,rs,prs,r,ric;
+
+               rb = PPC_BIT(52); /* IS = 2 */
+               rs = 0;  /* lpid = 0 */
+               prs = 0; /* partition scoped */
+               r = 1;   /* radix format */
+               ric = 0; /* RIC_FLUSH_TLB */
+
+               /*
+                * Need the extra ptesync to make sure we don't
+                * re-order the tlbie
+                */
+               asm volatile("ptesync": : :"memory");
+               asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+                            : : "r"(rb), "i"(r), "i"(prs),
+                              "i"(ric), "r"(rs) : "memory");
+       }
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
+                            "r" (rb_value), "r" (lpid));
+       }
+}
+
 static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                      long npages, int global, bool need_sync)
 {
@@ -451,16 +482,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                                     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
                }
 
-               if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
-                       /*
-                        * Need the extra ptesync to make sure we don't
-                        * re-order the tlbie
-                        */
-                       asm volatile("ptesync": : :"memory");
-                       asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
-                                    "r" (rbvalues[0]), "r" (kvm->arch.lpid));
-               }
-
+               fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid);
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
        } else {
                if (need_sync)
index 4d2ec77d806c3da231619e9888bae1aba56f0b1c..287d5911df0f4998c0234e14a08024add3ccc589 100644 (file)
@@ -58,7 +58,7 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
        hcpu = hcore << threads_shift;
        kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
        smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
-       kvmppc_set_host_ipi(hcpu, 1);
+       kvmppc_set_host_ipi(hcpu);
        smp_mb();
        kvmhv_rm_send_ipi(hcpu);
 }
index 9a05b0d932efa53045444b02c8372573feabcb46..74a9cfe84aeedfb945628aa31ec0bae0348e7eec 100644 (file)
@@ -644,8 +644,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 
        /* Load guest PCR value to select appropriate compat mode */
 37:    ld      r7, VCORE_PCR(r5)
-       cmpdi   r7, 0
+       LOAD_REG_IMMEDIATE(r6, PCR_MASK)
+       cmpld   r7, r6
        beq     38f
+       or      r7, r7, r6
        mtspr   SPRN_PCR, r7
 38:
 
@@ -1913,10 +1915,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
        /* Reset PCR */
        ld      r0, VCORE_PCR(r5)
-       cmpdi   r0, 0
+       LOAD_REG_IMMEDIATE(r6, PCR_MASK)
+       cmpld   r0, r6
        beq     18f
-       li      r0, 0
-       mtspr   SPRN_PCR, r0
+       mtspr   SPRN_PCR, r6
 18:
        /* Signal secondary CPUs to continue */
        stb     r0,VCORE_IN_GUEST(r5)
index 90ab4f31e2b3f83ff1f13657f0004e95bd14db59..523e42eb11daa4674d7eab30c86779040fdcefca 100644 (file)
@@ -197,9 +197,32 @@ static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
        return va;
 }
 
-static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
+                                  int apsize, int ssize)
 {
-       if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+               /* Radix flush for a hash guest */
+
+               unsigned long rb,rs,prs,r,ric;
+
+               rb = PPC_BIT(52); /* IS = 2 */
+               rs = 0;  /* lpid = 0 */
+               prs = 0; /* partition scoped */
+               r = 1;   /* radix format */
+               ric = 0; /* RIC_FLUSH_TLB */
+
+               /*
+                * Need the extra ptesync to make sure we don't
+                * re-order the tlbie
+                */
+               asm volatile("ptesync": : :"memory");
+               asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+                            : : "r"(rb), "i"(r), "i"(prs),
+                              "i"(ric), "r"(rs) : "memory");
+       }
+
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                /* Need the extra ptesync to ensure we don't reorder tlbie*/
                asm volatile("ptesync": : :"memory");
                ___tlbie(vpn, psize, apsize, ssize);
@@ -283,7 +306,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
                asm volatile("ptesync": : :"memory");
        } else {
                __tlbie(vpn, psize, apsize, ssize);
-               fixup_tlbie(vpn, psize, apsize, ssize);
+               fixup_tlbie_vpn(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        }
        if (lock_tlbie && !use_local)
@@ -856,7 +879,7 @@ static void native_flush_hash_range(unsigned long number, int local)
                /*
                 * Just do one more with the last used values.
                 */
-               fixup_tlbie(vpn, psize, psize, ssize);
+               fixup_tlbie_vpn(vpn, psize, psize, ssize);
                asm volatile("eieio; tlbsync; ptesync":::"memory");
 
                if (lock_tlbie)
index 2d0cb5ba9a47e89317a1ebc9a46c3ff7d1a1a423..0ba30b8b935bcf2a0251185b2655555542c053c0 100644 (file)
@@ -256,8 +256,21 @@ void destroy_context(struct mm_struct *mm)
 #ifdef CONFIG_SPAPR_TCE_IOMMU
        WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
 #endif
+       /*
+        * For tasks which were successfully initialized we end up calling
+        * arch_exit_mmap() which clears the process table entry. And
+        * arch_exit_mmap() is called before the required fullmm TLB flush
+        * which does a RIC=2 flush. Hence for an initialized task, we do clear
+        * any cached process table entries.
+        *
+        * The condition below handles the error case during task init. We have
+        * set the process table entry early and if we fail a task
+        * initialization, we need to ensure the process table entry is zeroed.
+        * We need not worry about process table entry caches because the task
+        * never ran with the PID value.
+        */
        if (radix_enabled())
-               WARN_ON(process_tb[mm->context.id].prtb0 != 0);
+               process_tb[mm->context.id].prtb0 = 0;
        else
                subpage_prot_free(mm);
        destroy_contexts(&mm->context);
index 631be42abd330611db0c8f67a7adc6a56dc0a57d..67af871190c6dcbcdbaaedbf192e34b2c80e7e9f 100644 (file)
@@ -196,22 +196,83 @@ static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid
        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
 }
 
-static inline void fixup_tlbie(void)
+
+static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
+                                 unsigned long ap)
+{
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
+       }
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
+       }
+}
+
+static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
+                                       unsigned long ap)
+{
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_pid(0, RIC_FLUSH_TLB);
+       }
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
+       }
+}
+
+static inline void fixup_tlbie_pid(unsigned long pid)
 {
-       unsigned long pid = 0;
+       /*
+        * We can use any address for the invalidation, pick one which is
+        * probably unused as an optimisation.
+        */
        unsigned long va = ((1UL << 52) - 1);
 
-       if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_pid(0, RIC_FLUSH_TLB);
+       }
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
        }
 }
 
+
+static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
+                                      unsigned long ap)
+{
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
+       }
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
+       }
+}
+
 static inline void fixup_tlbie_lpid(unsigned long lpid)
 {
+       /*
+        * We can use any address for the invalidation, pick one which is
+        * probably unused as an optimisation.
+        */
        unsigned long va = ((1UL << 52) - 1);
 
-       if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+               asm volatile("ptesync": : :"memory");
+               __tlbie_lpid(0, RIC_FLUSH_TLB);
+       }
+
+       if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                asm volatile("ptesync": : :"memory");
                __tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
        }
@@ -258,6 +319,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
        switch (ric) {
        case RIC_FLUSH_TLB:
                __tlbie_pid(pid, RIC_FLUSH_TLB);
+               fixup_tlbie_pid(pid);
                break;
        case RIC_FLUSH_PWC:
                __tlbie_pid(pid, RIC_FLUSH_PWC);
@@ -265,8 +327,8 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
        case RIC_FLUSH_ALL:
        default:
                __tlbie_pid(pid, RIC_FLUSH_ALL);
+               fixup_tlbie_pid(pid);
        }
-       fixup_tlbie();
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -315,6 +377,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
        switch (ric) {
        case RIC_FLUSH_TLB:
                __tlbie_lpid(lpid, RIC_FLUSH_TLB);
+               fixup_tlbie_lpid(lpid);
                break;
        case RIC_FLUSH_PWC:
                __tlbie_lpid(lpid, RIC_FLUSH_PWC);
@@ -322,8 +385,8 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
        case RIC_FLUSH_ALL:
        default:
                __tlbie_lpid(lpid, RIC_FLUSH_ALL);
+               fixup_tlbie_lpid(lpid);
        }
-       fixup_tlbie_lpid(lpid);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -390,6 +453,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
 
        for (addr = start; addr < end; addr += page_size)
                __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+
+       fixup_tlbie_va_range(addr - page_size, pid, ap);
 }
 
 static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
@@ -399,7 +464,7 @@ static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
 
        asm volatile("ptesync": : :"memory");
        __tlbie_va(va, pid, ap, ric);
-       fixup_tlbie();
+       fixup_tlbie_va(va, pid, ap);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -457,7 +522,7 @@ static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
 
        asm volatile("ptesync": : :"memory");
        __tlbie_lpid_va(va, lpid, ap, ric);
-       fixup_tlbie_lpid(lpid);
+       fixup_tlbie_lpid_va(va, lpid, ap);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -469,7 +534,6 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
        if (also_pwc)
                __tlbie_pid(pid, RIC_FLUSH_PWC);
        __tlbie_va_range(start, end, pid, page_size, psize);
-       fixup_tlbie();
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
@@ -856,7 +920,7 @@ is_local:
                        if (gflush)
                                __tlbie_va_range(gstart, gend, pid,
                                                PUD_SIZE, MMU_PAGE_1G);
-                       fixup_tlbie();
+
                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                } else {
                        _tlbiel_va_range_multicast(mm,
index 802387b231adcd5036a6d72a26bea5f81f730d4c..0e6ed4413eeacb59e6ce3804bc0d5f35cc6bd6ff 100644 (file)
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
+static pgprot_t kasan_prot_ro(void)
+{
+       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               return PAGE_READONLY;
+
+       return PAGE_KERNEL_RO;
+}
+
 static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
        unsigned long va = (unsigned long)kasan_early_shadow_page;
@@ -26,6 +34,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 {
        pmd_t *pmd;
        unsigned long k_cur, k_next;
+       pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
 
        pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
@@ -43,10 +52,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 
                if (!new)
                        return -ENOMEM;
-               if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
-                       kasan_populate_pte(new, PAGE_READONLY);
-               else
-                       kasan_populate_pte(new, PAGE_KERNEL_RO);
+               kasan_populate_pte(new, prot);
 
                smp_wmb(); /* See comment in __pte_alloc */
 
@@ -103,11 +109,23 @@ static int __ref kasan_init_region(void *start, size_t size)
 
 static void __init kasan_remap_early_shadow_ro(void)
 {
-       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
-               kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
-       else
-               kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
+       pgprot_t prot = kasan_prot_ro();
+       unsigned long k_start = KASAN_SHADOW_START;
+       unsigned long k_end = KASAN_SHADOW_END;
+       unsigned long k_cur;
+       phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+       kasan_populate_pte(kasan_early_shadow_pte, prot);
+
+       for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
+               pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+               pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+               if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+                       continue;
 
+               __set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+       }
        flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
 }
 
index 94cd96b9b7bbc5d9b224bb9c9b9f02b9516ebfeb..fbd6e6b7bbf28c4ac806681e4abd42e488504af8 100644 (file)
@@ -193,7 +193,7 @@ static void pnv_smp_cpu_kill_self(void)
                 * for coming online, which are handled via
                 * generic_check_cpu_restart() calls.
                 */
-               kvmppc_set_host_ipi(cpu, 0);
+               kvmppc_clear_host_ipi(cpu);
 
                srr1 = pnv_cpu_offline(cpu);
 
index 36b846f6e74ee503fc25f23d890ecf0a654c754f..b53359258d9953c551f9f90f694adf80240b1763 100644 (file)
@@ -56,6 +56,22 @@ EXPORT_SYMBOL(plpar_hcall);
 EXPORT_SYMBOL(plpar_hcall9);
 EXPORT_SYMBOL(plpar_hcall_norets);
 
+/*
+ * H_BLOCK_REMOVE supported block size for this page size in a segment whose base
+ * page size is that page size.
+ *
+ * The first index is the segment base page size, the second one is the actual
+ * page size.
+ */
+static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
+
+/*
+ * Due to the involved complexity, and since the current hypervisor only
+ * returns this value or 0, we are limiting the supported H_BLOCK_REMOVE
+ * block size to 8.
+ */
+#define HBLKRM_SUPPORTED_BLOCK_SIZE 8
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 static u8 dtl_mask = DTL_LOG_PREEMPT;
 #else
@@ -984,6 +1000,17 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
 #define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL
 #define HBLKR_CTRL_ERRBUSY     0xa000000000000000UL
 
+/*
+ * Returns true if we support this block size for the specified segment
+ * base page size and actual page size.
+ *
+ * Currently, only a block size of 8 is supported.
+ */
+static inline bool is_supported_hlbkrm(int bpsize, int psize)
+{
+       return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
+}
+
 /**
  * H_BLOCK_REMOVE caller.
  * @idx should point to the latest @param entry set with a PTEX.
@@ -1143,7 +1170,8 @@ static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
        if (lock_tlbie)
                spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
-       if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
+       /* Assuming THP size is 16M */
+       if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
                hugepage_block_invalidate(slot, vpn, count, psize, ssize);
        else
                hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);
@@ -1311,6 +1339,137 @@ static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
                (void)call_block_remove(pix, param, true);
 }
 
+/*
+ * TLB Block Invalidate Characteristics
+ *
+ * These characteristics define the size of the block the hcall H_BLOCK_REMOVE
+ * is able to process for each couple segment base page size, actual page size.
+ *
+ * The ibm,get-system-parameter properties is returning a buffer with the
+ * following layout:
+ *
+ * [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
+ * -----------------
+ * TLB Block Invalidate Specifiers:
+ * [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
+ * [ 1 byte Number of page sizes (N) that are supported for the specified
+ *          TLB invalidate block size ]
+ * [ 1 byte Encoded segment base page size and actual page size
+ *          MSB=0 means 4k segment base page size and actual page size
+ *          MSB=1 the penc value in mmu_psize_def ]
+ * ...
+ * -----------------
+ * Next TLB Block Invalidate Specifiers...
+ * -----------------
+ * [ 0 ]
+ */
+static inline void set_hblkrm_bloc_size(int bpsize, int psize,
+                                       unsigned int block_size)
+{
+       if (block_size > hblkrm_size[bpsize][psize])
+               hblkrm_size[bpsize][psize] = block_size;
+}
+
+/*
+ * Decode the Encoded segment base page size and actual page size.
+ * PAPR specifies:
+ *   - bit 7 is the L bit
+ *   - bits 0-5 are the penc value
+ * If the L bit is 0, this means 4K segment base page size and actual page size
+ * otherwise the penc value should be read.
+ */
+#define HBLKRM_L_MASK          0x80
+#define HBLKRM_PENC_MASK       0x3f
+static inline void __init check_lp_set_hblkrm(unsigned int lp,
+                                             unsigned int block_size)
+{
+       unsigned int bpsize, psize;
+
+       /* First, check the L bit, if not set, this means 4K */
+       if ((lp & HBLKRM_L_MASK) == 0) {
+               set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
+               return;
+       }
+
+       lp &= HBLKRM_PENC_MASK;
+       for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
+               struct mmu_psize_def *def = &mmu_psize_defs[bpsize];
+
+               for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+                       if (def->penc[psize] == lp) {
+                               set_hblkrm_bloc_size(bpsize, psize, block_size);
+                               return;
+                       }
+               }
+       }
+}
+
+#define SPLPAR_TLB_BIC_TOKEN           50
+
+/*
+ * The size of the TLB Block Invalidate Characteristics is variable. But at the
+ * maximum it will be the number of possible page sizes *2 + 10 bytes.
+ * Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
+ * (128 bytes) for the buffer to get plenty of space.
+ */
+#define SPLPAR_TLB_BIC_MAXLENGTH       128
+
+void __init pseries_lpar_read_hblkrm_characteristics(void)
+{
+       unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
+       int call_status, len, idx, bpsize;
+
+       spin_lock(&rtas_data_buf_lock);
+       memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
+       call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
+                               NULL,
+                               SPLPAR_TLB_BIC_TOKEN,
+                               __pa(rtas_data_buf),
+                               RTAS_DATA_BUF_SIZE);
+       memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH);
+       local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0';
+       spin_unlock(&rtas_data_buf_lock);
+
+       if (call_status != 0) {
+               pr_warn("%s %s Error calling get-system-parameter (0x%x)\n",
+                       __FILE__, __func__, call_status);
+               return;
+       }
+
+       /*
+        * The first two (2) bytes of the data in the buffer are the length of
+        * the returned data, not counting these first two (2) bytes.
+        */
+       len = be16_to_cpu(*((u16 *)local_buffer)) + 2;
+       if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
+               pr_warn("%s too large returned buffer %d", __func__, len);
+               return;
+       }
+
+       idx = 2;
+       while (idx < len) {
+               u8 block_shift = local_buffer[idx++];
+               u32 block_size;
+               unsigned int npsize;
+
+               if (!block_shift)
+                       break;
+
+               block_size = 1 << block_shift;
+
+               for (npsize = local_buffer[idx++];
+                    npsize > 0 && idx < len; npsize--)
+                       check_lp_set_hblkrm((unsigned int) local_buffer[idx++],
+                                           block_size);
+       }
+
+       for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
+               for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
+                       if (hblkrm_size[bpsize][idx])
+                               pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d",
+                                       bpsize, idx, hblkrm_size[bpsize][idx]);
+}
+
 /*
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
@@ -1330,7 +1489,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
        if (lock_tlbie)
                spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
-       if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) {
+       if (is_supported_hlbkrm(batch->psize, batch->psize)) {
                do_block_remove(number, batch, param);
                goto out;
        }
index a5ac371a3f066ecbabb1c2899a4e36468e598a01..61883291defc385a5119b9ce1c5d8d7e059764c5 100644 (file)
@@ -65,29 +65,21 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
                cond_resched();
        } while (rc == H_BUSY);
 
-       if (rc) {
-               /* H_OVERLAP needs a separate error path */
-               if (rc == H_OVERLAP)
-                       return -EBUSY;
-
-               dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
-               return -ENXIO;
-       }
+       if (rc)
+               return rc;
 
        p->bound_addr = saved;
-
-       dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
-
-       return 0;
+       dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
+       return rc;
 }
 
-static int drc_pmem_unbind(struct papr_scm_priv *p)
+static void drc_pmem_unbind(struct papr_scm_priv *p)
 {
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t token = 0;
        int64_t rc;
 
-       dev_dbg(&p->pdev->dev, "unbind drc %x\n", p->drc_index);
+       dev_dbg(&p->pdev->dev, "unbind drc 0x%x\n", p->drc_index);
 
        /* NB: unbind has the same retry requirements as drc_pmem_bind() */
        do {
@@ -110,12 +102,48 @@ static int drc_pmem_unbind(struct papr_scm_priv *p)
        if (rc)
                dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
        else
-               dev_dbg(&p->pdev->dev, "unbind drc %x complete\n",
+               dev_dbg(&p->pdev->dev, "unbind drc 0x%x complete\n",
                        p->drc_index);
 
-       return rc == H_SUCCESS ? 0 : -ENXIO;
+       return;
 }
 
+static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
+{
+       unsigned long start_addr;
+       unsigned long end_addr;
+       unsigned long ret[PLPAR_HCALL_BUFSIZE];
+       int64_t rc;
+
+
+       rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
+                        p->drc_index, 0);
+       if (rc)
+               goto err_out;
+       start_addr = ret[0];
+
+       /* Make sure the full region is bound. */
+       rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
+                        p->drc_index, p->blocks - 1);
+       if (rc)
+               goto err_out;
+       end_addr = ret[0];
+
+       if ((end_addr - start_addr) != ((p->blocks - 1) * p->block_size))
+               goto err_out;
+
+       p->bound_addr = start_addr;
+       dev_dbg(&p->pdev->dev, "bound drc 0x%x to %pR\n", p->drc_index, &p->res);
+       return rc;
+
+err_out:
+       dev_info(&p->pdev->dev,
+                "Failed to query, trying an unbind followed by bind");
+       drc_pmem_unbind(p);
+       return drc_pmem_bind(p);
+}
+
+
 static int papr_scm_meta_get(struct papr_scm_priv *p,
                             struct nd_cmd_get_config_data_hdr *hdr)
 {
@@ -436,14 +464,14 @@ static int papr_scm_probe(struct platform_device *pdev)
        rc = drc_pmem_bind(p);
 
        /* If phyp says drc memory still bound then force unbound and retry */
-       if (rc == -EBUSY) {
-               dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
-               drc_pmem_unbind(p);
-               rc = drc_pmem_bind(p);
-       }
+       if (rc == H_OVERLAP)
+               rc = drc_pmem_query_n_bind(p);
 
-       if (rc)
+       if (rc != H_SUCCESS) {
+               dev_err(&p->pdev->dev, "bind err: %d\n", rc);
+               rc = -ENXIO;
                goto err;
+       }
 
        /* setup the resource for the newly bound range */
        p->res.start = p->bound_addr;
index a6624d4bd9d0d83c8bd5d04b3958d0684787617c..13fa370a87e4eb0d660482060a3b3d424483b559 100644 (file)
@@ -112,5 +112,6 @@ static inline unsigned long cmo_get_page_size(void)
 int dlpar_workqueue_init(void);
 
 void pseries_setup_rfi_flush(void);
+void pseries_lpar_read_hblkrm_characteristics(void);
 
 #endif /* _PSERIES_PSERIES_H */
index f8adcd0e45893d630aab93501213700656d05902..0a40201f315ffd14dd39c9a7b71274c1d3db25a3 100644 (file)
@@ -744,6 +744,7 @@ static void __init pSeries_setup_arch(void)
 
        pseries_setup_rfi_flush();
        setup_stf_barrier();
+       pseries_lpar_read_hblkrm_characteristics();
 
        /* By default, only probe PCI (can be overridden by rtas_pci) */
        pci_add_flags(PCI_PROBE_ONLY);
index 485569ff7ef1df5b0356e410fe89db0ea2b1655e..7d13d2ef5a9054eaf009be6891a7132ff59dade6 100644 (file)
@@ -140,7 +140,7 @@ static unsigned int icp_native_get_irq(void)
 
 static void icp_native_cause_ipi(int cpu)
 {
-       kvmppc_set_host_ipi(cpu, 1);
+       kvmppc_set_host_ipi(cpu);
        icp_native_set_qirr(cpu, IPI_PRIORITY);
 }
 
@@ -179,7 +179,7 @@ void icp_native_flush_interrupt(void)
        if (vec == XICS_IPI) {
                /* Clear pending IPI */
                int cpu = smp_processor_id();
-               kvmppc_set_host_ipi(cpu, 0);
+               kvmppc_clear_host_ipi(cpu);
                icp_native_set_qirr(cpu, 0xff);
        } else {
                pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
@@ -200,7 +200,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
 {
        int cpu = smp_processor_id();
 
-       kvmppc_set_host_ipi(cpu, 0);
+       kvmppc_clear_host_ipi(cpu);
        icp_native_set_qirr(cpu, 0xff);
 
        return smp_ipi_demux();
index 8bb8dd7dd6ad626a255a9b2f910ca316d5c41eab..68fd2540b093173288a0190451b80f6eec5a79e3 100644 (file)
@@ -126,7 +126,7 @@ static void icp_opal_cause_ipi(int cpu)
 {
        int hw_cpu = get_hard_smp_processor_id(cpu);
 
-       kvmppc_set_host_ipi(cpu, 1);
+       kvmppc_set_host_ipi(cpu);
        opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
@@ -134,7 +134,7 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
        int cpu = smp_processor_id();
 
-       kvmppc_set_host_ipi(cpu, 0);
+       kvmppc_clear_host_ipi(cpu);
        opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
        return smp_ipi_demux();
@@ -157,7 +157,7 @@ void icp_opal_flush_interrupt(void)
                if (vec == XICS_IPI) {
                        /* Clear pending IPI */
                        int cpu = smp_processor_id();
-                       kvmppc_set_host_ipi(cpu, 0);
+                       kvmppc_clear_host_ipi(cpu);
                        opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
                } else {
                        pr_err("XICS: hw interrupt 0x%x to offline cpu, "
index 527749066d31e7faee3ab0dc5de228b488acba74..fb4ee54443799363029a258e0b1b0bf01445f741 100644 (file)
@@ -25,6 +25,7 @@ KCOV_INSTRUMENT := n
 
 PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
 PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
index 70b00ae4ec383aa5584e6f52b80c549b587ca5e2..8af793f5e811322af3d933b911419c49211e1da3 100644 (file)
@@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 
                if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
                        hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
-                               atomic_inc(&vcc->stats->rx_drop);
+                       atomic_inc(&vcc->stats->rx_drop);
                        goto return_host_buffers;
                }
 
index 1dd467bed8fc1255d823a9083a68af9724715d66..6d7ec371e7b2fa6a790ef72a4206d77a51301889 100644 (file)
@@ -352,7 +352,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
 
        if (family == AF_INET) {
                rt = container_of(dst, struct rtable, dst);
-               return rt->rt_gw_family == AF_INET;
+               return rt->rt_uses_gateway;
        }
 
        rt6 = container_of(dst, struct rt6_info, dst);
index 97975bb7f34735311cf606446f68a02b6b59b7ae..2369b8af81f3b7e4d829a75ef979fb1858f0682f 100644 (file)
@@ -70,7 +70,6 @@
  */
 #define AMD_IOMMU_PGSIZES      ((~0xFFFUL) & ~(2ULL << 38))
 
-static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 
 /* List of all available dev_data structures */
@@ -202,6 +201,7 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
        if (!dev_data)
                return NULL;
 
+       spin_lock_init(&dev_data->lock);
        dev_data->devid = devid;
        ratelimit_default_init(&dev_data->rs);
 
@@ -501,6 +501,29 @@ static void iommu_uninit_device(struct device *dev)
         */
 }
 
+/*
+ * Helper function to get the first pte of a large mapping
+ */
+static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
+                        unsigned long *count)
+{
+       unsigned long pte_mask, pg_size, cnt;
+       u64 *fpte;
+
+       pg_size  = PTE_PAGE_SIZE(*pte);
+       cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
+       pte_mask = ~((cnt << 3) - 1);
+       fpte     = (u64 *)(((unsigned long)pte) & pte_mask);
+
+       if (page_size)
+               *page_size = pg_size;
+
+       if (count)
+               *count = cnt;
+
+       return fpte;
+}
+
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -1311,8 +1334,12 @@ static void domain_flush_np_cache(struct protection_domain *domain,
                dma_addr_t iova, size_t size)
 {
        if (unlikely(amd_iommu_np_cache)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&domain->lock, flags);
                domain_flush_pages(domain, iova, size);
                domain_flush_complete(domain);
+               spin_unlock_irqrestore(&domain->lock, flags);
        }
 }
 
@@ -1425,7 +1452,7 @@ static void free_pagetable(struct protection_domain *domain)
        BUG_ON(domain->mode < PAGE_MODE_NONE ||
               domain->mode > PAGE_MODE_6_LEVEL);
 
-       free_sub_pt(root, domain->mode, freelist);
+       freelist = free_sub_pt(root, domain->mode, freelist);
 
        free_page_list(freelist);
 }
@@ -1435,10 +1462,11 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static void increase_address_space(struct protection_domain *domain,
+static bool increase_address_space(struct protection_domain *domain,
                                   gfp_t gfp)
 {
        unsigned long flags;
+       bool ret = false;
        u64 *pte;
 
        spin_lock_irqsave(&domain->lock, flags);
@@ -1455,19 +1483,21 @@ static void increase_address_space(struct protection_domain *domain,
                                        iommu_virt_to_phys(domain->pt_root));
        domain->pt_root  = pte;
        domain->mode    += 1;
-       domain->updated  = true;
+
+       ret = true;
 
 out:
        spin_unlock_irqrestore(&domain->lock, flags);
 
-       return;
+       return ret;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
                      unsigned long address,
                      unsigned long page_size,
                      u64 **pte_page,
-                     gfp_t gfp)
+                     gfp_t gfp,
+                     bool *updated)
 {
        int level, end_lvl;
        u64 *pte, *page;
@@ -1475,7 +1505,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
        BUG_ON(!is_power_of_2(page_size));
 
        while (address > PM_LEVEL_SIZE(domain->mode))
-               increase_address_space(domain, gfp);
+               *updated = increase_address_space(domain, gfp) || *updated;
 
        level   = domain->mode - 1;
        pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
@@ -1489,9 +1519,32 @@ static u64 *alloc_pte(struct protection_domain *domain,
                __pte     = *pte;
                pte_level = PM_PTE_LEVEL(__pte);
 
-               if (!IOMMU_PTE_PRESENT(__pte) ||
+               /*
+                * If we replace a series of large PTEs, we need
+                * to tear down all of them.
+                */
+               if (IOMMU_PTE_PRESENT(__pte) &&
                    pte_level == PAGE_MODE_7_LEVEL) {
+                       unsigned long count, i;
+                       u64 *lpte;
+
+                       lpte = first_pte_l7(pte, NULL, &count);
+
+                       /*
+                        * Unmap the replicated PTEs that still match the
+                        * original large mapping
+                        */
+                       for (i = 0; i < count; ++i)
+                               cmpxchg64(&lpte[i], __pte, 0ULL);
+
+                       *updated = true;
+                       continue;
+               }
+
+               if (!IOMMU_PTE_PRESENT(__pte) ||
+                   pte_level == PAGE_MODE_NONE) {
                        page = (u64 *)get_zeroed_page(gfp);
+
                        if (!page)
                                return NULL;
 
@@ -1500,8 +1553,8 @@ static u64 *alloc_pte(struct protection_domain *domain,
                        /* pte could have been changed somewhere. */
                        if (cmpxchg64(pte, __pte, __npte) != __pte)
                                free_page((unsigned long)page);
-                       else if (pte_level == PAGE_MODE_7_LEVEL)
-                               domain->updated = true;
+                       else if (IOMMU_PTE_PRESENT(__pte))
+                               *updated = true;
 
                        continue;
                }
@@ -1566,17 +1619,12 @@ static u64 *fetch_pte(struct protection_domain *domain,
                *page_size = PTE_LEVEL_PAGE_SIZE(level);
        }
 
-       if (PM_PTE_LEVEL(*pte) == 0x07) {
-               unsigned long pte_mask;
-
-               /*
-                * If we have a series of large PTEs, make
-                * sure to return a pointer to the first one.
-                */
-               *page_size = pte_mask = PTE_PAGE_SIZE(*pte);
-               pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
-               pte        = (u64 *)(((unsigned long)pte) & pte_mask);
-       }
+       /*
+        * If we have a series of large PTEs, make
+        * sure to return a pointer to the first one.
+        */
+       if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
+               pte = first_pte_l7(pte, page_size, NULL);
 
        return pte;
 }
@@ -1615,26 +1663,29 @@ static int iommu_map_page(struct protection_domain *dom,
                          gfp_t gfp)
 {
        struct page *freelist = NULL;
+       bool updated = false;
        u64 __pte, *pte;
-       int i, count;
+       int ret, i, count;
 
        BUG_ON(!IS_ALIGNED(bus_addr, page_size));
        BUG_ON(!IS_ALIGNED(phys_addr, page_size));
 
+       ret = -EINVAL;
        if (!(prot & IOMMU_PROT_MASK))
-               return -EINVAL;
+               goto out;
 
        count = PAGE_SIZE_PTE_COUNT(page_size);
-       pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
+       pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
 
+       ret = -ENOMEM;
        if (!pte)
-               return -ENOMEM;
+               goto out;
 
        for (i = 0; i < count; ++i)
                freelist = free_clear_pte(&pte[i], pte[i], freelist);
 
        if (freelist != NULL)
-               dom->updated = true;
+               updated = true;
 
        if (count > 1) {
                __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
@@ -1650,12 +1701,21 @@ static int iommu_map_page(struct protection_domain *dom,
        for (i = 0; i < count; ++i)
                pte[i] = __pte;
 
-       update_domain(dom);
+       ret = 0;
+
+out:
+       if (updated) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dom->lock, flags);
+               update_domain(dom);
+               spin_unlock_irqrestore(&dom->lock, flags);
+       }
 
        /* Everything flushed out, free pages now */
        free_page_list(freelist);
 
-       return 0;
+       return ret;
 }
 
 static unsigned long iommu_unmap_page(struct protection_domain *dom,
@@ -1806,8 +1866,12 @@ static void free_gcr3_table(struct protection_domain *domain)
 
 static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&dom->domain.lock, flags);
        domain_flush_tlb(&dom->domain);
        domain_flush_complete(&dom->domain);
+       spin_unlock_irqrestore(&dom->domain.lock, flags);
 }
 
 static void iova_domain_flush_tlb(struct iova_domain *iovad)
@@ -2022,36 +2086,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
        domain->dev_cnt                 -= 1;
 }
 
-/*
- * If a device is not yet associated with a domain, this function makes the
- * device visible in the domain
- */
-static int __attach_device(struct iommu_dev_data *dev_data,
-                          struct protection_domain *domain)
-{
-       int ret;
-
-       /* lock domain */
-       spin_lock(&domain->lock);
-
-       ret = -EBUSY;
-       if (dev_data->domain != NULL)
-               goto out_unlock;
-
-       /* Attach alias group root */
-       do_attach(dev_data, domain);
-
-       ret = 0;
-
-out_unlock:
-
-       /* ready */
-       spin_unlock(&domain->lock);
-
-       return ret;
-}
-
-
 static void pdev_iommuv2_disable(struct pci_dev *pdev)
 {
        pci_disable_ats(pdev);
@@ -2133,19 +2167,28 @@ static int attach_device(struct device *dev,
        unsigned long flags;
        int ret;
 
+       spin_lock_irqsave(&domain->lock, flags);
+
        dev_data = get_dev_data(dev);
 
+       spin_lock(&dev_data->lock);
+
+       ret = -EBUSY;
+       if (dev_data->domain != NULL)
+               goto out;
+
        if (!dev_is_pci(dev))
                goto skip_ats_check;
 
        pdev = to_pci_dev(dev);
        if (domain->flags & PD_IOMMUV2_MASK) {
+               ret = -EINVAL;
                if (!dev_data->passthrough)
-                       return -EINVAL;
+                       goto out;
 
                if (dev_data->iommu_v2) {
                        if (pdev_iommuv2_enable(pdev) != 0)
-                               return -EINVAL;
+                               goto out;
 
                        dev_data->ats.enabled = true;
                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
@@ -2158,9 +2201,9 @@ static int attach_device(struct device *dev,
        }
 
 skip_ats_check:
-       spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
-       ret = __attach_device(dev_data, domain);
-       spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+       ret = 0;
+
+       do_attach(dev_data, domain);
 
        /*
         * We might boot into a crash-kernel here. The crashed kernel
@@ -2169,23 +2212,14 @@ skip_ats_check:
         */
        domain_flush_tlb_pde(domain);
 
-       return ret;
-}
-
-/*
- * Removes a device from a protection domain (unlocked)
- */
-static void __detach_device(struct iommu_dev_data *dev_data)
-{
-       struct protection_domain *domain;
-
-       domain = dev_data->domain;
+       domain_flush_complete(domain);
 
-       spin_lock(&domain->lock);
+out:
+       spin_unlock(&dev_data->lock);
 
-       do_detach(dev_data);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
-       spin_unlock(&domain->lock);
+       return ret;
 }
 
 /*
@@ -2200,6 +2234,10 @@ static void detach_device(struct device *dev)
        dev_data = get_dev_data(dev);
        domain   = dev_data->domain;
 
+       spin_lock_irqsave(&domain->lock, flags);
+
+       spin_lock(&dev_data->lock);
+
        /*
         * First check if the device is still attached. It might already
         * be detached from its domain because the generic
@@ -2207,15 +2245,12 @@ static void detach_device(struct device *dev)
         * our alias handling.
         */
        if (WARN_ON(!dev_data->domain))
-               return;
+               goto out;
 
-       /* lock device table */
-       spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
-       __detach_device(dev_data);
-       spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+       do_detach(dev_data);
 
        if (!dev_is_pci(dev))
-               return;
+               goto out;
 
        if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                pdev_iommuv2_disable(to_pci_dev(dev));
@@ -2223,6 +2258,11 @@ static void detach_device(struct device *dev)
                pci_disable_ats(to_pci_dev(dev));
 
        dev_data->ats.enabled = false;
+
+out:
+       spin_unlock(&dev_data->lock);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static int amd_iommu_add_device(struct device *dev)
@@ -2354,15 +2394,10 @@ static void update_device_table(struct protection_domain *domain)
 
 static void update_domain(struct protection_domain *domain)
 {
-       if (!domain->updated)
-               return;
-
        update_device_table(domain);
 
        domain_flush_devices(domain);
        domain_flush_tlb_pde(domain);
-
-       domain->updated = false;
 }
 
 static int dir2prot(enum dma_data_direction direction)
@@ -2392,6 +2427,7 @@ static dma_addr_t __map_single(struct device *dev,
 {
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start, ret;
+       unsigned long flags;
        unsigned int pages;
        int prot = 0;
        int i;
@@ -2429,8 +2465,10 @@ out_unmap:
                iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
        }
 
+       spin_lock_irqsave(&dma_dom->domain.lock, flags);
        domain_flush_tlb(&dma_dom->domain);
        domain_flush_complete(&dma_dom->domain);
+       spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 
        dma_ops_free_iova(dma_dom, address, pages);
 
@@ -2459,8 +2497,12 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
        }
 
        if (amd_iommu_unmap_flush) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dma_dom->domain.lock, flags);
                domain_flush_tlb(&dma_dom->domain);
                domain_flush_complete(&dma_dom->domain);
+               spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
                dma_ops_free_iova(dma_dom, dma_addr, pages);
        } else {
                pages = __roundup_pow_of_two(pages);
@@ -2866,16 +2908,16 @@ static void cleanup_domain(struct protection_domain *domain)
        struct iommu_dev_data *entry;
        unsigned long flags;
 
-       spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
+       spin_lock_irqsave(&domain->lock, flags);
 
        while (!list_empty(&domain->dev_list)) {
                entry = list_first_entry(&domain->dev_list,
                                         struct iommu_dev_data, list);
                BUG_ON(!entry->domain);
-               __detach_device(entry);
+               do_detach(entry);
        }
 
-       spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static void protection_domain_free(struct protection_domain *domain)
@@ -3226,9 +3268,12 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
        struct protection_domain *dom = to_pdomain(domain);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dom->lock, flags);
        domain_flush_tlb_pde(dom);
        domain_flush_complete(dom);
+       spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
@@ -3290,7 +3335,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 
        /* Update data structure */
        domain->mode    = PAGE_MODE_NONE;
-       domain->updated = true;
 
        /* Make changes visible to IOMMUs */
        update_domain(domain);
@@ -3336,7 +3380,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 
        domain->glx      = levels;
        domain->flags   |= PD_IOMMUV2_MASK;
-       domain->updated  = true;
 
        update_domain(domain);
 
index 9ac229e92b07475f30010338e427cb146821edc1..c9c1612d52e00090a2fb1a4e374e9468ba25607e 100644 (file)
@@ -475,7 +475,6 @@ struct protection_domain {
        int glx;                /* Number of levels for GCR3 table */
        u64 *gcr3_tbl;          /* Guest CR3 table */
        unsigned long flags;    /* flags to find out type of domain */
-       bool updated;           /* complete domain flush required */
        unsigned dev_cnt;       /* devices assigned to this domain */
        unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 };
@@ -634,6 +633,9 @@ struct devid_map {
  * This struct contains device specific data for the IOMMU
  */
 struct iommu_dev_data {
+       /*Protect against attach/detach races */
+       spinlock_t lock;
+
        struct list_head list;            /* For domain->dev_list */
        struct llist_node dev_data_list;  /* For global dev_data_list */
        struct protection_domain *domain; /* Domain the device is bound to */
index c6ba37df4b9d47163a1ff6485667b696c3ce98cc..dff4132b3702c62ecebe24f3ad0fa0da1ca0c049 100644 (file)
@@ -754,6 +754,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
 
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
+       if (!capable(CAP_NET_RAW))
+               return -EPERM;
 
        sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
        if (!sk)
index 48e209e55843c8a3f29b6b85c94469f7666f7c5a..df1c7989e13dd79533aa845d63fe5a31260c1a4b 100644 (file)
@@ -487,7 +487,7 @@ config FUJITSU_ES
        depends on ACPI
        help
          This driver provides support for Extended Socket network device
-          on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
+         on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
 
 config THUNDERBOLT_NET
        tristate "Networking over Thunderbolt cable"
index faeb4419b205d5af2060013955464172c1876ae6..27551bf3d7e43b069c15cafe5917b01c32fd73a8 100644 (file)
@@ -56,19 +56,19 @@ config ARCNET_CAP
        tristate "Enable CAP mode packet interface"
        help
          ARCnet "cap mode" packet encapsulation. Used to get the hardware
-          acknowledge back to userspace. After the initial protocol byte every
-          packet is stuffed with an extra 4 byte "cookie" which doesn't
-          actually appear on the network. After transmit the driver will send
-          back a packet with protocol byte 0 containing the status of the
-          transmission:
-             0=no hardware acknowledge
-             1=excessive nak
-             2=transmission accepted by the receiver hardware
-
-          Received packets are also stuffed with the extra 4 bytes but it will
-          be random data.
-
-          Cap only listens to protocol 1-8.
+         acknowledge back to userspace. After the initial protocol byte every
+         packet is stuffed with an extra 4 byte "cookie" which doesn't
+         actually appear on the network. After transmit the driver will send
+         back a packet with protocol byte 0 containing the status of the
+         transmission:
+            0=no hardware acknowledge
+            1=excessive nak
+            2=transmission accepted by the receiver hardware
+
+         Received packets are also stuffed with the extra 4 bytes but it will
+         be random data.
+
+         Cap only listens to protocol 1-8.
 
 config ARCNET_COM90xx
        tristate "ARCnet COM90xx (normal) chipset driver"
index 8459115d9d4e51faf405335c9886908cee5c1b02..553776cc1d29ddceb9f4855d93e5d97366dd5a8f 100644 (file)
@@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
 static void arcnet_rx(struct net_device *dev, int bufnum)
 {
        struct arcnet_local *lp = netdev_priv(dev);
-       struct archdr pkt;
+       union {
+               struct archdr pkt;
+               char buf[512];
+       } rxdata;
        struct arc_rfc1201 *soft;
        int length, ofs;
 
-       soft = &pkt.soft.rfc1201;
+       soft = &rxdata.pkt.soft.rfc1201;
 
-       lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
-       if (pkt.hard.offset[0]) {
-               ofs = pkt.hard.offset[0];
+       lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
+       if (rxdata.pkt.hard.offset[0]) {
+               ofs = rxdata.pkt.hard.offset[0];
                length = 256 - ofs;
        } else {
-               ofs = pkt.hard.offset[1];
+               ofs = rxdata.pkt.hard.offset[1];
                length = 512 - ofs;
        }
 
        /* get the full header, if possible */
-       if (sizeof(pkt.soft) <= length) {
-               lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
+       if (sizeof(rxdata.pkt.soft) <= length) {
+               lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
        } else {
-               memset(&pkt.soft, 0, sizeof(pkt.soft));
+               memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
                lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
        }
 
        arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
-                  bufnum, pkt.hard.source, pkt.hard.dest, length);
+                  bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += length + ARC_HDR_SIZE;
@@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
        if (arc_proto_map[soft->proto]->is_ip) {
                if (BUGLVL(D_PROTO)) {
                        struct ArcProto
-                       *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
+                       *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
                        *newp = arc_proto_map[soft->proto];
 
                        if (oldp != newp) {
                                arc_printk(D_PROTO, dev,
                                           "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
-                                          soft->proto, pkt.hard.source,
+                                          soft->proto, rxdata.pkt.hard.source,
                                           newp->suffix, oldp->suffix);
                        }
                }
@@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
                lp->default_proto[0] = soft->proto;
 
                /* in striking contrast, the following isn't a hack. */
-               lp->default_proto[pkt.hard.source] = soft->proto;
+               lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
        }
        /* call the protocol-specific receiver. */
-       arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
+       arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
 }
 
 static void null_rx(struct net_device *dev, int bufnum,
index 4b3d0ddcda7940815494e7d126a952eb7b44d6f9..b412f7ba4f89ea889a9a7dc939278d6112ed3270 100644 (file)
@@ -15,10 +15,10 @@ config CAN_EMS_USB
          from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
 
 config CAN_ESD_USB2
-        tristate "ESD USB/2 CAN/USB interface"
-        ---help---
-          This driver supports the CAN-USB/2 interface
-          from esd electronic system design gmbh (http://www.esd.eu).
+       tristate "ESD USB/2 CAN/USB interface"
+       ---help---
+         This driver supports the CAN-USB/2 interface
+         from esd electronic system design gmbh (http://www.esd.eu).
 
 config CAN_GS_USB
        tristate "Geschwister Schneider UG interfaces"
index 3bb4f91aec9eafb11dd47db1f2a561a7ff13f79c..55d280fe38e4fe41f4ce8a3bd848718c19058d63 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
- *
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
  * Northstar Plus switch SerDes/SGMII PHY definitions
  *
  * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
index 180663138e754795046e471d44734b38f80efeaa..e2be31f3672a9e966b8ee91b6b63afba8aeac350 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * PCE microcode extracted from UGW 7.1.1 switch api
  *
index a24d8e61fbe7b6739e9de6764db26ba876f982ac..dd60d0837fc6db51f744db06fb26f1eeefb83673 100644 (file)
@@ -303,7 +303,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
        {                                                               \
                .name = #width,                                         \
                .val_bits = (width),                                    \
-               .reg_stride = (width) / 8,                              \
+               .reg_stride = 1,                                        \
                .reg_bits = (regbits) + (regalign),                     \
                .pad_bits = (regpad),                                   \
                .max_register = BIT(regbits) - 1,                       \
index 16f15c93a102c9b66d76c22ae0e8e310a6b6edf5..684aa51684dbd09cfa209afb3dbbb4b074087b60 100644 (file)
@@ -936,6 +936,9 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
 {
        struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
 
+       if (!dsa_is_user_port(ds, port))
+               return 0;
+
        qca8k_port_set_status(priv, port, 1);
        priv->port_sts[port].enabled = 1;
 
index 55424f39cb0dff2bf745ae6f1d8810fcdbedd7eb..f40b248f0b23a5581ffa3b3eefd69e4369f3c79b 100644 (file)
@@ -27,6 +27,7 @@ config NET_DSA_SJA1105_PTP
 config NET_DSA_SJA1105_TAS
        bool "Support for the Time-Aware Scheduler on NXP SJA1105"
        depends on NET_DSA_SJA1105
+       depends on NET_SCH_TAPRIO
        help
          This enables support for the TTEthernet-based egress scheduling
          engine in the SJA1105 DSA driver, which is controlled using a
index 1e2de9d062bf1b9401c2e292a48fb9668759c717..e8e9c166185de9ffcf45d55e8349e3678eb1d144 100644 (file)
@@ -140,17 +140,6 @@ source "drivers/net/ethernet/neterion/Kconfig"
 source "drivers/net/ethernet/netronome/Kconfig"
 source "drivers/net/ethernet/ni/Kconfig"
 source "drivers/net/ethernet/8390/Kconfig"
-
-config NET_NETX
-       tristate "NetX Ethernet support"
-       select MII
-       depends on ARCH_NETX
-       ---help---
-         This is support for the Hilscher netX builtin Ethernet ports
-
-         To compile this driver as a module, choose M here. The module
-         will be called netx-eth.
-
 source "drivers/net/ethernet/nvidia/Kconfig"
 source "drivers/net/ethernet/nxp/Kconfig"
 source "drivers/net/ethernet/oki-semi/Kconfig"
index 77f9838a76c941d6f1506345ef73a95cd2b61253..05abebc17804f48c7bb2f62def5737085bfedf3c 100644 (file)
@@ -64,7 +64,6 @@ obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
 obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
 obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
 obj-$(CONFIG_NET_VENDOR_NI) += ni/
-obj-$(CONFIG_NET_NETX) += netx-eth.o
 obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
 obj-$(CONFIG_LPC_ENET) += nxp/
 obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
index a5e2bcbf2722b3a195f3f47a928bb42290d85505..264a482ec31d4fc35ff88cb770e692ac17f097de 100644 (file)
@@ -21,17 +21,17 @@ config NET_VENDOR_ALLWINNER
 if NET_VENDOR_ALLWINNER
 
 config SUN4I_EMAC
-        tristate "Allwinner A10 EMAC support"
+       tristate "Allwinner A10 EMAC support"
        depends on ARCH_SUNXI
        depends on OF
        select CRC32
        select MII
        select PHYLIB
        select MDIO_SUN4I
-        ---help---
-          Support for Allwinner A10 EMAC ethernet driver.
+       ---help---
+         Support for Allwinner A10 EMAC ethernet driver.
 
-          To compile this driver as a module, choose M here.  The module
-          will be called sun4i-emac.
+         To compile this driver as a module, choose M here.  The module
+         will be called sun4i-emac.
 
 endif # NET_VENDOR_ALLWINNER
index 69ca99d8ac26d883d8d018f50383309d37fa56cf..cca72a75f5518e41ae8f6bc1fcbce00ee400ecae 100644 (file)
@@ -19,6 +19,7 @@ if NET_VENDOR_AMAZON
 config ENA_ETHERNET
        tristate "Elastic Network Adapter (ENA) support"
        depends on PCI_MSI && !CPU_BIG_ENDIAN
+       select DIMLIB
        ---help---
          This driver supports Elastic Network Adapter (ENA)"
 
index 38046bf0ff4484c845846ddf8360c1db887d5d94..2845ac2777246e0dc8d9df26695ac4414d3923a8 100644 (file)
@@ -211,8 +211,8 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
 
                pkt_ctrl->curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
-                       memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
-                              0x0, llq_info->desc_list_entry_size);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, llq_info->desc_list_entry_size);
 
                pkt_ctrl->idx = 0;
                if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
index 28892b8acd0e1e070ed4b246548b780728c69948..a95c263a45aa497b65b03344b2b002b6cd603a04 100644 (file)
@@ -306,15 +306,13 @@ irqreturn_t aq_vec_isr_legacy(int irq, void *private)
 {
        struct aq_vec_s *self = private;
        u64 irq_mask = 0U;
-       irqreturn_t err = 0;
+       int err;
 
-       if (!self) {
-               err = -EINVAL;
-               goto err_exit;
-       }
+       if (!self)
+               return IRQ_NONE;
        err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
        if (err < 0)
-               goto err_exit;
+               return IRQ_NONE;
 
        if (irq_mask) {
                self->aq_hw_ops->hw_irq_disable(self->aq_hw,
@@ -322,11 +320,10 @@ irqreturn_t aq_vec_isr_legacy(int irq, void *private)
                napi_schedule(&self->napi);
        } else {
                self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
-               err = IRQ_NONE;
+               return IRQ_NONE;
        }
 
-err_exit:
-       return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+       return IRQ_HANDLED;
 }
 
 cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
index 7df887e4024c8b75350d210e286abfb281e15587..a977a459bd20daffb5a0a3b81126480449979080 100644 (file)
@@ -2481,7 +2481,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        priv->phy_interface = of_get_phy_mode(dn);
        /* Default to GMII interface mode */
-       if (priv->phy_interface < 0)
+       if ((int)priv->phy_interface < 0)
                priv->phy_interface = PHY_INTERFACE_MODE_GMII;
 
        /* In the case of a fixed PHY, the DT node associated
index 35b59b5edf0f2ec653cd0cb21361247e4194a876..8e8d557901a970beea5287cf1ebfaa2ef1910f5b 100644 (file)
@@ -165,9 +165,8 @@ static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
 {
-       if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-               return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
-       return NULL;
+       return (struct macb_dma_desc_64 *)((void *)desc
+               + sizeof(struct macb_dma_desc));
 }
 #endif
 
index 71854a19cebef597fdf6289da071813c8bdb72fe..38024877751c409c167dd91e36a8c41a134a708b 100644 (file)
@@ -5701,7 +5701,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        whoami = t4_read_reg(adapter, PL_WHOAMI_A);
        pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
        chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
-       if (chip < 0) {
+       if ((int)chip < 0) {
                dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
                err = chip;
                goto out_free_adapter;
index e8c7eb842dbe02654e4004f4491fbd56191d615f..17d300ea99559f2e742be5b2be6a9fb3a4b3599d 100644 (file)
@@ -48,5 +48,5 @@ config BE2NET_SKYHAWK
          chipsets. (e.g. OneConnect OCe14xxx)
 
 comment "WARNING: be2net is useless without any enabled chip"
-        depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
+       depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
        BE2NET_SKYHAWK=n && BE2NET
index 7d6513ff8507d21a2fd15973805560b7f9a0779b..b73421c3e25b17fe9fcac66b138fb722bf4d46ae 100644 (file)
@@ -785,7 +785,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
        }
 
        priv->if_mode = of_get_phy_mode(np);
-       if (priv->if_mode < 0) {
+       if ((int)priv->if_mode < 0) {
                dev_err(priv->dev, "missing phy type\n");
                of_node_put(priv->phy_node);
                if (of_phy_is_fixed_link(np))
index 24bf7f68375fdb8bc7cf66269f165ff72e293f02..51ad86417cb13b3d3034e3ff321c8cf0f0a73001 100644 (file)
@@ -2067,7 +2067,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-void reset_gfar(struct net_device *ndev)
+static void reset_gfar(struct net_device *ndev)
 {
        struct gfar_private *priv = netdev_priv(ndev);
 
index 95a6b0926170e98166e06eca40a274392e2350c3..c41b19c760f8d5d5e22b22299139f6f0b9b13e72 100644 (file)
@@ -1194,7 +1194,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
                goto err_free_mdio;
 
        priv->phy_mode = of_get_phy_mode(node);
-       if (priv->phy_mode < 0) {
+       if ((int)priv->phy_mode < 0) {
                netdev_err(ndev, "not find phy-mode\n");
                ret = -EINVAL;
                goto err_mdiobus;
index 2e5172f61564113ee525a46d562b025acce11f25..2b073a3c0b8474370e1ac072548bd68d0e4f7471 100644 (file)
@@ -1207,7 +1207,7 @@ static void ibmvnic_cleanup(struct net_device *netdev)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
        /* ensure that transmissions are stopped if called by do_reset */
-       if (adapter->resetting)
+       if (test_bit(0, &adapter->resetting))
                netif_tx_disable(netdev);
        else
                netif_tx_stop_all_queues(netdev);
@@ -1428,7 +1428,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        u8 proto = 0;
        netdev_tx_t ret = NETDEV_TX_OK;
 
-       if (adapter->resetting) {
+       if (test_bit(0, &adapter->resetting)) {
                if (!netif_subqueue_stopped(netdev, skb))
                        netif_stop_subqueue(netdev, queue_num);
                dev_kfree_skb_any(skb);
@@ -1723,6 +1723,86 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
        return rc;
 }
 
+/**
+ * do_change_param_reset returns zero if we are able to keep processing reset
+ * events, or non-zero if we hit a fatal error and must halt.
+ */
+static int do_change_param_reset(struct ibmvnic_adapter *adapter,
+                                struct ibmvnic_rwi *rwi,
+                                u32 reset_state)
+{
+       struct net_device *netdev = adapter->netdev;
+       int i, rc;
+
+       netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
+                  rwi->reset_reason);
+
+       netif_carrier_off(netdev);
+       adapter->reset_reason = rwi->reset_reason;
+
+       ibmvnic_cleanup(netdev);
+
+       if (reset_state == VNIC_OPEN) {
+               rc = __ibmvnic_close(netdev);
+               if (rc)
+                       return rc;
+       }
+
+       release_resources(adapter);
+       release_sub_crqs(adapter, 1);
+       release_crq_queue(adapter);
+
+       adapter->state = VNIC_PROBED;
+
+       rc = init_crq_queue(adapter);
+
+       if (rc) {
+               netdev_err(adapter->netdev,
+                          "Couldn't initialize crq. rc=%d\n", rc);
+               return rc;
+       }
+
+       rc = ibmvnic_reset_init(adapter);
+       if (rc)
+               return IBMVNIC_INIT_FAILED;
+
+       /* If the adapter was in PROBE state prior to the reset,
+        * exit here.
+        */
+       if (reset_state == VNIC_PROBED)
+               return 0;
+
+       rc = ibmvnic_login(netdev);
+       if (rc) {
+               adapter->state = reset_state;
+               return rc;
+       }
+
+       rc = init_resources(adapter);
+       if (rc)
+               return rc;
+
+       ibmvnic_disable_irqs(adapter);
+
+       adapter->state = VNIC_CLOSED;
+
+       if (reset_state == VNIC_CLOSED)
+               return 0;
+
+       rc = __ibmvnic_open(netdev);
+       if (rc)
+               return IBMVNIC_OPEN_FAILED;
+
+       /* refresh device's multicast list */
+       ibmvnic_set_multi(netdev);
+
+       /* kick napi */
+       for (i = 0; i < adapter->req_rx_queues; i++)
+               napi_schedule(&adapter->napi[i]);
+
+       return 0;
+}
+
 /**
  * do_reset returns zero if we are able to keep processing reset events, or
  * non-zero if we hit a fatal error and must halt.
@@ -1738,6 +1818,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
                   rwi->reset_reason);
 
+       rtnl_lock();
+
        netif_carrier_off(netdev);
        adapter->reset_reason = rwi->reset_reason;
 
@@ -1751,16 +1833,25 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        if (reset_state == VNIC_OPEN &&
            adapter->reset_reason != VNIC_RESET_MOBILITY &&
            adapter->reset_reason != VNIC_RESET_FAILOVER) {
-               rc = __ibmvnic_close(netdev);
+               adapter->state = VNIC_CLOSING;
+
+               /* Release the RTNL lock before link state change and
+                * re-acquire after the link state change to allow
+                * linkwatch_event to grab the RTNL lock and run during
+                * a reset.
+                */
+               rtnl_unlock();
+               rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+               rtnl_lock();
                if (rc)
-                       return rc;
-       }
+                       goto out;
 
-       if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
-           adapter->wait_for_reset) {
-               release_resources(adapter);
-               release_sub_crqs(adapter, 1);
-               release_crq_queue(adapter);
+               if (adapter->state != VNIC_CLOSING) {
+                       rc = -1;
+                       goto out;
+               }
+
+               adapter->state = VNIC_CLOSED;
        }
 
        if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -1769,9 +1860,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                 */
                adapter->state = VNIC_PROBED;
 
-               if (adapter->wait_for_reset) {
-                       rc = init_crq_queue(adapter);
-               } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+               if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
                        rc = ibmvnic_reenable_crq_queue(adapter);
                        release_sub_crqs(adapter, 1);
                } else {
@@ -1783,36 +1872,35 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                if (rc) {
                        netdev_err(adapter->netdev,
                                   "Couldn't initialize crq. rc=%d\n", rc);
-                       return rc;
+                       goto out;
                }
 
                rc = ibmvnic_reset_init(adapter);
-               if (rc)
-                       return IBMVNIC_INIT_FAILED;
+               if (rc) {
+                       rc = IBMVNIC_INIT_FAILED;
+                       goto out;
+               }
 
                /* If the adapter was in PROBE state prior to the reset,
                 * exit here.
                 */
-               if (reset_state == VNIC_PROBED)
-                       return 0;
+               if (reset_state == VNIC_PROBED) {
+                       rc = 0;
+                       goto out;
+               }
 
                rc = ibmvnic_login(netdev);
                if (rc) {
                        adapter->state = reset_state;
-                       return rc;
+                       goto out;
                }
 
-               if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
-                   adapter->wait_for_reset) {
-                       rc = init_resources(adapter);
-                       if (rc)
-                               return rc;
-               } else if (adapter->req_rx_queues != old_num_rx_queues ||
-                          adapter->req_tx_queues != old_num_tx_queues ||
-                          adapter->req_rx_add_entries_per_subcrq !=
-                                                       old_num_rx_slots ||
-                          adapter->req_tx_entries_per_subcrq !=
-                                                       old_num_tx_slots) {
+               if (adapter->req_rx_queues != old_num_rx_queues ||
+                   adapter->req_tx_queues != old_num_tx_queues ||
+                   adapter->req_rx_add_entries_per_subcrq !=
+                   old_num_rx_slots ||
+                   adapter->req_tx_entries_per_subcrq !=
+                   old_num_tx_slots) {
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
                        release_napi(adapter);
@@ -1820,32 +1908,30 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
                        rc = init_resources(adapter);
                        if (rc)
-                               return rc;
+                               goto out;
 
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
-                               return rc;
+                               goto out;
 
                        rc = reset_rx_pools(adapter);
                        if (rc)
-                               return rc;
+                               goto out;
                }
                ibmvnic_disable_irqs(adapter);
        }
        adapter->state = VNIC_CLOSED;
 
-       if (reset_state == VNIC_CLOSED)
-               return 0;
+       if (reset_state == VNIC_CLOSED) {
+               rc = 0;
+               goto out;
+       }
 
        rc = __ibmvnic_open(netdev);
        if (rc) {
-               if (list_empty(&adapter->rwi_list))
-                       adapter->state = VNIC_CLOSED;
-               else
-                       adapter->state = reset_state;
-
-               return 0;
+               rc = IBMVNIC_OPEN_FAILED;
+               goto out;
        }
 
        /* refresh device's multicast list */
@@ -1855,11 +1941,15 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_schedule(&adapter->napi[i]);
 
-       if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
-           adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
+       if (adapter->reset_reason != VNIC_RESET_FAILOVER)
                call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
-       return 0;
+       rc = 0;
+
+out:
+       rtnl_unlock();
+
+       return rc;
 }
 
 static int do_hard_reset(struct ibmvnic_adapter *adapter,
@@ -1919,14 +2009,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
                return 0;
 
        rc = __ibmvnic_open(netdev);
-       if (rc) {
-               if (list_empty(&adapter->rwi_list))
-                       adapter->state = VNIC_CLOSED;
-               else
-                       adapter->state = reset_state;
-
-               return 0;
-       }
+       if (rc)
+               return IBMVNIC_OPEN_FAILED;
 
        return 0;
 }
@@ -1965,20 +2049,17 @@ static void __ibmvnic_reset(struct work_struct *work)
 {
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
-       bool we_lock_rtnl = false;
        u32 reset_state;
        int rc = 0;
 
        adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
 
-       /* netif_set_real_num_xx_queues needs to take rtnl lock here
-        * unless wait_for_reset is set, in which case the rtnl lock
-        * has already been taken before initializing the reset
-        */
-       if (!adapter->wait_for_reset) {
-               rtnl_lock();
-               we_lock_rtnl = true;
+       if (test_and_set_bit_lock(0, &adapter->resetting)) {
+               schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
+                                     IBMVNIC_RESET_DELAY);
+               return;
        }
+
        reset_state = adapter->state;
 
        rwi = get_next_rwi(adapter);
@@ -1990,22 +2071,43 @@ static void __ibmvnic_reset(struct work_struct *work)
                        break;
                }
 
-               if (adapter->force_reset_recovery) {
-                       adapter->force_reset_recovery = false;
-                       rc = do_hard_reset(adapter, rwi, reset_state);
+               if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+                       /* CHANGE_PARAM requestor holds rtnl_lock */
+                       rc = do_change_param_reset(adapter, rwi, reset_state);
+               } else if (adapter->force_reset_recovery) {
+                       /* Transport event occurred during previous reset */
+                       if (adapter->wait_for_reset) {
+                               /* Previous was CHANGE_PARAM; caller locked */
+                               adapter->force_reset_recovery = false;
+                               rc = do_hard_reset(adapter, rwi, reset_state);
+                       } else {
+                               rtnl_lock();
+                               adapter->force_reset_recovery = false;
+                               rc = do_hard_reset(adapter, rwi, reset_state);
+                               rtnl_unlock();
+                       }
                } else {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
-               if (rc && rc != IBMVNIC_INIT_FAILED &&
+               if (rc == IBMVNIC_OPEN_FAILED) {
+                       if (list_empty(&adapter->rwi_list))
+                               adapter->state = VNIC_CLOSED;
+                       else
+                               adapter->state = reset_state;
+                       rc = 0;
+               } else if (rc && rc != IBMVNIC_INIT_FAILED &&
                    !adapter->force_reset_recovery)
                        break;
 
                rwi = get_next_rwi(adapter);
+
+               if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
+                           rwi->reset_reason == VNIC_RESET_MOBILITY))
+                       adapter->force_reset_recovery = true;
        }
 
        if (adapter->wait_for_reset) {
-               adapter->wait_for_reset = false;
                adapter->reset_done_rc = rc;
                complete(&adapter->reset_done);
        }
@@ -2015,9 +2117,16 @@ static void __ibmvnic_reset(struct work_struct *work)
                free_all_rwi(adapter);
        }
 
-       adapter->resetting = false;
-       if (we_lock_rtnl)
-               rtnl_unlock();
+       clear_bit_unlock(0, &adapter->resetting);
+}
+
+static void __ibmvnic_delayed_reset(struct work_struct *work)
+{
+       struct ibmvnic_adapter *adapter;
+
+       adapter = container_of(work, struct ibmvnic_adapter,
+                              ibmvnic_delayed_reset.work);
+       __ibmvnic_reset(&adapter->ibmvnic_reset);
 }
 
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -2072,14 +2181,11 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
        rwi->reset_reason = reason;
        list_add_tail(&rwi->list, &adapter->rwi_list);
        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
-       adapter->resetting = true;
        netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
        schedule_work(&adapter->ibmvnic_reset);
 
        return 0;
 err:
-       if (adapter->wait_for_reset)
-               adapter->wait_for_reset = false;
        return -ret;
 }
 
@@ -2119,7 +2225,7 @@ restart_poll:
                u16 offset;
                u8 flags = 0;
 
-               if (unlikely(adapter->resetting &&
+               if (unlikely(test_bit(0, &adapter->resetting) &&
                             adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
                        enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
                        napi_complete_done(napi, frames_processed);
@@ -2770,7 +2876,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
                return 1;
        }
 
-       if (adapter->resetting &&
+       if (test_bit(0, &adapter->resetting) &&
            adapter->reset_reason == VNIC_RESET_MOBILITY) {
                u64 val = (0xff000000) | scrq->hw_irq;
 
@@ -3320,7 +3426,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
        if (rc) {
                if (rc == H_CLOSED) {
                        dev_warn(dev, "CRQ Queue closed\n");
-                       if (adapter->resetting)
+                       if (test_bit(0, &adapter->resetting))
                                ibmvnic_reset(adapter, VNIC_RESET_FATAL);
                }
 
@@ -4312,13 +4418,14 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
 {
        struct net_device *netdev = adapter->netdev;
        int rc;
+       __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
 
        rc = crq->query_phys_parms_rsp.rc.code;
        if (rc) {
                netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
                return rc;
        }
-       switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) {
+       switch (rspeed) {
        case IBMVNIC_10MBPS:
                adapter->speed = SPEED_10;
                break;
@@ -4344,8 +4451,8 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
                adapter->speed = SPEED_100000;
                break;
        default:
-               netdev_warn(netdev, "Unknown speed 0x%08x\n",
-                           cpu_to_be32(crq->query_phys_parms_rsp.speed));
+               if (netif_carrier_ok(netdev))
+                       netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
                adapter->speed = SPEED_UNKNOWN;
        }
        if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
@@ -4395,7 +4502,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
        case IBMVNIC_CRQ_XPORT_EVENT:
                netif_carrier_off(netdev);
                adapter->crq.active = false;
-               if (adapter->resetting)
+               if (test_bit(0, &adapter->resetting))
                        adapter->force_reset_recovery = true;
                if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
                        dev_info(dev, "Migrated, re-enabling adapter\n");
@@ -4733,7 +4840,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
                return -1;
        }
 
-       if (adapter->resetting && !adapter->wait_for_reset &&
+       if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
            adapter->reset_reason != VNIC_RESET_MOBILITY) {
                if (adapter->req_rx_queues != old_num_rx_queues ||
                    adapter->req_tx_queues != old_num_tx_queues) {
@@ -4845,10 +4952,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        spin_lock_init(&adapter->stats_lock);
 
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+       INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
+                         __ibmvnic_delayed_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
        init_completion(&adapter->init_done);
-       adapter->resetting = false;
+       clear_bit(0, &adapter->resetting);
 
        do {
                rc = init_crq_queue(adapter);
index 70bd286f89325d34cad7831ce793860f5a4eed8f..ebc39248b334afb39f7d186d0a57e55150c9168f 100644 (file)
@@ -20,6 +20,7 @@
 #define IBMVNIC_INVALID_MAP    -1
 #define IBMVNIC_STATS_TIMEOUT  1
 #define IBMVNIC_INIT_FAILED    2
+#define IBMVNIC_OPEN_FAILED    3
 
 /* basic structures plus 100 2k buffers */
 #define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305
@@ -38,6 +39,8 @@
 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
 #define IBMVNIC_BUFFER_HLEN 500
 
+#define IBMVNIC_RESET_DELAY 100
+
 static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
 #define IBMVNIC_USE_SERVER_MAXES 0x1
        "use-server-maxes"
@@ -1076,7 +1079,8 @@ struct ibmvnic_adapter {
        spinlock_t rwi_lock;
        struct list_head rwi_list;
        struct work_struct ibmvnic_reset;
-       bool resetting;
+       struct delayed_work ibmvnic_delayed_reset;
+       unsigned long resetting;
        bool napi_enabled, from_passive_init;
 
        bool failover_pending;
index 0a2ec387a482f66df3ecd61ce3daba829a53a8bc..095f6c71b4fa1dc61476be81f49c1b5c11a5c19b 100644 (file)
@@ -3108,7 +3108,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
        skb_put(skb, len);
 
        if (dev->features & NETIF_F_RXCSUM) {
-               skb->csum = csum;
+               skb->csum = le16_to_cpu(csum);
                skb->ip_summed = CHECKSUM_COMPLETE;
        }
 
index 0dba272a5b2f04a8b9555e9bedb4ec63840747fc..a1f20b20529914d3956784fa87823522cccefed0 100644 (file)
@@ -20,15 +20,15 @@ config MLX5_ACCEL
        bool
 
 config MLX5_FPGA
-        bool "Mellanox Technologies Innova support"
-        depends on MLX5_CORE
+       bool "Mellanox Technologies Innova support"
+       depends on MLX5_CORE
        select MLX5_ACCEL
-        ---help---
-          Build support for the Innova family of network cards by Mellanox
-          Technologies. Innova network cards are comprised of a ConnectX chip
-          and an FPGA chip on one board. If you select this option, the
-          mlx5_core driver will include the Innova FPGA core and allow building
-          sandbox-specific client drivers.
+       ---help---
+         Build support for the Innova family of network cards by Mellanox
+         Technologies. Innova network cards are comprised of a ConnectX chip
+         and an FPGA chip on one board. If you select this option, the
+         mlx5_core driver will include the Innova FPGA core and allow building
+         sandbox-specific client drivers.
 
 config MLX5_CORE_EN
        bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
@@ -58,14 +58,14 @@ config MLX5_EN_RXNFC
          API.
 
 config MLX5_MPFS
-        bool "Mellanox Technologies MLX5 MPFS support"
-        depends on MLX5_CORE_EN
+       bool "Mellanox Technologies MLX5 MPFS support"
+       depends on MLX5_CORE_EN
        default y
-        ---help---
+       ---help---
          Mellanox Technologies Ethernet Multi-Physical Function Switch (MPFS)
-          support in ConnectX NIC. MPFs is required for when multi-PF configuration
-          is enabled to allow passing user configured unicast MAC addresses to the
-          requesting PF.
+         support in ConnectX NIC. MPFS is required when multi-PF configuration
+         is enabled to allow passing user configured unicast MAC addresses to the
+         requesting PF.
 
 config MLX5_ESWITCH
        bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
@@ -73,10 +73,10 @@ config MLX5_ESWITCH
        default y
        ---help---
          Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.
-          E-Switch provides internal SRIOV packet steering and switching for the
-          enabled VFs and PF in two available modes:
-                Legacy SRIOV mode (L2 mac vlan steering based).
-                Switchdev mode (eswitch offloads).
+         E-Switch provides internal SRIOV packet steering and switching for the
+         enabled VFs and PF in two available modes:
+               Legacy SRIOV mode (L2 mac vlan steering based).
+               Switchdev mode (eswitch offloads).
 
 config MLX5_CORE_EN_DCB
        bool "Data Center Bridging (DCB) Support"
index eed7101e8bb7d60b3523d54201a7489d6a372a42..acd946f2ddbe725d4612236541d2c1ad374ed7ed 100644 (file)
@@ -399,10 +399,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
                      struct mlx5_flow_table *ft,
                      struct ethtool_rx_flow_spec *fs)
 {
+       struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
        struct mlx5_flow_destination *dst = NULL;
-       struct mlx5_flow_act flow_act = {0};
-       struct mlx5_flow_spec *spec;
        struct mlx5_flow_handle *rule;
+       struct mlx5_flow_spec *spec;
        int err = 0;
 
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
index da7555fdb4d563286738c024a113161066e9a78c..3e78a727f3e68634e2fb5f7e65838dce963a90bc 100644 (file)
@@ -1664,46 +1664,63 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                return err;
        }
 
-       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-               struct flow_match_ipv4_addrs match;
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+               struct flow_match_control match;
+               u16 addr_type;
 
-               flow_rule_match_enc_ipv4_addrs(rule, &match);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-                        src_ipv4_src_ipv6.ipv4_layout.ipv4,
-                        ntohl(match.mask->src));
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                        src_ipv4_src_ipv6.ipv4_layout.ipv4,
-                        ntohl(match.key->src));
-
-               MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-                        dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-                        ntohl(match.mask->dst));
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                        dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-                        ntohl(match.key->dst));
-
-               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
-       } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
-               struct flow_match_ipv6_addrs match;
+               flow_rule_match_enc_control(rule, &match);
+               addr_type = match.key->addr_type;
 
-               flow_rule_match_enc_ipv6_addrs(rule, &match);
-               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
-                      &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
-                      &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+               /* Tunnel addr_type uses the same key IDs as non-tunnel */
+               if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+                       struct flow_match_ipv4_addrs match;
 
-               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-                      &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
-               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-                      &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+                       flow_rule_match_enc_ipv4_addrs(rule, &match);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+                                src_ipv4_src_ipv6.ipv4_layout.ipv4,
+                                ntohl(match.mask->src));
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                src_ipv4_src_ipv6.ipv4_layout.ipv4,
+                                ntohl(match.key->src));
 
-               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+                                dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+                                ntohl(match.mask->dst));
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                                dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+                                ntohl(match.key->dst));
+
+                       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+                                        ethertype);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+                                ETH_P_IP);
+               } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+                       struct flow_match_ipv6_addrs match;
+
+                       flow_rule_match_enc_ipv6_addrs(rule, &match);
+                       memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                           src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                              &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                  ipv6));
+                       memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                           src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                              &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                 ipv6));
+
+                       memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                           dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                              &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                  ipv6));
+                       memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                           dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                              &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+                                                                 ipv6));
+
+                       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+                                        ethertype);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+                                ETH_P_IPV6);
+               }
        }
 
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
index 9648c229780374438807542c906b77549c3ba55e..e47dd7c1b909c6ba72aa2ae77953f121d6fe9cd6 100644 (file)
@@ -1568,6 +1568,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},   /* ConnectX Family mlx5Gen Virtual Function */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},   /* BlueField integrated ConnectX-5 network controller VF */
+       { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
        { 0, }
 };
 
index 7d81a7735de51a9d235ed59f4c4265d346c212b1..b74b7d0f659052e7148a20ccef762b9de06a4523 100644 (file)
@@ -615,7 +615,7 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
                 * that recalculates the CS and forwards to the vport.
                 */
                ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
-                                                               dest_action->vport.num,
+                                                               dest_action->vport.caps->num,
                                                                final_icm_addr);
                if (ret) {
                        mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
@@ -744,7 +744,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                        dest_action = action;
                        if (rx_rule) {
                                /* Loopback on WIRE vport is not supported */
-                               if (action->vport.num == WIRE_PORT)
+                               if (action->vport.caps->num == WIRE_PORT)
                                        goto out_invalid_arg;
 
                                attr.final_icm_addr = action->vport.caps->icm_address_rx;
index 01008cd66f75c86cb5887dc1dbfe7b5de2beb52e..67dea7698fc996bf0a86c40e93ded23547e5d12a 100644 (file)
@@ -230,8 +230,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
                    (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
                     dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
                        ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
-                                                           &dmn->info.caps,
-                                                           inner, rx);
+                                                           dmn, inner, rx);
                        if (ret)
                                return ret;
                }
@@ -458,13 +457,11 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
 
        prev_matcher = NULL;
        if (next_matcher && !first)
-               prev_matcher = list_entry(next_matcher->matcher_list.prev,
-                                         struct mlx5dr_matcher,
-                                         matcher_list);
+               prev_matcher = list_prev_entry(next_matcher, matcher_list);
        else if (!first)
-               prev_matcher = list_entry(tbl->matcher_list.prev,
-                                         struct mlx5dr_matcher,
-                                         matcher_list);
+               prev_matcher = list_last_entry(&tbl->matcher_list,
+                                              struct mlx5dr_matcher,
+                                              matcher_list);
 
        if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
            dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
index 3bc3f66b8fa8fe0dac4f4e86e6988bc3e903e30f..4187f2b112b8e2e81f38d50a2daadc1ea666bd4c 100644 (file)
@@ -18,7 +18,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
        struct mlx5dr_ste *last_ste;
 
        /* The new entry will be inserted after the last */
-       last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node);
+       last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
        WARN_ON(!last_ste);
 
        ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
index 6b0af64536d8b3b77d6384cda5c6b888b549de91..4efe1b0be4a84b2cee79a5d0f9052c37edebacb1 100644 (file)
@@ -429,12 +429,9 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
        struct mlx5dr_ste *prev_ste;
        u64 miss_addr;
 
-       prev_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->prev, struct mlx5dr_ste,
-                             miss_list_node);
-       if (!prev_ste) {
-               WARN_ON(true);
+       prev_ste = list_prev_entry(ste, miss_list_node);
+       if (WARN_ON(!prev_ste))
                return;
-       }
 
        miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
        mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
@@ -461,8 +458,8 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
        struct mlx5dr_ste_htbl *stats_tbl;
        LIST_HEAD(send_ste_list);
 
-       first_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->next,
-                              struct mlx5dr_ste, miss_list_node);
+       first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
+                                    struct mlx5dr_ste, miss_list_node);
        stats_tbl = first_ste->htbl;
 
        /* Two options:
@@ -479,8 +476,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
                if (last_ste == first_ste)
                        next_ste = NULL;
                else
-                       next_ste = list_entry(ste->miss_list_node.next,
-                                             struct mlx5dr_ste, miss_list_node);
+                       next_ste = list_next_entry(ste, miss_list_node);
 
                if (!next_ste) {
                        /* One and only entry in the list */
@@ -841,6 +837,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
        spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
 
        spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
+       spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
+                                                     source_eswitch_owner_vhca_id);
 
        spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
        spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
@@ -2254,11 +2252,18 @@ static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
 {
        struct mlx5dr_match_misc *misc_mask = &value->misc;
 
-       if (misc_mask->source_port != 0xffff)
+       /* Partial misc source_port is not supported */
+       if (misc_mask->source_port && misc_mask->source_port != 0xffff)
+               return -EINVAL;
+
+       /* Partial misc source_eswitch_owner_vhca_id is not supported */
+       if (misc_mask->source_eswitch_owner_vhca_id &&
+           misc_mask->source_eswitch_owner_vhca_id != 0xffff)
                return -EINVAL;
 
        DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
        DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+       misc_mask->source_eswitch_owner_vhca_id = 0;
 
        return 0;
 }
@@ -2270,17 +2275,33 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
        struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
        struct mlx5dr_match_misc *misc = &value->misc;
        struct mlx5dr_cmd_vport_cap *vport_cap;
+       struct mlx5dr_domain *dmn = sb->dmn;
+       struct mlx5dr_cmd_caps *caps;
        u8 *tag = hw_ste->tag;
 
        DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
 
-       vport_cap = mlx5dr_get_vport_cap(sb->caps, misc->source_port);
+       if (sb->vhca_id_valid) {
+               /* Find port GVMI based on the eswitch_owner_vhca_id */
+               if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+                       caps = &dmn->info.caps;
+               else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+                                          dmn->peer_dmn->info.caps.gvmi))
+                       caps = &dmn->peer_dmn->info.caps;
+               else
+                       return -EINVAL;
+       } else {
+               caps = &dmn->info.caps;
+       }
+
+       vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
        if (!vport_cap)
                return -EINVAL;
 
        if (vport_cap->vport_gvmi)
                MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
 
+       misc->source_eswitch_owner_vhca_id = 0;
        misc->source_port = 0;
 
        return 0;
@@ -2288,17 +2309,20 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 
 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
                                  struct mlx5dr_match_param *mask,
-                                 struct mlx5dr_cmd_caps *caps,
+                                 struct mlx5dr_domain *dmn,
                                  bool inner, bool rx)
 {
        int ret;
 
+       /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
+       sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
+
        ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
        if (ret)
                return ret;
 
        sb->rx = rx;
-       sb->caps = caps;
+       sb->dmn = dmn;
        sb->inner = inner;
        sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
        sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
index a37ee6359be2909da75e44a413e69a707156c0f4..1cb3769d4e3c84deb5e17bc76f81440511b6659c 100644 (file)
@@ -180,6 +180,8 @@ void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
 struct mlx5dr_ste_build {
        u8 inner:1;
        u8 rx:1;
+       u8 vhca_id_valid:1;
+       struct mlx5dr_domain *dmn;
        struct mlx5dr_cmd_caps *caps;
        u8 lu_type;
        u16 byte_mask;
@@ -331,7 +333,7 @@ void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
                                 bool inner, bool rx);
 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
                                  struct mlx5dr_match_param *mask,
-                                 struct mlx5dr_cmd_caps *caps,
+                                 struct mlx5dr_domain *dmn,
                                  bool inner, bool rx);
 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
 
@@ -453,7 +455,7 @@ struct mlx5dr_match_misc {
        u32 gre_c_present:1;
        /* Source port.;0xffff determines wire port */
        u32 source_port:16;
-       u32 reserved_auto2:16;
+       u32 source_eswitch_owner_vhca_id:16;
        /* VLAN ID of first VLAN tag the inner header of the incoming packet.
         * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
         */
@@ -745,7 +747,6 @@ struct mlx5dr_action {
                struct {
                        struct mlx5dr_domain *dmn;
                        struct mlx5dr_cmd_vport_cap *caps;
-                       u32 num;
                } vport;
                struct {
                        u32 vlan_hdr; /* tpid_pcp_dei_vid */
index dd234cf7b39df18c52694bdd5a5d098a90c1b750..dcf9562bce8a9773d7d9d155f5371032275756f9 100644 (file)
@@ -3771,6 +3771,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_port_qdiscs_init;
        }
 
+       err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
+                                    false);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_vlan_clear;
+       }
+
        err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
@@ -3818,6 +3826,7 @@ err_port_vlan_create:
 err_port_pvid_set:
        mlxsw_sp_port_nve_fini(mlxsw_sp_port);
 err_port_nve_init:
+err_port_vlan_clear:
        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
 err_port_qdiscs_init:
        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
index 0ad1a24abfc63d3c4d84f166a44a73af2e873c31..b607919c8ad02ce5f454de309ca1ff27238f9add 100644 (file)
@@ -21,6 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                         struct netlink_ext_ack *extack)
 {
        const struct flow_action_entry *act;
+       int mirror_act_count = 0;
        int err, i;
 
        if (!flow_action_has_entries(flow_action))
@@ -105,6 +106,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                case FLOW_ACTION_MIRRED: {
                        struct net_device *out_dev = act->dev;
 
+                       if (mirror_act_count++) {
+                               NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
+                               return -EOPNOTSUPP;
+                       }
+
                        err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
                                                            block, out_dev,
                                                            extack);
index 23ebddfb953255802065799421f1c9ef80ea09b5..9f8a1f69c0c4caa28b08a663b0ae71933e3aa621 100644 (file)
@@ -176,8 +176,10 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
        u8 mask, val;
        int err;
 
-       if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+       if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
+               err = -EOPNOTSUPP;
                goto err_delete;
+       }
 
        tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
 
@@ -198,14 +200,18 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
                if ((iter->val & cmask) == (val & cmask) &&
                    iter->band != knode->res->classid) {
                        NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+                       err = -EOPNOTSUPP;
                        goto err_delete;
                }
        }
 
        if (!match) {
                match = kzalloc(sizeof(*match), GFP_KERNEL);
-               if (!match)
-                       return -ENOMEM;
+               if (!match) {
+                       err = -ENOMEM;
+                       goto err_delete;
+               }
+
                list_add(&match->list, &alink->dscp_map);
        }
        match->handle = knode->handle;
@@ -221,7 +227,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
 
 err_delete:
        nfp_abm_u32_knode_delete(alink, knode);
-       return -EOPNOTSUPP;
+       return err;
 }
 
 static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
index 7a20447cca194bb0f4d38ce9570dc06ae30abd84..d8ad9346a26a48bfbc64610a811538299954b76a 100644 (file)
@@ -400,6 +400,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
+                       nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
 
@@ -413,6 +414,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                port = nfp_port_alloc(app, port_type, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
+                       kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
@@ -433,6 +435,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                err = nfp_repr_init(app, repr,
                                    port_id, port, priv->nn->dp.netdev);
                if (err) {
+                       kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
@@ -515,6 +518,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
+                       nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
 
@@ -525,11 +529,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
                port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
+                       kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
                err = nfp_port_init_phy_port(app->pf, app, port, i);
                if (err) {
+                       kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
@@ -542,6 +548,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
                err = nfp_repr_init(app, repr,
                                    cmsg_port_id, port, priv->nn->dp.netdev);
                if (err) {
+                       kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
deleted file mode 100644 (file)
index cf6e7eb..0000000
+++ /dev/null
@@ -1,497 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/net/ethernet/netx-eth.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-
-#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/mii.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <mach/netx-regs.h>
-#include <mach/pfifo.h>
-#include <mach/xc.h>
-#include <linux/platform_data/eth-netx.h>
-
-/* XC Fifo Offsets */
-#define EMPTY_PTR_FIFO(xcno)    (0 + ((xcno) << 3))    /* Index of the empty pointer FIFO */
-#define IND_FIFO_PORT_HI(xcno)  (1 + ((xcno) << 3))    /* Index of the FIFO where received */
-                                                       /* Data packages are indicated by XC */
-#define IND_FIFO_PORT_LO(xcno)  (2 + ((xcno) << 3))    /* Index of the FIFO where received */
-                                                       /* Data packages are indicated by XC */
-#define REQ_FIFO_PORT_HI(xcno)  (3 + ((xcno) << 3))    /* Index of the FIFO where Data packages */
-                                                       /* have to be indicated by ARM which */
-                                                       /* shall be sent */
-#define REQ_FIFO_PORT_LO(xcno)  (4 + ((xcno) << 3))    /* Index of the FIFO where Data packages */
-                                                       /* have to be indicated by ARM which shall */
-                                                       /* be sent */
-#define CON_FIFO_PORT_HI(xcno)  (5 + ((xcno) << 3))    /* Index of the FIFO where sent Data packages */
-                                                       /* are confirmed */
-#define CON_FIFO_PORT_LO(xcno)  (6 + ((xcno) << 3))    /* Index of the FIFO where sent Data */
-                                                       /* packages are confirmed */
-#define PFIFO_MASK(xcno)        (0x7f << (xcno*8))
-
-#define FIFO_PTR_FRAMELEN_SHIFT 0
-#define FIFO_PTR_FRAMELEN_MASK  (0x7ff << 0)
-#define FIFO_PTR_FRAMELEN(len)  (((len) << 0) & FIFO_PTR_FRAMELEN_MASK)
-#define FIFO_PTR_TIMETRIG       (1<<11)
-#define FIFO_PTR_MULTI_REQ
-#define FIFO_PTR_ORIGIN         (1<<14)
-#define FIFO_PTR_VLAN           (1<<15)
-#define FIFO_PTR_FRAMENO_SHIFT  16
-#define FIFO_PTR_FRAMENO_MASK   (0x3f << 16)
-#define FIFO_PTR_FRAMENO(no)    (((no) << 16) & FIFO_PTR_FRAMENO_MASK)
-#define FIFO_PTR_SEGMENT_SHIFT  22
-#define FIFO_PTR_SEGMENT_MASK   (0xf << 22)
-#define FIFO_PTR_SEGMENT(seg)   (((seg) & 0xf) << 22)
-#define FIFO_PTR_ERROR_SHIFT    28
-#define FIFO_PTR_ERROR_MASK     (0xf << 28)
-
-#define ISR_LINK_STATUS_CHANGE (1<<4)
-#define ISR_IND_LO             (1<<3)
-#define ISR_CON_LO             (1<<2)
-#define ISR_IND_HI             (1<<1)
-#define ISR_CON_HI             (1<<0)
-
-#define ETH_MAC_LOCAL_CONFIG 0x1560
-#define ETH_MAC_4321         0x1564
-#define ETH_MAC_65           0x1568
-
-#define MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT 16
-#define MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK (0xf<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT)
-#define MAC_TRAFFIC_CLASS_ARRANGEMENT(x) (((x)<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT) & MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK)
-#define LOCAL_CONFIG_LINK_STATUS_IRQ_EN (1<<24)
-#define LOCAL_CONFIG_CON_LO_IRQ_EN (1<<23)
-#define LOCAL_CONFIG_CON_HI_IRQ_EN (1<<22)
-#define LOCAL_CONFIG_IND_LO_IRQ_EN (1<<21)
-#define LOCAL_CONFIG_IND_HI_IRQ_EN (1<<20)
-
-#define CARDNAME "netx-eth"
-
-/* LSB must be zero */
-#define INTERNAL_PHY_ADR 0x1c
-
-struct netx_eth_priv {
-       void                    __iomem *sram_base, *xpec_base, *xmac_base;
-       int                     id;
-       struct mii_if_info      mii;
-       u32                     msg_enable;
-       struct xc               *xc;
-       spinlock_t              lock;
-};
-
-static void netx_eth_set_multicast_list(struct net_device *ndev)
-{
-       /* implement me */
-}
-
-static int
-netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-       unsigned char *buf = skb->data;
-       unsigned int len = skb->len;
-
-       spin_lock_irq(&priv->lock);
-       memcpy_toio(priv->sram_base + 1560, (void *)buf, len);
-       if (len < 60) {
-               memset_io(priv->sram_base + 1560 + len, 0, 60 - len);
-               len = 60;
-       }
-
-       pfifo_push(REQ_FIFO_PORT_LO(priv->id),
-                  FIFO_PTR_SEGMENT(priv->id) |
-                  FIFO_PTR_FRAMENO(1) |
-                  FIFO_PTR_FRAMELEN(len));
-
-       ndev->stats.tx_packets++;
-       ndev->stats.tx_bytes += skb->len;
-
-       netif_stop_queue(ndev);
-       spin_unlock_irq(&priv->lock);
-       dev_kfree_skb(skb);
-
-       return NETDEV_TX_OK;
-}
-
-static void netx_eth_receive(struct net_device *ndev)
-{
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-       unsigned int val, frameno, seg, len;
-       unsigned char *data;
-       struct sk_buff *skb;
-
-       val = pfifo_pop(IND_FIFO_PORT_LO(priv->id));
-
-       frameno = (val & FIFO_PTR_FRAMENO_MASK) >> FIFO_PTR_FRAMENO_SHIFT;
-       seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
-       len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;
-
-       skb = netdev_alloc_skb(ndev, len);
-       if (unlikely(skb == NULL)) {
-               ndev->stats.rx_dropped++;
-               return;
-       }
-
-       data = skb_put(skb, len);
-
-       memcpy_fromio(data, priv->sram_base + frameno * 1560, len);
-
-       pfifo_push(EMPTY_PTR_FIFO(priv->id),
-               FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno));
-
-       skb->protocol = eth_type_trans(skb, ndev);
-       netif_rx(skb);
-       ndev->stats.rx_packets++;
-       ndev->stats.rx_bytes += len;
-}
-
-static irqreturn_t
-netx_eth_interrupt(int irq, void *dev_id)
-{
-       struct net_device *ndev = dev_id;
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-       int status;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
-       while (status) {
-               int fill_level;
-               writel(status, NETX_PFIFO_XPEC_ISR(priv->id));
-
-               if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
-                       printk("%s: unexpected status: 0x%08x\n",
-                           __func__, status);
-
-               fill_level =
-                   readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
-               while (fill_level--)
-                       netx_eth_receive(ndev);
-
-               if (status & ISR_CON_LO)
-                       netif_wake_queue(ndev);
-
-               if (status & ISR_LINK_STATUS_CHANGE)
-                       mii_check_media(&priv->mii, netif_msg_link(priv), 1);
-
-               status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
-       }
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_HANDLED;
-}
-
-static int netx_eth_open(struct net_device *ndev)
-{
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-
-       if (request_irq
-           (ndev->irq, netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev))
-               return -EAGAIN;
-
-       writel(ndev->dev_addr[0] |
-              ndev->dev_addr[1]<<8 |
-              ndev->dev_addr[2]<<16 |
-              ndev->dev_addr[3]<<24,
-              priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
-       writel(ndev->dev_addr[4] |
-              ndev->dev_addr[5]<<8,
-              priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
-
-       writel(LOCAL_CONFIG_LINK_STATUS_IRQ_EN |
-               LOCAL_CONFIG_CON_LO_IRQ_EN |
-               LOCAL_CONFIG_CON_HI_IRQ_EN |
-               LOCAL_CONFIG_IND_LO_IRQ_EN |
-               LOCAL_CONFIG_IND_HI_IRQ_EN,
-               priv->xpec_base + NETX_XPEC_RAM_START_OFS +
-               ETH_MAC_LOCAL_CONFIG);
-
-       mii_check_media(&priv->mii, netif_msg_link(priv), 1);
-       netif_start_queue(ndev);
-
-       return 0;
-}
-
-static int netx_eth_close(struct net_device *ndev)
-{
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-
-       netif_stop_queue(ndev);
-
-       writel(0,
-           priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_LOCAL_CONFIG);
-
-       free_irq(ndev->irq, ndev);
-
-       return 0;
-}
-
-static void netx_eth_timeout(struct net_device *ndev)
-{
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-       int i;
-
-       printk(KERN_ERR "%s: transmit timed out, resetting\n", ndev->name);
-
-       spin_lock_irq(&priv->lock);
-
-       xc_reset(priv->xc);
-       xc_start(priv->xc);
-
-       for (i=2; i<=18; i++)
-               pfifo_push(EMPTY_PTR_FIFO(priv->id),
-                       FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
-
-       spin_unlock_irq(&priv->lock);
-
-       netif_wake_queue(ndev);
-}
-
-static int
-netx_eth_phy_read(struct net_device *ndev, int phy_id, int reg)
-{
-       unsigned int val;
-
-       val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
-             MIIMU_REGADDR(reg) | MIIMU_PHY_NRES;
-
-       writel(val, NETX_MIIMU);
-       while (readl(NETX_MIIMU) & MIIMU_SNRDY);
-
-       return readl(NETX_MIIMU) >> 16;
-
-}
-
-static void
-netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value)
-{
-       unsigned int val;
-
-       val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
-             MIIMU_REGADDR(reg) | MIIMU_PHY_NRES | MIIMU_OPMODE_WRITE |
-             MIIMU_DATA(value);
-
-       writel(val, NETX_MIIMU);
-       while (readl(NETX_MIIMU) & MIIMU_SNRDY);
-}
-
-static const struct net_device_ops netx_eth_netdev_ops = {
-       .ndo_open               = netx_eth_open,
-       .ndo_stop               = netx_eth_close,
-       .ndo_start_xmit         = netx_eth_hard_start_xmit,
-       .ndo_tx_timeout         = netx_eth_timeout,
-       .ndo_set_rx_mode        = netx_eth_set_multicast_list,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
-};
-
-static int netx_eth_enable(struct net_device *ndev)
-{
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-       unsigned int mac4321, mac65;
-       int running, i, ret;
-       bool inv_mac_addr = false;
-
-       ndev->netdev_ops = &netx_eth_netdev_ops;
-       ndev->watchdog_timeo = msecs_to_jiffies(5000);
-
-       priv->msg_enable       = NETIF_MSG_LINK;
-       priv->mii.phy_id_mask  = 0x1f;
-       priv->mii.reg_num_mask = 0x1f;
-       priv->mii.force_media  = 0;
-       priv->mii.full_duplex  = 0;
-       priv->mii.dev        = ndev;
-       priv->mii.mdio_read    = netx_eth_phy_read;
-       priv->mii.mdio_write   = netx_eth_phy_write;
-       priv->mii.phy_id = INTERNAL_PHY_ADR + priv->id;
-
-       running = xc_running(priv->xc);
-       xc_stop(priv->xc);
-
-       /* if the xc engine is already running, assume the bootloader has
-        * loaded the firmware for us
-        */
-       if (running) {
-               /* get Node Address from hardware */
-               mac4321 = readl(priv->xpec_base +
-                       NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
-               mac65 = readl(priv->xpec_base +
-                       NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
-
-               ndev->dev_addr[0] = mac4321 & 0xff;
-               ndev->dev_addr[1] = (mac4321 >> 8) & 0xff;
-               ndev->dev_addr[2] = (mac4321 >> 16) & 0xff;
-               ndev->dev_addr[3] = (mac4321 >> 24) & 0xff;
-               ndev->dev_addr[4] = mac65 & 0xff;
-               ndev->dev_addr[5] = (mac65 >> 8) & 0xff;
-       } else {
-               if (xc_request_firmware(priv->xc)) {
-                       printk(CARDNAME ": requesting firmware failed\n");
-                       return -ENODEV;
-               }
-       }
-
-       xc_reset(priv->xc);
-       xc_start(priv->xc);
-
-       if (!is_valid_ether_addr(ndev->dev_addr))
-               inv_mac_addr = true;
-
-       for (i=2; i<=18; i++)
-               pfifo_push(EMPTY_PTR_FIFO(priv->id),
-                       FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
-
-       ret = register_netdev(ndev);
-       if (inv_mac_addr)
-               printk("%s: Invalid ethernet MAC address. Please set using ip\n",
-                      ndev->name);
-
-       return ret;
-}
-
-static int netx_eth_drv_probe(struct platform_device *pdev)
-{
-       struct netx_eth_priv *priv;
-       struct net_device *ndev;
-       struct netxeth_platform_data *pdata;
-       int ret;
-
-       ndev = alloc_etherdev(sizeof (struct netx_eth_priv));
-       if (!ndev) {
-               ret = -ENOMEM;
-               goto exit;
-       }
-       SET_NETDEV_DEV(ndev, &pdev->dev);
-
-       platform_set_drvdata(pdev, ndev);
-
-       priv = netdev_priv(ndev);
-
-       pdata = dev_get_platdata(&pdev->dev);
-       priv->xc = request_xc(pdata->xcno, &pdev->dev);
-       if (!priv->xc) {
-               dev_err(&pdev->dev, "unable to request xc engine\n");
-               ret = -ENODEV;
-               goto exit_free_netdev;
-       }
-
-       ndev->irq = priv->xc->irq;
-       priv->id = pdev->id;
-       priv->xpec_base = priv->xc->xpec_base;
-       priv->xmac_base = priv->xc->xmac_base;
-       priv->sram_base = priv->xc->sram_base;
-
-       spin_lock_init(&priv->lock);
-
-       ret = pfifo_request(PFIFO_MASK(priv->id));
-       if (ret) {
-               printk("unable to request PFIFO\n");
-               goto exit_free_xc;
-       }
-
-       ret = netx_eth_enable(ndev);
-       if (ret)
-               goto exit_free_pfifo;
-
-       return 0;
-exit_free_pfifo:
-       pfifo_free(PFIFO_MASK(priv->id));
-exit_free_xc:
-       free_xc(priv->xc);
-exit_free_netdev:
-       free_netdev(ndev);
-exit:
-       return ret;
-}
-
-static int netx_eth_drv_remove(struct platform_device *pdev)
-{
-       struct net_device *ndev = platform_get_drvdata(pdev);
-       struct netx_eth_priv *priv = netdev_priv(ndev);
-
-       unregister_netdev(ndev);
-       xc_stop(priv->xc);
-       free_xc(priv->xc);
-       free_netdev(ndev);
-       pfifo_free(PFIFO_MASK(priv->id));
-
-       return 0;
-}
-
-static int netx_eth_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       dev_err(&pdev->dev, "suspend not implemented\n");
-       return 0;
-}
-
-static int netx_eth_drv_resume(struct platform_device *pdev)
-{
-       dev_err(&pdev->dev, "resume not implemented\n");
-       return 0;
-}
-
-static struct platform_driver netx_eth_driver = {
-       .probe          = netx_eth_drv_probe,
-       .remove         = netx_eth_drv_remove,
-       .suspend        = netx_eth_drv_suspend,
-       .resume         = netx_eth_drv_resume,
-       .driver         = {
-               .name   = CARDNAME,
-       },
-};
-
-static int __init netx_eth_init(void)
-{
-       unsigned int phy_control, val;
-
-       printk("NetX Ethernet driver\n");
-
-       phy_control = PHY_CONTROL_PHY_ADDRESS(INTERNAL_PHY_ADR>>1) |
-                     PHY_CONTROL_PHY1_MODE(PHY_MODE_ALL) |
-                     PHY_CONTROL_PHY1_AUTOMDIX |
-                     PHY_CONTROL_PHY1_EN |
-                     PHY_CONTROL_PHY0_MODE(PHY_MODE_ALL) |
-                     PHY_CONTROL_PHY0_AUTOMDIX |
-                     PHY_CONTROL_PHY0_EN |
-                     PHY_CONTROL_CLK_XLATIN;
-
-       val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
-       writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
-
-       writel(phy_control | PHY_CONTROL_RESET, NETX_SYSTEM_PHY_CONTROL);
-       udelay(100);
-
-       val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
-       writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
-
-       writel(phy_control, NETX_SYSTEM_PHY_CONTROL);
-
-       return platform_driver_register(&netx_eth_driver);
-}
-
-static void __exit netx_eth_cleanup(void)
-{
-       platform_driver_unregister(&netx_eth_driver);
-}
-
-module_init(netx_eth_init);
-module_exit(netx_eth_cleanup);
-
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" CARDNAME);
-MODULE_FIRMWARE("xc0.bin");
-MODULE_FIRMWARE("xc1.bin");
-MODULE_FIRMWARE("xc2.bin");
index 0b384f97d2fd8816dd566911986f92b6db59346b..2761f3a3ae50886cd2688cd03a51f5e5915dc298 100644 (file)
@@ -1347,7 +1347,7 @@ static int nixge_probe(struct platform_device *pdev)
        }
 
        priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
-       if (priv->phy_mode < 0) {
+       if ((int)priv->phy_mode < 0) {
                netdev_err(ndev, "not find \"phy-mode\" property\n");
                err = -EINVAL;
                goto unregister_mdio;
index 418afb84c84bc3966c53b2aed0c6614a0d088ebd..ee83a71c25093877f517a974a76b4f329e987192 100644 (file)
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config LPC_ENET
-        tristate "NXP ethernet MAC on LPC devices"
-        depends on ARCH_LPC32XX || COMPILE_TEST
-        select PHYLIB
-        help
+       tristate "NXP ethernet MAC on LPC devices"
+       depends on ARCH_LPC32XX || COMPILE_TEST
+       select PHYLIB
+       help
          Say Y or M here if you want to use the NXP ethernet MAC included on
          some NXP LPC devices. You can safely enable this option for LPC32xx
          SoC. Also available as a module.
index 5ea570be837981df68d44a951e56cc1ef35ef822..bd0583e409dfec7d216b2145ac5990ebdff88919 100644 (file)
@@ -26,7 +26,7 @@ config IONIC
          found in
          <file:Documentation/networking/device_drivers/pensando/ionic.rst>.
 
-          To compile this driver as a module, choose M here. The module
-          will be called ionic.
+         To compile this driver as a module, choose M here. The module
+         will be called ionic.
 
 endif # NET_VENDOR_PENSANDO
index 7afc4a365b753f43ad2469ab966177a80f8ae089..bc03cecf80cc9eb4b469ef0fc2c1ec165c7802d2 100644 (file)
@@ -57,7 +57,7 @@ DEFINE_SHOW_ATTRIBUTE(identity);
 void ionic_debugfs_add_ident(struct ionic *ionic)
 {
        debugfs_create_file("identity", 0400, ionic->dentry,
-                           ionic, &identity_fops) ? 0 : -EOPNOTSUPP;
+                           ionic, &identity_fops);
 }
 
 void ionic_debugfs_add_sizes(struct ionic *ionic)
index db7c82742828cf85de1d6fead0927aeff232864e..72107a0627a9957de0ce4c01e6b561f37599b510 100644 (file)
@@ -1704,6 +1704,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
                                              GFP_KERNEL);
 
        if (!lif->rss_ind_tbl) {
+               err = -ENOMEM;
                dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
                goto err_out_free_qcqs;
        }
index 0ae28f0d2523069cb035f45454f928b2c398b418..004c0bfec41d796710a832f5e545271c138638ce 100644 (file)
@@ -779,8 +779,7 @@ qede_rx_build_skb(struct qede_dev *edev,
                        return NULL;
 
                skb_reserve(skb, pad);
-               memcpy(skb_put(skb, len),
-                      page_address(bd->data) + offset, len);
+               skb_put_data(skb, page_address(bd->data) + offset, len);
                qede_reuse_page(rxq, bd);
                goto out;
        }
index 1502fe8b0456c17a8e4b04831189d9f9175d206f..55db7fbd43cc3cfd112c31803e1318bda1555e05 100644 (file)
@@ -2007,7 +2007,7 @@ static int netsec_probe(struct platform_device *pdev)
                           NETIF_MSG_LINK | NETIF_MSG_PROBE;
 
        priv->phy_interface = device_get_phy_mode(&pdev->dev);
-       if (priv->phy_interface < 0) {
+       if ((int)priv->phy_interface < 0) {
                dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
                ret = -ENODEV;
                goto free_ndev;
index 10d0c3e478ab1b5b692a996b9390d8214f3f3c09..6e984d5a729fe9d0ea176b0cb87313e94a18a616 100644 (file)
@@ -1566,7 +1566,7 @@ static int ave_probe(struct platform_device *pdev)
 
        np = dev->of_node;
        phy_mode = of_get_phy_mode(np);
-       if (phy_mode < 0) {
+       if ((int)phy_mode < 0) {
                dev_err(dev, "phy-mode not found\n");
                return -EINVAL;
        }
@@ -1662,19 +1662,19 @@ static int ave_probe(struct platform_device *pdev)
                                               "socionext,syscon-phy-mode",
                                               1, 0, &args);
        if (ret) {
-               netdev_err(ndev, "can't get syscon-phy-mode property\n");
+               dev_err(dev, "can't get syscon-phy-mode property\n");
                goto out_free_netdev;
        }
        priv->regmap = syscon_node_to_regmap(args.np);
        of_node_put(args.np);
        if (IS_ERR(priv->regmap)) {
-               netdev_err(ndev, "can't map syscon-phy-mode\n");
+               dev_err(dev, "can't map syscon-phy-mode\n");
                ret = PTR_ERR(priv->regmap);
                goto out_free_netdev;
        }
        ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
        if (ret) {
-               netdev_err(ndev, "invalid phy-mode setting\n");
+               dev_err(dev, "invalid phy-mode setting\n");
                goto out_free_netdev;
        }
 
index 2c6d7c69c8f741c22c9c55306c04bc920355e0d8..0d21082ceb93d1d0b0e2fb1e7edbb6653ab76385 100644 (file)
@@ -191,7 +191,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
        struct device *dev = &gmac->pdev->dev;
 
        gmac->phy_mode = of_get_phy_mode(dev->of_node);
-       if (gmac->phy_mode < 0) {
+       if ((int)gmac->phy_mode < 0) {
                dev_err(dev, "missing phy mode property\n");
                return -EINVAL;
        }
index 9cda29e4b89d53af56eaa39565a56bec2d9d5aaa..306da8f6b7d541d4d2d3656f77a911f3237a2eb4 100644 (file)
@@ -339,7 +339,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
 
        dwmac->dev = &pdev->dev;
        dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
-       if (dwmac->phy_mode < 0) {
+       if ((int)dwmac->phy_mode < 0) {
                dev_err(&pdev->dev, "missing phy-mode property\n");
                ret = -EINVAL;
                goto err_remove_config_dt;
index d5173dd02a711ed7bfb9a74a91ebc0963716cf0e..2b277b2c586bbd1486e701efa72dfa91a2f2d4d3 100644 (file)
@@ -523,19 +523,18 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
                                  struct stmmac_rss *cfg, u32 num_rxq)
 {
        void __iomem *ioaddr = hw->pcsr;
-       u32 *key = (u32 *)cfg->key;
        int i, ret;
        u32 value;
 
        value = readl(ioaddr + XGMAC_RSS_CTRL);
-       if (!cfg->enable) {
+       if (!cfg || !cfg->enable) {
                value &= ~XGMAC_RSSE;
                writel(value, ioaddr + XGMAC_RSS_CTRL);
                return 0;
        }
 
        for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
-               ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
+               ret = dwxgmac2_rss_write_reg(ioaddr, true, i, cfg->key[i]);
                if (ret)
                        return ret;
        }
index a6cb2aa60e6485d4a0126d3330ac0c348a9c2baa..d3232738fb257d960713738f23273886cdeeb645 100644 (file)
@@ -1557,13 +1557,15 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
        for (queue = 0; queue < rx_count; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
                struct page_pool_params pp_params = { 0 };
+               unsigned int num_pages;
 
                rx_q->queue_index = queue;
                rx_q->priv_data = priv;
 
                pp_params.flags = PP_FLAG_DMA_MAP;
                pp_params.pool_size = DMA_RX_SIZE;
-               pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+               num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+               pp_params.order = ilog2(num_pages);
                pp_params.nid = dev_to_node(priv->device);
                pp_params.dev = priv->device;
                pp_params.dma_dir = DMA_FROM_DEVICE;
index c56e89e1ae563945f2e901158352772709fe48e6..5f66f6161629a3d0e4bb89e42f011f4db4f39931 100644 (file)
@@ -670,7 +670,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
        unsigned int pkt_count;
        int i, ret = 0;
 
-       if (!phydev || !phydev->pause)
+       if (!phydev || (!phydev->pause && !phydev->asym_pause))
                return -EOPNOTSUPP;
 
        tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
@@ -1233,12 +1233,9 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
                return -EOPNOTSUPP;
        if (!priv->dma_cap.l3l4fnum)
                return -EOPNOTSUPP;
-       if (priv->rss.enable) {
-               struct stmmac_rss rss = { .enable = false, };
-
-               stmmac_rss_configure(priv, priv->hw, &rss,
+       if (priv->rss.enable)
+               stmmac_rss_configure(priv, priv->hw, NULL,
                                     priv->plat->rx_queues_to_use);
-       }
 
        dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
        if (!dissector) {
@@ -1357,12 +1354,9 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
                return -EOPNOTSUPP;
        if (!priv->dma_cap.l3l4fnum)
                return -EOPNOTSUPP;
-       if (priv->rss.enable) {
-               struct stmmac_rss rss = { .enable = false, };
-
-               stmmac_rss_configure(priv, priv->hw, &rss,
+       if (priv->rss.enable)
+               stmmac_rss_configure(priv, priv->hw, NULL,
                                     priv->plat->rx_queues_to_use);
-       }
 
        dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
        if (!dissector) {
index 4fc627fb4d1177304bdf4fed1728dfb1082e8b71..676006f32f91368b63a93af927d030ca856daaff 100644 (file)
@@ -1762,7 +1762,7 @@ static int axienet_probe(struct platform_device *pdev)
                }
        } else {
                lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
-               if (lp->phy_mode < 0) {
+               if ((int)lp->phy_mode < 0) {
                        ret = -EINVAL;
                        goto free_netdev;
                }
index 8f46aa1ddec01180fab3587524b2719fc66bb216..cb7637364b40d85c9595da833873be1e99db3f7c 100644 (file)
@@ -1235,6 +1235,7 @@ deliver:
                macsec_rxsa_put(rx_sa);
        macsec_rxsc_put(rx_sc);
 
+       skb_orphan(skb);
        ret = gro_cells_receive(&macsec->gro_cells, skb);
        if (ret == NET_RX_SUCCESS)
                count_rx(dev, skb->len);
index 03be30cde552c318a265a989e9b24012e45872a2..fe602648b99f50fddd70fb778165298fddb09528 100644 (file)
@@ -460,9 +460,9 @@ config RENESAS_PHY
          Supports the Renesas PHYs uPD60620 and uPD60620A.
 
 config ROCKCHIP_PHY
-        tristate "Driver for Rockchip Ethernet PHYs"
-        ---help---
-          Currently supports the integrated Ethernet PHY.
+       tristate "Driver for Rockchip Ethernet PHYs"
+       ---help---
+         Currently supports the integrated Ethernet PHY.
 
 config SMSC_PHY
        tristate "SMSC PHYs"
index 3c8186f269f9e088cee19bd509ba03e84ed58e1c..2fea5541c35a8a02910a09bbbaa0de7b9fcbc6c1 100644 (file)
@@ -763,6 +763,8 @@ static int ksz9031_get_features(struct phy_device *phydev)
         * Whenever the device's Asymmetric Pause capability is set to 1,
         * link-up may fail after a link-up to link-down transition.
         *
+        * The Errata Sheet is for ksz9031, but ksz9021 has the same issue
+        *
         * Workaround:
         * Do not enable the Asymmetric Pause capability bit.
         */
@@ -1076,6 +1078,7 @@ static struct phy_driver ksphy_driver[] = {
        /* PHY_GBIT_FEATURES */
        .driver_data    = &ksz9021_type,
        .probe          = kszphy_probe,
+       .get_features   = ksz9031_get_features,
        .config_init    = ksz9021_config_init,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
index a221dd552c3c649cc2932cebb7490e71629cb62b..a5bf0874c7d81297d82c7477c30e06d4c7c63b01 100644 (file)
@@ -105,14 +105,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
 
 static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
 {
+       u16 lb_dis = BIT(1);
+
        if (disable)
-               ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
+               ns_exp_write(phydev, 0x1c0,
+                            ns_exp_read(phydev, 0x1c0) | lb_dis);
        else
                ns_exp_write(phydev, 0x1c0,
-                            ns_exp_read(phydev, 0x1c0) & 0xfffe);
+                            ns_exp_read(phydev, 0x1c0) & ~lb_dis);
 
        pr_debug("10BASE-T HDX loopback %s\n",
-                (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
+                (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
 }
 
 static int ns_config_init(struct phy_device *phydev)
index a30e41a5608536a91e76af37e6aa9322b5690551..9a1b006904a7dfa3ec5aa98c7cfd6b9a6d9c5a34 100644 (file)
@@ -1415,6 +1415,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
                        netif_wake_queue(ppp->dev);
                else
                        netif_stop_queue(ppp->dev);
+       } else {
+               kfree_skb(skb);
        }
        ppp_xmit_unlock(ppp);
 }
index dd614c2cd994e89c66eb37625b2afb79adec2258..3ae70c7e6860ca4653ed698fca9bdd2ad8f2c037 100644 (file)
@@ -1200,7 +1200,7 @@ err_kfree:
        kfree_skb(skb);
 err:
        rcu_read_lock();
-               tap = rcu_dereference(q->tap);
+       tap = rcu_dereference(q->tap);
        if (tap && tap->count_tx_dropped)
                tap->count_tx_dropped(tap);
        rcu_read_unlock();
index 50c05d0f44cb376276c6ed17f335aeae0180bc80..00cab3f43a4ca3f3e91f1471f5ab189e78d601dc 100644 (file)
@@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
        u8 ep;
 
        for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
-
                e = intf->cur_altsetting->endpoint + ep;
+
+               /* ignore endpoints which cannot transfer data */
+               if (!usb_endpoint_maxp(&e->desc))
+                       continue;
+
                switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
                case USB_ENDPOINT_XFER_INT:
                        if (usb_endpoint_dir_in(&e->desc)) {
index 58952a79b05fb3122aace9452833759774d14835..dde05e2fdc3e6325431f079d04db8fc898be4a67 100644 (file)
@@ -100,6 +100,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
                        int                             intr = 0;
 
                        e = alt->endpoint + ep;
+
+                       /* ignore endpoints which cannot transfer data */
+                       if (!usb_endpoint_maxp(&e->desc))
+                               continue;
+
                        switch (e->desc.bmAttributes) {
                        case USB_ENDPOINT_XFER_INT:
                                if (!usb_endpoint_dir_in(&e->desc))
@@ -339,6 +344,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
 {
        enum usb_device_speed speed = dev->udev->speed;
 
+       if (!dev->rx_urb_size || !dev->hard_mtu)
+               goto insanity;
        switch (speed) {
        case USB_SPEED_HIGH:
                dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
@@ -355,6 +362,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
                dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
                break;
        default:
+insanity:
                dev->rx_qlen = dev->tx_qlen = 4;
        }
 }
index 6e84328bdd402f0b66b71a92e7f894765e20dc1b..a4b38a980c3cbd0ecdc148fd2922b9bda33e6478 100644 (file)
@@ -1154,7 +1154,8 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
        struct sk_buff *skb;
        int err;
 
-       if (family == AF_INET6 && !ipv6_mod_enabled())
+       if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
+           !ipv6_mod_enabled())
                return 0;
 
        skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
index d98d6ac90f3d9cd107ba937aab44405a7c2a9fac..56616d988c965fd9acf1fe2d32889378d7fd584f 100644 (file)
@@ -34,7 +34,7 @@ config ATH_TRACEPOINTS
        depends on ATH_DEBUG
        depends on EVENT_TRACING
        ---help---
-         This option enables tracepoints for atheros wireless drivers.
+        This option enables tracepoints for atheros wireless drivers.
         Currently, ath9k makes use of this facility.
 
 config ATH_REG_DYNAMIC_USER_REG_HINTS
index 41d3c9a48b08a5b983f0005772a88ad6f41bc465..65b39c7d035d713e389b7a4f2440a57a6f650d7d 100644 (file)
@@ -5,5 +5,5 @@ config AR5523
        select ATH_COMMON
        select FW_LOADER
        ---help---
-         This module add support for AR5523 based USB dongles such as D-Link
-         DWL-G132, Netgear WPN111 and many more.
+        This module adds support for AR5523 based USB dongles such as D-Link
+        DWL-G132, Netgear WPN111 and many more.
index dcf8ca0dcc52d33477a27bea263894e26f87f6e5..62c22fdcca38c985375366abe37ec9a13511f165 100644 (file)
@@ -2,7 +2,7 @@
 config ATH6KL
        tristate "Atheros mobile chipsets support"
        depends on CFG80211
-        ---help---
+       ---help---
          This module adds core support for wireless adapters based on
          Atheros AR6003 and AR6004 chipsets. You still need separate
          bus drivers for USB and SDIO to be able to use real devices.
index 2d1247f61297418542bb8d6248b5829bfdb807ca..c99f4228446552378cfa270dd6e5e12fd38dccbc 100644 (file)
@@ -148,7 +148,7 @@ config ATH9K_CHANNEL_CONTEXT
        depends on ATH9K
        default n
        ---help---
-         This option enables channel context support in ath9k, which is needed
+        This option enables channel context support in ath9k, which is needed
         for multi-channel concurrency. Enable this if P2P PowerSave support
         is required.
 
index 757eb765e17c6c89d291870d0638342922fa873d..b1bce7aad3999f44111e98e6cb69a616b6e83110 100644 (file)
@@ -41,9 +41,9 @@ config CARL9170_WPC
        default y
 
 config CARL9170_HWRNG
-        bool "Random number generator"
-        depends on CARL9170 && (HW_RANDOM = y || HW_RANDOM = CARL9170)
-        default n
+       bool "Random number generator"
+       depends on CARL9170 && (HW_RANDOM = y || HW_RANDOM = CARL9170)
+       default n
        help
          Provides a hardware random number generator to the kernel.
 
index cb13652491ad5546e766f6887798ff6cedefc9fc..598c1fba9dac41150f206dc1a3a5ad5d7fe27644 100644 (file)
@@ -1012,11 +1012,11 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
        skb_orphan(skb);
 
        if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
+               wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
                dev_kfree_skb(skb);
                ndev->stats.rx_dropped++;
                stats->rx_replay++;
                stats->rx_dropped++;
-               wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
                return;
        }
 
index 809bdf331848d26cbfae239fa4646ea6ac750bb0..4c0556b3a5ba8f3329292061dafac38bf1bd506b 100644 (file)
@@ -20,22 +20,22 @@ config ATMEL
       select FW_LOADER
       select CRC32
        ---help---
-        A driver 802.11b wireless cards based on the Atmel fast-vnet
-        chips. This driver supports standard Linux wireless extensions.
+       A driver 802.11b wireless cards based on the Atmel fast-vnet
+       chips. This driver supports standard Linux wireless extensions.
 
-        Many  cards based on this chipset do not have flash memory
-        and need their firmware loaded at start-up. If yours is
-        one of these, you will need to provide a firmware image
-        to be loaded into the card by the driver. The Atmel
-        firmware package can be downloaded from
-        <http://www.thekelleys.org.uk/atmel>
+       Many  cards based on this chipset do not have flash memory
+       and need their firmware loaded at start-up. If yours is
+       one of these, you will need to provide a firmware image
+       to be loaded into the card by the driver. The Atmel
+       firmware package can be downloaded from
+       <http://www.thekelleys.org.uk/atmel>
 
 config PCI_ATMEL
       tristate "Atmel at76c506 PCI cards"
       depends on ATMEL && PCI
        ---help---
-        Enable support for PCI and mini-PCI cards containing the
-        Atmel at76c506 chip.
+       Enable support for PCI and mini-PCI cards containing the
+       Atmel at76c506 chip.
 
 config PCMCIA_ATMEL
        tristate "Atmel at76c502/at76c504 PCMCIA cards"
@@ -48,11 +48,11 @@ config PCMCIA_ATMEL
          Atmel at76c502 and at76c504 chips.
 
 config AT76C50X_USB
-        tristate "Atmel at76c503/at76c505/at76c505a USB cards"
-        depends on MAC80211 && USB
-        select FW_LOADER
-        ---help---
-          Enable support for USB Wireless devices using Atmel at76c503,
-          at76c505 or at76c505a chips.
+       tristate "Atmel at76c503/at76c505/at76c505a USB cards"
+       depends on MAC80211 && USB
+       select FW_LOADER
+       ---help---
+         Enable support for USB Wireless devices using Atmel at76c503,
+         at76c505 or at76c505a chips.
 
 endif # WLAN_VENDOR_ATMEL
index 5d2878a73732568496a4535039c8d53ab5412b93..ab17903ba9f88d521a50242d4ffac10d09a22206 100644 (file)
@@ -13,37 +13,37 @@ config IPW2100
        select LIB80211
        select LIBIPW
        ---help---
-          A driver for the Intel PRO/Wireless 2100 Network 
+         A driver for the Intel PRO/Wireless 2100 Network
          Connection 802.11b wireless network adapter.
 
-          See <file:Documentation/networking/device_drivers/intel/ipw2100.txt>
+         See <file:Documentation/networking/device_drivers/intel/ipw2100.txt>
          for information on the capabilities currently enabled in this driver
          and for tips for debugging issues and problems.
 
          In order to use this driver, you will need a firmware image for it.
-          You can obtain the firmware from
-         <http://ipw2100.sf.net/>.  Once you have the firmware image, you 
+         You can obtain the firmware from
+         <http://ipw2100.sf.net/>.  Once you have the firmware image, you
          will need to place it in /lib/firmware.
 
-          You will also very likely need the Wireless Tools in order to
-          configure your card:
+         You will also very likely need the Wireless Tools in order to
+         configure your card:
 
-          <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+         <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+
+         It is recommended that you compile this driver as a module (M)
+         rather than built-in (Y). This driver requires firmware at device
+         initialization time, and when built-in this typically happens
+         before the filesystem is accessible (hence firmware will be
+         unavailable and initialization will fail). If you do choose to build
+         this driver into your kernel image, you can avoid this problem by
+         including the firmware and a firmware loader in an initramfs.
 
-          It is recommended that you compile this driver as a module (M)
-          rather than built-in (Y). This driver requires firmware at device
-          initialization time, and when built-in this typically happens
-          before the filesystem is accessible (hence firmware will be
-          unavailable and initialization will fail). If you do choose to build
-          this driver into your kernel image, you can avoid this problem by
-          including the firmware and a firmware loader in an initramfs.
 config IPW2100_MONITOR
-        bool "Enable promiscuous mode"
-        depends on IPW2100
-        ---help---
+       bool "Enable promiscuous mode"
+       depends on IPW2100
+       ---help---
          Enables promiscuous/monitor mode support for the ipw2100 driver.
-         With this feature compiled into the driver, you can switch to 
+         With this feature compiled into the driver, you can switch to
          promiscuous mode via the Wireless Tool's Monitor mode.  While in this
          mode, no packets can be sent.
 
@@ -51,17 +51,17 @@ config IPW2100_DEBUG
        bool "Enable full debugging output in IPW2100 module."
        depends on IPW2100
        ---help---
-         This option will enable debug tracing output for the IPW2100.  
+         This option will enable debug tracing output for the IPW2100.
 
-         This will result in the kernel module being ~60k larger.  You can 
-         control which debug output is sent to the kernel log by setting the 
-         value in 
+         This will result in the kernel module being ~60k larger.  You can
+         control which debug output is sent to the kernel log by setting the
+         value in
 
          /sys/bus/pci/drivers/ipw2100/debug_level
 
          This entry will only exist if this option is enabled.
 
-         If you are not trying to debug or develop the IPW2100 driver, you 
+         If you are not trying to debug or develop the IPW2100 driver, you
          most likely want to say N here.
 
 config IPW2200
@@ -75,37 +75,37 @@ config IPW2200
        select LIB80211
        select LIBIPW
        ---help---
-          A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
-         Connection adapters. 
+         A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
+         Connection adapters.
 
-          See <file:Documentation/networking/device_drivers/intel/ipw2200.txt>
+         See <file:Documentation/networking/device_drivers/intel/ipw2200.txt>
          for information on the capabilities currently enabled in this
          driver and for tips for debugging issues and problems.
 
          In order to use this driver, you will need a firmware image for it.
-          You can obtain the firmware from
-         <http://ipw2200.sf.net/>.  See the above referenced README.ipw2200 
+         You can obtain the firmware from
+         <http://ipw2200.sf.net/>.  See the above referenced README.ipw2200
          for information on where to install the firmware images.
 
-          You will also very likely need the Wireless Tools in order to
-          configure your card:
+         You will also very likely need the Wireless Tools in order to
+         configure your card:
 
-          <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+         <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
 
-          It is recommended that you compile this driver as a module (M)
-          rather than built-in (Y). This driver requires firmware at device
-          initialization time, and when built-in this typically happens
-          before the filesystem is accessible (hence firmware will be
-          unavailable and initialization will fail). If you do choose to build
-          this driver into your kernel image, you can avoid this problem by
-          including the firmware and a firmware loader in an initramfs.
+         It is recommended that you compile this driver as a module (M)
+         rather than built-in (Y). This driver requires firmware at device
+         initialization time, and when built-in this typically happens
+         before the filesystem is accessible (hence firmware will be
+         unavailable and initialization will fail). If you do choose to build
+         this driver into your kernel image, you can avoid this problem by
+         including the firmware and a firmware loader in an initramfs.
 
 config IPW2200_MONITOR
-        bool "Enable promiscuous mode"
-        depends on IPW2200
-        ---help---
+       bool "Enable promiscuous mode"
+       depends on IPW2200
+       ---help---
          Enables promiscuous/monitor mode support for the ipw2200 driver.
-         With this feature compiled into the driver, you can switch to 
+         With this feature compiled into the driver, you can switch to
          promiscuous mode via the Wireless Tool's Monitor mode.  While in this
          mode, no packets can be sent.
 
@@ -118,28 +118,28 @@ config IPW2200_PROMISCUOUS
        depends on IPW2200_MONITOR
        select IPW2200_RADIOTAP
        ---help---
-          Enables the creation of a second interface prefixed 'rtap'. 
-          This second interface will provide every received in radiotap
+         Enables the creation of a second interface prefixed 'rtap'.
+         This second interface will provide every received frame in radiotap
          format.
 
-          This is useful for performing wireless network analysis while
-          maintaining an active association.
+         This is useful for performing wireless network analysis while
+         maintaining an active association.
+
+         Example usage:
 
-          Example usage:
+           % modprobe ipw2200 rtap_iface=1
+           % ifconfig rtap0 up
+           % tethereal -i rtap0
 
-            % modprobe ipw2200 rtap_iface=1
-            % ifconfig rtap0 up
-            % tethereal -i rtap0
+         If you do not specify 'rtap_iface=1' as a module parameter then
+         the rtap interface will not be created and you will need to turn
+         it on via sysfs:
 
-          If you do not specify 'rtap_iface=1' as a module parameter then 
-          the rtap interface will not be created and you will need to turn 
-          it on via sysfs:
-       
-            % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
+           % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
 
 config IPW2200_QOS
-        bool "Enable QoS support"
-        depends on IPW2200
+       bool "Enable QoS support"
+       depends on IPW2200
 
 config IPW2200_DEBUG
        bool "Enable full debugging output in IPW2200 module."
index e329fd7b09c0fcbc2e37cedade92c61398e24260..100f55858b133fdaff18d7f23cde330c208f92f6 100644 (file)
@@ -91,9 +91,9 @@ config IWLEGACY_DEBUG
          any problems you may encounter.
 
 config IWLEGACY_DEBUGFS
-        bool "iwlegacy (iwl 3945/4965) debugfs support"
-        depends on IWLEGACY && MAC80211_DEBUGFS
-        ---help---
+       bool "iwlegacy (iwl 3945/4965) debugfs support"
+       depends on IWLEGACY && MAC80211_DEBUGFS
+       ---help---
          Enable creation of debugfs files for the iwlegacy drivers. This
          is a low-impact option that allows getting insight into the
          driver's state at runtime.
index 7dbc0d38bb3bb5db765fc7019c27cb7ea5e84e76..091d621ad25fd58fe30dc63af688d9c564bfe7d2 100644 (file)
@@ -119,9 +119,9 @@ config IWLWIFI_DEBUG
          any problems you may encounter.
 
 config IWLWIFI_DEBUGFS
-        bool "iwlwifi debugfs support"
-        depends on MAC80211_DEBUGFS
-        ---help---
+       bool "iwlwifi debugfs support"
+       depends on MAC80211_DEBUGFS
+       ---help---
          Enable creation of debugfs files for the iwlwifi drivers. This
          is a low-impact option that allows getting insight into the
          driver's state at runtime.
index 014eca6596e2180d1ee6b38e010e33e157360ebe..32a5e4e5461ff0a37e5282dff8285db42254ea64 100644 (file)
@@ -889,11 +889,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
         * firmware versions.  Unfortunately, we don't have a TLV API
         * flag to rely on, so rely on the major version which is in
         * the first byte of ucode_ver.  This was implemented
-        * initially on version 38 and then backported to 36, 29 and
-        * 17.
+        * initially on version 38 and then backported to 29 and 17.
+        * The intention was to have it in 36 as well, but not all
+        * 8000 family got this feature enabled.  The 8000 family is
+        * the only one using version 36, so skip this version
+        * entirely.
         */
        return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
-              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
               IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
               IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
 }
index 32a708301cfc84ccef53a2551d751b15b734532c..f0c539b37ea7082c798ff0edb908697e6d1d46a8 100644 (file)
@@ -555,16 +555,19 @@ static int compare_temps(const void *a, const void *b)
        return ((s16)le16_to_cpu(*(__le16 *)a) -
                (s16)le16_to_cpu(*(__le16 *)b));
 }
+#endif
 
 int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
 {
        struct temp_report_ths_cmd cmd = {0};
-       int ret, i, j, idx = 0;
+       int ret;
+#ifdef CONFIG_THERMAL
+       int i, j, idx = 0;
 
        lockdep_assert_held(&mvm->mutex);
 
        if (!mvm->tz_device.tzone)
-               return -EINVAL;
+               goto send;
 
        /* The driver holds array of temperature trips that are unsorted
         * and uncompressed, the FW should get it compressed and sorted
@@ -597,6 +600,7 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
        }
 
 send:
+#endif
        ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
                                                TEMP_REPORTING_THRESHOLDS_CMD),
                                   0, sizeof(cmd), &cmd);
@@ -607,6 +611,7 @@ send:
        return ret;
 }
 
+#ifdef CONFIG_THERMAL
 static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
                                  int *temperature)
 {
index 275d5eaed3b7f7aefad2bbb9bd5fbc693fb9c5ed..842cd81704db6f7888743ee10c145f244b806e38 100644 (file)
@@ -333,7 +333,6 @@ static int mt7615_driver_own(struct mt7615_dev *dev)
 
 static int mt7615_load_patch(struct mt7615_dev *dev)
 {
-       const char *firmware = MT7615_ROM_PATCH;
        const struct mt7615_patch_hdr *hdr;
        const struct firmware *fw = NULL;
        int len, ret, sem;
@@ -349,7 +348,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
                return -EAGAIN;
        }
 
-       ret = request_firmware(&fw, firmware, dev->mt76.dev);
+       ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
        if (ret)
                goto out;
 
@@ -447,13 +446,11 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
 
 static int mt7615_load_ram(struct mt7615_dev *dev)
 {
-       const struct firmware *fw;
        const struct mt7615_fw_trailer *hdr;
-       const char *n9_firmware = MT7615_FIRMWARE_N9;
-       const char *cr4_firmware = MT7615_FIRMWARE_CR4;
+       const struct firmware *fw;
        int ret;
 
-       ret = request_firmware(&fw, n9_firmware, dev->mt76.dev);
+       ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
        if (ret)
                return ret;
 
@@ -482,7 +479,7 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
 
        release_firmware(fw);
 
-       ret = request_firmware(&fw, cr4_firmware, dev->mt76.dev);
+       ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
        if (ret)
                return ret;
 
index cef3fd43cb00e716a2b4e44d8afa8a38ec12f410..7963e302d705d3ded5ac8f62292b723c56ad2f93 100644 (file)
@@ -26,9 +26,9 @@
 #define MT7615_RX_RING_SIZE            1024
 #define MT7615_RX_MCU_RING_SIZE                512
 
-#define MT7615_FIRMWARE_CR4            "mt7615_cr4.bin"
-#define MT7615_FIRMWARE_N9             "mt7615_n9.bin"
-#define MT7615_ROM_PATCH               "mt7615_rom_patch.bin"
+#define MT7615_FIRMWARE_CR4            "mediatek/mt7615_cr4.bin"
+#define MT7615_FIRMWARE_N9             "mediatek/mt7615_n9.bin"
+#define MT7615_ROM_PATCH               "mediatek/mt7615_rom_patch.bin"
 
 #define MT7615_EEPROM_SIZE             1024
 #define MT7615_TOKEN_SIZE              4096
index 858f8aa3e616bd29fcb4ab617018e36a080e0c72..f8a9244ce012bc854bdc361ff96c024d9519d54b 100644 (file)
@@ -98,17 +98,17 @@ config RT2800PCI_RT53XX
        bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
        default y
        ---help---
-         This adds support for rt53xx wireless chipset family to the
-         rt2800pci driver.
-         Supported chips: RT5390
+        This adds support for rt53xx wireless chipset family to the
+        rt2800pci driver.
+        Supported chips: RT5390
 
 config RT2800PCI_RT3290
        bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
        default y
        ---help---
-         This adds support for rt3290 wireless chipset family to the
-         rt2800pci driver.
-         Supported chips: RT3290
+        This adds support for rt3290 wireless chipset family to the
+        rt2800pci driver.
+        Supported chips: RT3290
 endif
 
 config RT2500USB
@@ -176,16 +176,16 @@ config RT2800USB_RT3573
 config RT2800USB_RT53XX
        bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
        ---help---
-         This adds support for rt53xx wireless chipset family to the
-         rt2800usb driver.
-         Supported chips: RT5370
+        This adds support for rt53xx wireless chipset family to the
+        rt2800usb driver.
+        Supported chips: RT5370
 
 config RT2800USB_RT55XX
        bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
        ---help---
-         This adds support for rt55xx wireless chipset family to the
-         rt2800usb driver.
-         Supported chips: RT5572
+        This adds support for rt55xx wireless chipset family to the
+        rt2800usb driver.
+        Supported chips: RT5572
 
 config RT2800USB_UNKNOWN
        bool "rt2800usb - Include support for unknown (USB) devices"
index fc14b37d927d75b21f1bb6a3733c34eaffa75b9a..b61b073031e57089e1cdbc87f2d5328a149ab874 100644 (file)
@@ -707,9 +707,6 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
        rtwdev->h2c.last_box_num = 0;
        rtwdev->h2c.seq = 0;
 
-       rtw_fw_send_general_info(rtwdev);
-       rtw_fw_send_phydm_info(rtwdev);
-
        rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
 
        return 0;
index fc8f6213fc8f25e80bae6326b609c09405bf9992..6dd457741b15dea619e6b364b78837c5e9649c35 100644 (file)
@@ -704,6 +704,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
                goto err_off;
        }
 
+       /* send H2C after HCI has started */
+       rtw_fw_send_general_info(rtwdev);
+       rtw_fw_send_phydm_info(rtwdev);
+
        wifi_only = !rtwdev->efuse.btcoex;
        rtw_coex_power_on_setting(rtwdev);
        rtw_coex_init_hw_config(rtwdev, wifi_only);
index 3fdb52a5789a6cb3bcbb378ca03be26a7f7b1a4a..d90928be663b910c3d5cb88f54a7e5f2fe4d6d17 100644 (file)
@@ -90,16 +90,13 @@ static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
        return tx_ring->r.head + offset;
 }
 
-static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
-                                struct rtw_pci_tx_ring *tx_ring)
+static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
+                                     struct rtw_pci_tx_ring *tx_ring)
 {
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb, *tmp;
        dma_addr_t dma;
-       u8 *head = tx_ring->r.head;
-       u32 len = tx_ring->r.len;
-       int ring_sz = len * tx_ring->r.desc_size;
 
        /* free every skb remained in tx list */
        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
@@ -110,21 +107,30 @@ static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
                pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_any(skb);
        }
+}
+
+static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
+                                struct rtw_pci_tx_ring *tx_ring)
+{
+       struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+       u8 *head = tx_ring->r.head;
+       u32 len = tx_ring->r.len;
+       int ring_sz = len * tx_ring->r.desc_size;
+
+       rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
 
        /* free the ring itself */
        pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
        tx_ring->r.head = NULL;
 }
 
-static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
-                                struct rtw_pci_rx_ring *rx_ring)
+static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
+                                     struct rtw_pci_rx_ring *rx_ring)
 {
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb;
-       dma_addr_t dma;
-       u8 *head = rx_ring->r.head;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
-       int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
+       dma_addr_t dma;
        int i;
 
        for (i = 0; i < rx_ring->r.len; i++) {
@@ -137,6 +143,16 @@ static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
                dev_kfree_skb(skb);
                rx_ring->buf[i] = NULL;
        }
+}
+
+static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
+                                struct rtw_pci_rx_ring *rx_ring)
+{
+       struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+       u8 *head = rx_ring->r.head;
+       int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
+
+       rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
 
        pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
 }
@@ -484,6 +500,17 @@ static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
        rtwpci->rx_tag = 0;
 }
 
+static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
+{
+       struct rtw_pci_tx_ring *tx_ring;
+       u8 queue;
+
+       for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
+               tx_ring = &rtwpci->tx_rings[queue];
+               rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
+       }
+}
+
 static int rtw_pci_start(struct rtw_dev *rtwdev)
 {
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
@@ -505,6 +532,7 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
 
        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
+       rtw_pci_dma_release(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 }
 
index 4e44ea8c652d65aa902d0a747778fa7862328273..7b5c2fe5bd4d9cde69a629ffea584c9e2eba8be7 100644 (file)
@@ -1633,7 +1633,7 @@ static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
         */
        if (rr->length < struct_size(regs, regs, count)) {
                dev_dbg_f(zd_usb_dev(usb),
-                        "error: actual length %d less than expected %ld\n",
+                        "error: actual length %d less than expected %zu\n",
                         rr->length, struct_size(regs, regs, count));
                return false;
        }
index 7eda62a9e0dfa3d95616c85e3339570725b01cd8..9642971e89ceae99733f18bbef78911a578fcd67 100644 (file)
@@ -661,7 +661,7 @@ static int st95hf_error_handling(struct st95hf_context *stcontext,
                        result = -ETIMEDOUT;
                else
                        result = -EIO;
-       return  result;
+               return result;
        }
 
        /* Check for CRC err only if CRC is present in the tag response */
index 000b95787df1528414411e8260a69a69271f5c93..bd6129db641782c653b14c56dcb887d47124ad51 100644 (file)
@@ -362,7 +362,7 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
        int ret;
 
        iface = of_get_phy_mode(np);
-       if (iface < 0)
+       if ((int)iface < 0)
                return NULL;
        if (of_phy_is_fixed_link(np)) {
                ret = of_phy_register_fixed_link(np);
index 9c18476d8d1037c4867f0e317db08fd7bef11f00..67d0199840fdd224d088eed7da7f9f1b1ffbeaee 100644 (file)
@@ -155,7 +155,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EINVAL;
                        break;
                } else if (cmd == PTP_EXTTS_REQUEST) {
-    &nbs