Merge tag 'net-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author    Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 Oct 2020 19:55:02 +0000 (12:55 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 Oct 2020 19:55:02 +0000 (12:55 -0700)
Pull networking fixes from Jakub Kicinski:
 "Current release regressions:

   - r8169: fix forced threading conflicting with other shared
     interrupts; we tried to fix the use of raise_softirq_irqoff from an
     IRQ handler on RT by forcing hard irqs, but this driver shares
     legacy PCI IRQs, so drop the _irqoff() instead

   - tipc: fix a memory leak in tipc_buf_append() introduced by a recent
     fix for a syzbot report

  Current release - bugs in new features:

   - devlink: Unlock on error in dumpit() and fix some error codes

   - net/smc: fix null pointer dereference in smc_listen_decline()

  Previous release - regressions:

   - tcp: Prevent low rmem stalls with SO_RCVLOWAT.

   - net: protect tcf_block_unbind with block lock

   - ibmveth: Fix use of ibmveth in a bridge; the self-imposed filtering
     to only send legal frames to the hypervisor was too strict

   - net: hns3: Clear the CMDQ registers before unmapping BAR region;
     incorrect cleanup order was leading to a crash

   - bnxt_en: a handful of follow-up fixes to earlier fixes:
      - Send HWRM_FUNC_RESET fw command unconditionally, even if there
        are PCIe errors being reported
      - Check abort error state in bnxt_open_nic().
      - Invoke cancel_delayed_work_sync() for PFs also.
      - Fix regression in workqueue cleanup logic in bnxt_remove_one().

   - mlxsw: Only advertise link modes supported by both the driver and
     the device; after 56G support was removed from the driver, 56G was
     not cleared from the advertised modes

   - net/smc: fix suppressed return code

  Previous release - always broken:

   - netem: fix zero division in tabledist, caused by integer overflow
     (see the standalone sketch after this summary)

   - bnxt_en: Re-write PCI BARs after PCI fatal error.

   - cxgb4: set up filter action after rewrites

   - net: ipa: command payloads already mapped

  Misc:

   - s390/ism: fix incorrect system EID; it's okay to change it since it
     was only added in the current release

   - vsock: use ns_capable_noaudit() on socket create to suppress false
     positive audit messages"

* tag 'net-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (36 commits)
  r8169: fix issue with forced threading in combination with shared interrupts
  netem: fix zero division in tabledist
  ibmvnic: fix ibmvnic_set_mac
  mptcp: add missing memory scheduling in the rx path
  tipc: fix memory leak caused by tipc_buf_append()
  gtp: fix a use-before-init in gtp_newlink()
  net: protect tcf_block_unbind with block lock
  ibmveth: Fix use of ibmveth in a bridge.
  net/sched: act_mpls: Add softdep on mpls_gso.ko
  ravb: Fix bit fields checking in ravb_hwtstamp_get()
  devlink: Unlock on error in dumpit()
  devlink: Fix some error codes
  chelsio/chtls: fix memory leaks in CPL handlers
  chelsio/chtls: fix deadlock issue
  net: hns3: Clear the CMDQ registers before unmapping BAR region
  bnxt_en: Send HWRM_FUNC_RESET fw command unconditionally.
  bnxt_en: Check abort error state in bnxt_open_nic().
  bnxt_en: Re-write PCI BARs after PCI fatal error.
  bnxt_en: Invoke cancel_delayed_work_sync() for PFs also.
  bnxt_en: Fix regression in workqueue cleanup logic in bnxt_remove_one().
  ...

39 files changed:
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
drivers/net/ethernet/pensando/ionic/ionic_dev.c
drivers/net/ethernet/pensando/ionic/ionic_dev.h
drivers/net/ethernet/pensando/ionic/ionic_fw.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/pensando/ionic/ionic_stats.h
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.h
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/gtp.c
drivers/net/ipa/gsi_trans.c
drivers/s390/net/ism_drv.c
net/core/devlink.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/mptcp/protocol.c
net/sched/act_mpls.c
net/sched/cls_api.c
net/sched/sch_netem.c
net/smc/af_smc.c
net/smc/smc_core.c
net/tipc/msg.c
net/vmw_vsock/af_vsock.c

index fa147865e33f4a1927c0dd439d0ab4b9bd6f150d..7975f59735d61f3da9bb018e6635a4a7a241d7a1 100644 (file)
@@ -1160,16 +1160,6 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
                schedule_work(&bp->sp_task);
 }
 
-static void bnxt_cancel_sp_work(struct bnxt *bp)
-{
-       if (BNXT_PF(bp)) {
-               flush_workqueue(bnxt_pf_wq);
-       } else {
-               cancel_work_sync(&bp->sp_task);
-               cancel_delayed_work_sync(&bp->fw_reset_task);
-       }
-}
-
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 {
        if (!rxr->bnapi->in_reset) {
@@ -4362,7 +4352,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
        u16 dst = BNXT_HWRM_CHNL_CHIMP;
 
-       if (BNXT_NO_FW_ACCESS(bp))
+       if (BNXT_NO_FW_ACCESS(bp) &&
+           le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
                return -EBUSY;
 
        if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
@@ -9789,7 +9780,10 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 {
        int rc = 0;
 
-       rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+       if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
+               rc = -EIO;
+       if (!rc)
+               rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
        if (rc) {
                netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
                dev_close(bp->dev);
@@ -12108,15 +12102,17 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        if (BNXT_PF(bp))
                bnxt_sriov_disable(bp);
 
-       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
-       bnxt_cancel_sp_work(bp);
-       bp->sp_event = 0;
-
-       bnxt_dl_fw_reporters_destroy(bp, true);
        if (BNXT_PF(bp))
                devlink_port_type_clear(&bp->dl_port);
        pci_disable_pcie_error_reporting(pdev);
        unregister_netdev(dev);
+       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+       /* Flush any pending tasks */
+       cancel_work_sync(&bp->sp_task);
+       cancel_delayed_work_sync(&bp->fw_reset_task);
+       bp->sp_event = 0;
+
+       bnxt_dl_fw_reporters_destroy(bp, true);
        bnxt_dl_unregister(bp);
        bnxt_shutdown_tc(bp);
 
@@ -12860,6 +12856,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
+       if (state == pci_channel_io_frozen)
+               set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+
        if (netif_running(netdev))
                bnxt_close(netdev);
 
@@ -12886,7 +12885,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
-       int err = 0;
+       int err = 0, off;
        pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
 
        netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12898,6 +12897,20 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                        "Cannot re-enable PCI device after reset.\n");
        } else {
                pci_set_master(pdev);
+               /* Upon fatal error, the device's internal logic that latches
+                * the BAR values is reset and is restored only by rewriting
+                * the BARs.
+                *
+                * As pci_restore_state() does not re-write the BARs if the
+                * value matches the previously saved value, the driver needs
+                * to write the BARs to 0 to force a restore on fatal errors.
+                */
+               if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
+                                      &bp->state)) {
+                       for (off = PCI_BASE_ADDRESS_0;
+                            off <= PCI_BASE_ADDRESS_5; off += 4)
+                               pci_write_config_dword(bp->pdev, off, 0);
+               }
                pci_restore_state(pdev);
                pci_save_state(pdev);
 
index 21ef1c21f602acdd444437a33b91bcf0be5ff14d..47b3c31278798b1b076f15fdd45d1e9d5a05311a 100644 (file)
@@ -1781,6 +1781,7 @@ struct bnxt {
 #define BNXT_STATE_ABORT_ERR   5
 #define BNXT_STATE_FW_FATAL_COND       6
 #define BNXT_STATE_DRV_REGISTERED      7
+#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN       8
 
 #define BNXT_NO_FW_ACCESS(bp)                                  \
        (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) ||    \
index 6ec5f2f26f05526fbaf726e786e4154cac790012..4e55f7081644362871d1730492458c8c1fc8acbc 100644 (file)
@@ -145,13 +145,13 @@ static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
        int err;
 
        /* do a set-tcb for smac-sel and CWR bit.. */
-       err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
-       if (err)
-               goto smac_err;
-
        err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
                            TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
                            TCB_SMAC_SEL_V(f->smt->idx), 1);
+       if (err)
+               goto smac_err;
+
+       err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
        if (!err)
                return 0;
 
@@ -862,6 +862,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
                      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
                      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
                      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+                     FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
                      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -879,7 +880,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
                 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
                 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
                 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
-       fwr->smac_sel = 0;
+       fwr->smac_sel = f->smt->idx;
        fwr->rx_chan_rx_rpl_iq =
                htons(FW_FILTER_WR_RX_CHAN_V(0) |
                      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
@@ -1323,11 +1324,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
                            TX_QUEUE_V(f->fs.nat_mode) |
                            T5_OPT_2_VALID_F |
                            RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
-                           CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
-                                        (f->fs.dirsteer << 1)) |
                            PACE_V((f->fs.maskhash) |
-                                  ((f->fs.dirsteerhash) << 1)) |
-                           CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+                                  ((f->fs.dirsteerhash) << 1)));
 }
 
 static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
@@ -1363,11 +1361,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
                            TX_QUEUE_V(f->fs.nat_mode) |
                            T5_OPT_2_VALID_F |
                            RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
-                           CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
-                                        (f->fs.dirsteer << 1)) |
                            PACE_V((f->fs.maskhash) |
-                                  ((f->fs.dirsteerhash) << 1)) |
-                           CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+                                  ((f->fs.dirsteerhash) << 1)));
 }
 
 static int cxgb4_set_hash_filter(struct net_device *dev,
@@ -2039,6 +2034,20 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
                        }
                        return;
                }
+               switch (f->fs.action) {
+               case FILTER_PASS:
+                       if (f->fs.dirsteer)
+                               set_tcb_tflag(adap, f, tid,
+                                             TF_DIRECT_STEER_S, 1, 1);
+                       break;
+               case FILTER_DROP:
+                       set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
+                       break;
+               case FILTER_SWITCH:
+                       set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
+                       break;
+               }
+
                break;
 
        default:
@@ -2106,22 +2115,11 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
                        if (ctx)
                                ctx->result = 0;
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
-                       int err = 0;
-
-                       if (f->fs.newsmac)
-                               err = configure_filter_smac(adap, f);
-
-                       if (!err) {
-                               f->pending = 0;  /* async setup completed */
-                               f->valid = 1;
-                               if (ctx) {
-                                       ctx->result = 0;
-                                       ctx->tid = idx;
-                               }
-                       } else {
-                               clear_filter(adap, f);
-                               if (ctx)
-                                       ctx->result = err;
+                       f->pending = 0;  /* async setup completed */
+                       f->valid = 1;
+                       if (ctx) {
+                               ctx->result = 0;
+                               ctx->tid = idx;
                        }
                } else {
                        /* Something went wrong.  Issue a warning about the
index 50232e063f49e598d9df7b02b5f1fa794fd3ab5c..92473dda55d9f8238f6c5c3a6ebf168349b3f8b0 100644 (file)
 #define TCB_T_FLAGS_M          0xffffffffffffffffULL
 #define TCB_T_FLAGS_V(x)       ((__u64)(x) << TCB_T_FLAGS_S)
 
+#define TF_DROP_S              22
+#define TF_DIRECT_STEER_S      23
+#define TF_LPBK_S              59
+
 #define TF_CCTRL_ECE_S         60
 #define TF_CCTRL_CWR_S         61
 #define TF_CCTRL_RFR_S         62
index ec4f79049a061bda7109b686d1525ea238d52f9d..d581c4e623f8a3bd2864fdb8a4d3beae19d76e2c 100644 (file)
@@ -772,14 +772,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
        if (rpl->status != CPL_ERR_NONE) {
                pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
                        rpl->status, stid);
-               return CPL_RET_BUF_DONE;
+       } else {
+               cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+               sock_put(listen_ctx->lsk);
+               kfree(listen_ctx);
+               module_put(THIS_MODULE);
        }
-       cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
-       sock_put(listen_ctx->lsk);
-       kfree(listen_ctx);
-       module_put(THIS_MODULE);
-
-       return 0;
+       return CPL_RET_BUF_DONE;
 }
 
 static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
@@ -796,15 +795,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
        if (rpl->status != CPL_ERR_NONE) {
                pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
                        rpl->status, stid);
-               return CPL_RET_BUF_DONE;
+       } else {
+               cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+               sock_put(listen_ctx->lsk);
+               kfree(listen_ctx);
+               module_put(THIS_MODULE);
        }
-
-       cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
-       sock_put(listen_ctx->lsk);
-       kfree(listen_ctx);
-       module_put(THIS_MODULE);
-
-       return 0;
+       return CPL_RET_BUF_DONE;
 }
 
 static void chtls_purge_wr_queue(struct sock *sk)
@@ -1514,7 +1511,6 @@ static void add_to_reap_list(struct sock *sk)
        struct chtls_sock *csk = sk->sk_user_data;
 
        local_bh_disable();
-       bh_lock_sock(sk);
        release_tcp_port(sk); /* release the port immediately */
 
        spin_lock(&reap_list_lock);
@@ -1523,7 +1519,6 @@ static void add_to_reap_list(struct sock *sk)
        if (!csk->passive_reap_next)
                schedule_work(&reap_task);
        spin_unlock(&reap_list_lock);
-       bh_unlock_sock(sk);
        local_bh_enable();
 }
 
index 9fb5ca6682ea23fe2b2e4d488b8d6ede74f81bbe..188d871f6b8cdbf613a50edd09b48aa1fde39b88 100644 (file)
@@ -1585,6 +1585,7 @@ skip_copy:
                        tp->urg_data = 0;
 
                if ((avail + offset) >= skb->len) {
+                       struct sk_buff *next_skb;
                        if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
                                tp->copied_seq += skb->len;
                                hws->rcvpld = skb->hdr_len;
@@ -1595,8 +1596,10 @@ skip_copy:
                        chtls_free_skb(sk, skb);
                        buffers_freed++;
                        hws->copied_seq = 0;
-                       if (copied >= target &&
-                           !skb_peek(&sk->sk_receive_queue))
+                       next_skb = skb_peek(&sk->sk_receive_queue);
+                       if (copied >= target && !next_skb)
+                               break;
+                       if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
                                break;
                }
        } while (len > 0);
index 714b501be7d09f805057f21222d8086900c53dc9..ba8869c3d891c0e05e29cd4fb1bcb5b17ce775ba 100644 (file)
@@ -1358,7 +1358,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
            (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
                upsmr |= UCC_GETH_UPSMR_TBIM;
        }
-       if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
+       if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
                upsmr |= UCC_GETH_UPSMR_SGMM;
 
        out_be32(&uf_regs->upsmr, upsmr);
index 15f69fa863236cb838397dc7d3d2d107f8b890c2..e8495f58a1a8e81fa4dbdc86b998984fea437434 100644 (file)
@@ -1373,7 +1373,7 @@ static int hclge_tm_bp_setup(struct hclge_dev *hdev)
                        return ret;
        }
 
-       return ret;
+       return 0;
 }
 
 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
index 50c84c5e65d29566712772b5fc51b4bf1ab43cfa..c8e3fdd5999c4f676f65d1793357b6b47f6eaf87 100644 (file)
@@ -3262,8 +3262,8 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
                hclgevf_uninit_msi(hdev);
        }
 
-       hclgevf_pci_uninit(hdev);
        hclgevf_cmd_uninit(hdev);
+       hclgevf_pci_uninit(hdev);
        hclgevf_uninit_mac_list(hdev);
 }
 
index 7ef3369953b6a540fa15c7ad9cb192d50f570a3e..c3ec9ceed833ed4e6d128c86bbf09aa476de91b5 100644 (file)
@@ -1031,12 +1031,6 @@ static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
                ret = -EOPNOTSUPP;
        }
 
-       if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
-               netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
-               netdev->stats.tx_dropped++;
-               ret = -EOPNOTSUPP;
-       }
-
        return ret;
 }
 
index 8148f796a8070b2aeb7c44ebfed4ca914c7dcf00..af4dfbe28d5616568d6baa230bc63585e6478368 100644 (file)
@@ -1815,9 +1815,13 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
        int rc;
 
        rc = 0;
-       ether_addr_copy(adapter->mac_addr, addr->sa_data);
-       if (adapter->state != VNIC_PROBED)
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       if (adapter->state != VNIC_PROBED) {
+               ether_addr_copy(adapter->mac_addr, addr->sa_data);
                rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+       }
 
        return rc;
 }
index 7f77c2a71d1c63682a119ed64a197ae34446e951..937b8e46f8c770a7630e37165f80628f99cf40f9 100644 (file)
@@ -620,6 +620,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
                err = mlxsw_emad_transmit(trans->core, trans);
                if (err == 0)
                        return;
+
+               if (!atomic_dec_and_test(&trans->active))
+                       return;
        } else {
                err = -EIO;
        }
@@ -2064,6 +2067,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
        if (!reload)
                devlink_resources_unregister(devlink, NULL);
        mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+       if (!reload)
+               devlink_free(devlink);
 
        return;
 
index 16b47fce540bb1b3ac77cd5d2524ccf36a9052bc..b08853f71b2beae79e1458e2fc2cb77eb6d4dec5 100644 (file)
@@ -1174,11 +1174,14 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
        u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
        const struct mlxsw_sp_port_type_speed_ops *ops;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 eth_proto_cap_masked;
        int err;
 
        ops = mlxsw_sp->port_type_speed_ops;
 
-       /* Set advertised speeds to supported speeds. */
+       /* Set advertised speeds to speeds supported by both the driver
+        * and the device.
+        */
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
                               0, false);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
@@ -1187,8 +1190,10 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
 
        ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
                                 &eth_proto_admin, &eth_proto_oper);
+       eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
        ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
-                              eth_proto_cap, mlxsw_sp_port->link.autoneg);
+                              eth_proto_cap_masked,
+                              mlxsw_sp_port->link.autoneg);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
 }
 
index 3e26eb6cb1404a0feb4decab8a8b1e09916c6b55..74b3959b36d4db43e23c7d4c20c690ac899a1e48 100644 (file)
@@ -342,6 +342,7 @@ struct mlxsw_sp_port_type_speed_ops {
                                    u32 *p_eth_proto_cap,
                                    u32 *p_eth_proto_admin,
                                    u32 *p_eth_proto_oper);
+       u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
 };
 
 static inline struct net_device *
index 2096b6478958234c7e39c20be87a7b2be7c40cdb..540616469e2849abd00b9a7f742a06e6753ef7d7 100644 (file)
@@ -1303,6 +1303,20 @@ mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
                                  p_eth_proto_oper);
 }
 
+static u32 mlxsw_sp1_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+       u32 ptys_proto_cap_masked = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+               if (mlxsw_sp1_port_link_mode[i].mask & eth_proto_cap)
+                       ptys_proto_cap_masked |=
+                               mlxsw_sp1_port_link_mode[i].mask;
+       }
+
+       return ptys_proto_cap_masked;
+}
+
 const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
        .from_ptys_supported_port       = mlxsw_sp1_from_ptys_supported_port,
        .from_ptys_link                 = mlxsw_sp1_from_ptys_link,
@@ -1313,6 +1327,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
        .to_ptys_speed                  = mlxsw_sp1_to_ptys_speed,
        .reg_ptys_eth_pack              = mlxsw_sp1_reg_ptys_eth_pack,
        .reg_ptys_eth_unpack            = mlxsw_sp1_reg_ptys_eth_unpack,
+       .ptys_proto_cap_masked_get      = mlxsw_sp1_ptys_proto_cap_masked_get,
 };
 
 static const enum ethtool_link_mode_bit_indices
@@ -1731,6 +1746,20 @@ mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
                                      p_eth_proto_admin, p_eth_proto_oper);
 }
 
+static u32 mlxsw_sp2_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+       u32 ptys_proto_cap_masked = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+               if (mlxsw_sp2_port_link_mode[i].mask & eth_proto_cap)
+                       ptys_proto_cap_masked |=
+                               mlxsw_sp2_port_link_mode[i].mask;
+       }
+
+       return ptys_proto_cap_masked;
+}
+
 const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
        .from_ptys_supported_port       = mlxsw_sp2_from_ptys_supported_port,
        .from_ptys_link                 = mlxsw_sp2_from_ptys_link,
@@ -1741,4 +1770,5 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
        .to_ptys_speed                  = mlxsw_sp2_to_ptys_speed,
        .reg_ptys_eth_pack              = mlxsw_sp2_reg_ptys_eth_pack,
        .reg_ptys_eth_unpack            = mlxsw_sp2_reg_ptys_eth_unpack,
+       .ptys_proto_cap_masked_get      = mlxsw_sp2_ptys_proto_cap_masked_get,
 };
index 545c99b15df8e02fca4b5b4c7f87ee60990f2907..dc5fbc2704f3a3b4f3c37fddcb686a4ea7d804d9 100644 (file)
@@ -333,7 +333,7 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
        union ionic_dev_cmd cmd = {
                .vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
                .vf_setattr.attr = attr,
-               .vf_setattr.vf_index = vf,
+               .vf_setattr.vf_index = cpu_to_le16(vf),
        };
        int err;
 
@@ -391,7 +391,7 @@ void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
 {
        union ionic_dev_cmd cmd = {
                .q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
-               .q_identify.lif_type = lif_type,
+               .q_identify.lif_type = cpu_to_le16(lif_type),
                .q_identify.type = qtype,
                .q_identify.ver = qver,
        };
index c109cd5a04713e28328ed233ff5a9e15092e36fa..6c243b17312c729ea5fe0050e37bd21f5734abd0 100644 (file)
@@ -29,6 +29,7 @@ struct ionic_dev_bar {
        int res_index;
 };
 
+#ifndef __CHECKER__
 /* Registers */
 static_assert(sizeof(struct ionic_intr) == 32);
 
@@ -119,6 +120,7 @@ static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
 static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
 static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
 static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+#endif /* __CHECKER__ */
 
 struct ionic_devinfo {
        u8 asic_type;
index f492ae406a605bb39933859f419a1f065a32621e..d7bbf336c6f65c505e176e13646f035e0fef15cd 100644 (file)
@@ -27,9 +27,9 @@ static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr,
 {
        union ionic_dev_cmd cmd = {
                .fw_download.opcode = IONIC_CMD_FW_DOWNLOAD,
-               .fw_download.offset = offset,
-               .fw_download.addr = addr,
-               .fw_download.length = length
+               .fw_download.offset = cpu_to_le32(offset),
+               .fw_download.addr = cpu_to_le64(addr),
+               .fw_download.length = cpu_to_le32(length),
        };
 
        ionic_dev_cmd_go(idev, &cmd);
index d655a7ae3058cf401204be51ce4160581ac31981..a12df3946a07cdcb89474d1687755bb2a3019401 100644 (file)
@@ -1656,7 +1656,6 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
        if (lif->rxqcqs) {
                for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
                        ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
-                       ionic_rx_flush(&lif->rxqcqs[i]->cq);
                        ionic_rx_empty(&lif->rxqcqs[i]->q);
                }
        }
@@ -1915,11 +1914,11 @@ static int ionic_get_vf_config(struct net_device *netdev,
                ret = -EINVAL;
        } else {
                ivf->vf           = vf;
-               ivf->vlan         = ionic->vfs[vf].vlanid;
+               ivf->vlan         = le16_to_cpu(ionic->vfs[vf].vlanid);
                ivf->qos          = 0;
                ivf->spoofchk     = ionic->vfs[vf].spoofchk;
                ivf->linkstate    = ionic->vfs[vf].linkstate;
-               ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
+               ivf->max_tx_rate  = le32_to_cpu(ionic->vfs[vf].maxrate);
                ivf->trusted      = ionic->vfs[vf].trusted;
                ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
        }
@@ -2019,7 +2018,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
                ret = ionic_set_vf_config(ionic, vf,
                                          IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
                if (!ret)
-                       ionic->vfs[vf].vlanid = vlan;
+                       ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
        }
 
        up_write(&ionic->vf_op_lock);
@@ -2048,7 +2047,7 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
                ret = ionic_set_vf_config(ionic, vf,
                                          IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
                if (!ret)
-                       lif->ionic->vfs[vf].maxrate = tx_max;
+                       lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
        }
 
        up_write(&ionic->vf_op_lock);
@@ -2981,14 +2980,14 @@ void ionic_lif_unregister(struct ionic_lif *lif)
 
 static void ionic_lif_queue_identify(struct ionic_lif *lif)
 {
+       union ionic_q_identity __iomem *q_ident;
        struct ionic *ionic = lif->ionic;
-       union ionic_q_identity *q_ident;
        struct ionic_dev *idev;
        int qtype;
        int err;
 
        idev = &lif->ionic->idev;
-       q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+       q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
 
        for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
                struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
@@ -3011,14 +3010,14 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
                                             ionic_qtype_versions[qtype]);
                err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
                if (!err) {
-                       qti->version   = q_ident->version;
-                       qti->supported = q_ident->supported;
-                       qti->features  = le64_to_cpu(q_ident->features);
-                       qti->desc_sz   = le16_to_cpu(q_ident->desc_sz);
-                       qti->comp_sz   = le16_to_cpu(q_ident->comp_sz);
-                       qti->sg_desc_sz   = le16_to_cpu(q_ident->sg_desc_sz);
-                       qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
-                       qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+                       qti->version   = readb(&q_ident->version);
+                       qti->supported = readb(&q_ident->supported);
+                       qti->features  = readq(&q_ident->features);
+                       qti->desc_sz   = readw(&q_ident->desc_sz);
+                       qti->comp_sz   = readw(&q_ident->comp_sz);
+                       qti->sg_desc_sz   = readw(&q_ident->sg_desc_sz);
+                       qti->max_sg_elems = readw(&q_ident->max_sg_elems);
+                       qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
                }
                mutex_unlock(&ionic->dev_cmd_lock);
 
index ee0740881af31cd5512839a1a96cf420715d396f..d355676f6c160d685239d52565cf96075297eae2 100644 (file)
@@ -311,7 +311,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
 
 static void ionic_dev_cmd_clean(struct ionic *ionic)
 {
-       union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+       union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
 
        iowrite32(0, &regs->doorbell);
        memset_io(&regs->cmd, 0, sizeof(regs->cmd));
@@ -333,7 +333,7 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
         */
        max_wait = jiffies + (max_seconds * HZ);
 try_again:
-       opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
+       opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
        start_time = jiffies;
        do {
                done = ionic_dev_cmd_done(idev);
index 3f543512616ee5eca55bafc36956a952bf12eef3..2a725834f792251d83b7ebb91208584cb84cc6b6 100644 (file)
@@ -49,7 +49,7 @@ extern const int ionic_num_stats_grps;
        (*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
 
 #define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
-       __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+       __le64_to_cpu(*((__le64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
 
 struct ionic_stat_desc {
        char name[ETH_GSTRING_LEN];
index 169ac4f54640dfd4c60e5c72c1ef017a74a80a9e..b3d2250c77d0492fc1ca67ba92c08b67b88e9a4a 100644 (file)
@@ -200,7 +200,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
        if (likely(netdev->features & NETIF_F_RXCSUM)) {
                if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
                        skb->ip_summed = CHECKSUM_COMPLETE;
-                       skb->csum = (__wsum)le16_to_cpu(comp->csum);
+                       skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
                        stats->csum_complete++;
                }
        } else {
@@ -253,19 +253,6 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
        return true;
 }
 
-void ionic_rx_flush(struct ionic_cq *cq)
-{
-       struct ionic_dev *idev = &cq->lif->ionic->idev;
-       u32 work_done;
-
-       work_done = ionic_cq_service(cq, cq->num_descs,
-                                    ionic_rx_service, NULL, NULL);
-
-       if (work_done)
-               ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
-                                  work_done, IONIC_INTR_CRED_RESET_COALESCE);
-}
-
 static int ionic_rx_page_alloc(struct ionic_queue *q,
                               struct ionic_page_info *page_info)
 {
@@ -413,22 +400,20 @@ static void ionic_rx_fill_cb(void *arg)
 void ionic_rx_empty(struct ionic_queue *q)
 {
        struct ionic_desc_info *desc_info;
-       struct ionic_rxq_desc *desc;
-       unsigned int i;
-       u16 idx;
-
-       idx = q->tail_idx;
-       while (idx != q->head_idx) {
-               desc_info = &q->info[idx];
-               desc = desc_info->desc;
-               desc->addr = 0;
-               desc->len = 0;
+       struct ionic_page_info *page_info;
+       unsigned int i, j;
 
-               for (i = 0; i < desc_info->npages; i++)
-                       ionic_rx_page_free(q, &desc_info->pages[i]);
+       for (i = 0; i < q->num_descs; i++) {
+               desc_info = &q->info[i];
+               for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+                       page_info = &desc_info->pages[j];
+                       if (page_info->page)
+                               ionic_rx_page_free(q, page_info);
+               }
 
+               desc_info->npages = 0;
+               desc_info->cb = NULL;
                desc_info->cb_arg = NULL;
-               idx = (idx + 1) & (q->num_descs - 1);
        }
 }
 
@@ -812,6 +797,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
        skb_frag_t *frag;
        bool start, done;
        bool outer_csum;
+       dma_addr_t addr;
        bool has_vlan;
        u16 desc_len;
        u8 desc_nsge;
@@ -893,11 +879,10 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
                        if (frag_left > 0) {
                                len = min(frag_left, left);
                                frag_left -= len;
-                               elem->addr =
-                                   cpu_to_le64(ionic_tx_map_frag(q, frag,
-                                                                 offset, len));
-                               if (dma_mapping_error(dev, elem->addr))
+                               addr = ionic_tx_map_frag(q, frag, offset, len);
+                               if (dma_mapping_error(dev, addr))
                                        goto err_out_abort;
+                               elem->addr = cpu_to_le64(addr);
                                elem->len = cpu_to_le16(len);
                                elem++;
                                desc_nsge++;
index a5883be0413f6da4e6bbef642f049521182e382c..7667b72232b8a32da7a33eb8e58383707544f61b 100644 (file)
@@ -4,7 +4,6 @@
 #ifndef _IONIC_TXRX_H_
 #define _IONIC_TXRX_H_
 
-void ionic_rx_flush(struct ionic_cq *cq);
 void ionic_tx_flush(struct ionic_cq *cq);
 
 void ionic_rx_fill(struct ionic_queue *q);
index 3b6ddc706e92eae265b15a45672367891b8edff9..00f13805c6f753e5f9fedcc82a36f0cdc988c3af 100644 (file)
@@ -4573,7 +4573,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
        }
 
        rtl_irq_disable(tp);
-       napi_schedule_irqoff(&tp->napi);
+       napi_schedule(&tp->napi);
 out:
        rtl_ack_events(tp, status);
 
@@ -4746,7 +4746,7 @@ static int rtl_open(struct net_device *dev)
        rtl_request_firmware(tp);
 
        retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
-                            IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
+                            IRQF_SHARED, dev->name, tp);
        if (retval < 0)
                goto err_release_fw_2;
 
index 9c4df4ede0111eae7e6b46518686c786097ce7b2..bd30505fbc57a6e31ddb431694cda784246acf9e 100644 (file)
@@ -1744,12 +1744,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
        config.flags = 0;
        config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
                                                HWTSTAMP_TX_OFF;
-       if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+       switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
+       case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
-       else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+               break;
+       case RAVB_RXTSTAMP_TYPE_ALL:
                config.rx_filter = HWTSTAMP_FILTER_ALL;
-       else
+               break;
+       default:
                config.rx_filter = HWTSTAMP_FILTER_NONE;
+       }
 
        return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
index 030a1a5afe05ad5b69abf31a15202df38506e72f..dc668ed280b9f46619e3b0abe282146dde599b65 100644 (file)
@@ -657,10 +657,6 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 
        gtp = netdev_priv(dev);
 
-       err = gtp_encap_enable(gtp, data);
-       if (err < 0)
-               return err;
-
        if (!data[IFLA_GTP_PDP_HASHSIZE]) {
                hashsize = 1024;
        } else {
@@ -671,12 +667,16 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 
        err = gtp_hashtable_new(gtp, hashsize);
        if (err < 0)
-               goto out_encap;
+               return err;
+
+       err = gtp_encap_enable(gtp, data);
+       if (err < 0)
+               goto out_hashtable;
 
        err = register_netdevice(dev);
        if (err < 0) {
                netdev_dbg(dev, "failed to register new netdev %d\n", err);
-               goto out_hashtable;
+               goto out_encap;
        }
 
        gn = net_generic(dev_net(dev), gtp_net_id);
@@ -687,11 +687,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 
        return 0;
 
+out_encap:
+       gtp_encap_disable(gtp);
 out_hashtable:
        kfree(gtp->addr_hash);
        kfree(gtp->tid_hash);
-out_encap:
-       gtp_encap_disable(gtp);
        return err;
 }
 
index 43f5f5d93cb06220645435a553687567c980ff11..92642030e7356921e81b0ce133d0123aa9c15dcc 100644 (file)
@@ -397,15 +397,24 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
 
        /* assert(which < trans->tre_count); */
 
-       /* Set the page information for the buffer.  We also need to fill in
-        * the DMA address and length for the buffer (something dma_map_sg()
-        * normally does).
+       /* Commands are quite different from data transfer requests.
+        * Their payloads come from a pool whose memory is allocated
+        * using dma_alloc_coherent().  We therefore do *not* map them
+        * for DMA (unlike what we do for pages and skbs).
+        *
+        * When a transaction completes, the SGL is normally unmapped.
+        * A command transaction has direction DMA_NONE, which tells
+        * gsi_trans_complete() to skip the unmapping step.
+        *
+        * The only things we use directly in a command scatter/gather
+        * entry are the DMA address and length.  We still need the SG
+        * table flags to be maintained though, so assign a NULL page
+        * pointer for that purpose.
         */
        sg = &trans->sgl[which];
-
-       sg_set_buf(sg, buf, size);
+       sg_assign_page(sg, NULL);
        sg_dma_address(sg) = addr;
-       sg_dma_len(sg) = sg->length;
+       sg_dma_len(sg) = size;
 
        info = &trans->info[which];
        info->opcode = opcode;
index fe96ca3c88a5a7a8bc38740c1ff9a4403b4c956f..26cc943d2034046e2ef8e956a53dbe46c0228b8d 100644 (file)
@@ -390,7 +390,7 @@ static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
 }
 
 static struct ism_systemeid SYSTEM_EID = {
-       .seid_string = "IBM-SYSZ-IBMSEID00000000",
+       .seid_string = "IBM-SYSZ-ISMSEID00000000",
        .serial_number = "0000",
        .type = "0000",
 };
index a578634052a3c77366bee7922e8fd16eee564ec7..a932d95be798474e660d9b2dd3849999da69264f 100644 (file)
@@ -4213,10 +4213,12 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
        if (err)
                goto nla_put_failure;
 
-       if (region->port)
-               if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
-                               region->port->index))
+       if (region->port) {
+               err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+                                 region->port->index);
+               if (err)
                        goto nla_put_failure;
+       }
 
        err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
        if (err)
@@ -4265,10 +4267,12 @@ devlink_nl_region_notify_build(struct devlink_region *region,
        if (err)
                goto out_cancel_msg;
 
-       if (region->port)
-               if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
-                               region->port->index))
+       if (region->port) {
+               err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+                                 region->port->index);
+               if (err)
                        goto out_cancel_msg;
+       }
 
        err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
                             region->ops->name);
@@ -4915,8 +4919,10 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
                index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
 
                port = devlink_port_get_by_index(devlink, index);
-               if (!port)
-                       return -ENODEV;
+               if (!port) {
+                       err = -ENODEV;
+                       goto out_unlock;
+               }
        }
 
        region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
@@ -4962,10 +4968,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
        if (err)
                goto nla_put_failure;
 
-       if (region->port)
-               if (nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
-                               region->port->index))
+       if (region->port) {
+               err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
+                                 region->port->index);
+               if (err)
                        goto nla_put_failure;
+       }
 
        err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
        if (err)
index bae4284bf542437fab45b32d5cb5a464d88a63ee..b2bc3d7fe9e806718f6e20ad9fa775ffb83806fb 100644 (file)
@@ -485,6 +485,8 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
                        return true;
                if (tcp_rmem_pressure(sk))
                        return true;
+               if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss)
+                       return true;
        }
        if (sk->sk_prot->stream_memory_read)
                return sk->sk_prot->stream_memory_read(sk);
index fc445833b5e508d20aa52ff68fb4b698b21c4cd7..389d1b34024854a9bdcbe861d4820d1bfb495e24 100644 (file)
@@ -4908,7 +4908,8 @@ void tcp_data_ready(struct sock *sk)
        int avail = tp->rcv_nxt - tp->copied_seq;
 
        if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
-           !sock_flag(sk, SOCK_DONE))
+           !sock_flag(sk, SOCK_DONE) &&
+           tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss)
                return;
 
        sk->sk_data_ready(sk);
index 185dacb3978196c040fc208a8bbeabc56715c7cd..e7419fd15d84c95e87c8834aab4339f2dc883f76 100644 (file)
@@ -274,6 +274,15 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
        skb_ext_reset(skb);
        skb_orphan(skb);
 
+       /* try to fetch required memory from subflow */
+       if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+               if (ssk->sk_forward_alloc < skb->truesize)
+                       goto drop;
+               __sk_mem_reclaim(ssk, skb->truesize);
+               if (!sk_rmem_schedule(sk, skb, skb->truesize))
+                       goto drop;
+       }
+
        /* the skb map_seq accounts for the skb offset:
         * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
         * value
@@ -301,6 +310,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
         * will retransmit as needed, if needed.
         */
        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+drop:
        mptcp_drop(sk, skb);
        return false;
 }
index f40bf9771cb9eb3e030c33147dc2a96d8637f5f6..5c7456e5b5cf04643c5421a05467092c75398f44 100644 (file)
@@ -426,6 +426,7 @@ static void __exit mpls_cleanup_module(void)
 module_init(mpls_init_module);
 module_exit(mpls_cleanup_module);
 
+MODULE_SOFTDEP("post: mpls_gso");
 MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MPLS manipulation actions");
index faeabff283a2b6cb97ad84ff90af38455eae3292..838b3fd94d77683cf3164734300cf5d7b5631c47 100644 (file)
@@ -652,12 +652,12 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
                               block_cb->indr.binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               &extack);
+       rtnl_lock();
        down_write(&block->cb_lock);
        list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
-       up_write(&block->cb_lock);
-       rtnl_lock();
        tcf_block_unbind(block, &bo);
+       up_write(&block->cb_lock);
        rtnl_unlock();
 }
 
index 84f82771cdf5dce6552bd790242a8c0b8b320e06..0c345e43a09a3738e15ab5de81ae37effcff01e5 100644 (file)
@@ -330,7 +330,7 @@ static s64 tabledist(s64 mu, s32 sigma,
 
        /* default uniform distribution */
        if (dist == NULL)
-               return ((rnd % (2 * sigma)) + mu) - sigma;
+               return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
 
        t = dist->table[rnd % dist->size];
        x = (sigma % NETEM_DIST_SCALE) * t;
@@ -812,6 +812,10 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
                q->slot_config.max_packets = INT_MAX;
        if (q->slot_config.max_bytes == 0)
                q->slot_config.max_bytes = INT_MAX;
+
+       /* capping dist_jitter to the range acceptable by tabledist() */
+       q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
+
        q->slot.packets_left = q->slot_config.max_packets;
        q->slot.bytes_left = q->slot_config.max_bytes;
        if (q->slot_config.min_delay | q->slot_config.max_delay |
@@ -1037,6 +1041,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
        if (tb[TCA_NETEM_SLOT])
                get_slot(q, tb[TCA_NETEM_SLOT]);
 
+       /* capping jitter to the range acceptable by tabledist() */
+       q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
+
        return ret;
 
 get_table_failure:
index 82be0bd0f6e8135a5d159e8a1620ef23c3986413..e9f487c8c6d55d3c6b335a5b105f3b382ff1d8ca 100644 (file)
@@ -1317,10 +1317,10 @@ static void smc_listen_out_err(struct smc_sock *new_smc)
 
 /* listen worker: decline and fall back if possible */
 static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
-                              struct smc_init_info *ini, u8 version)
+                              int local_first, u8 version)
 {
        /* RDMA setup failed, switch back to TCP */
-       if (ini->first_contact_local)
+       if (local_first)
                smc_lgr_cleanup_early(&new_smc->conn);
        else
                smc_conn_free(&new_smc->conn);
@@ -1768,7 +1768,8 @@ static void smc_listen_work(struct work_struct *work)
 out_unlock:
        mutex_unlock(&smc_server_lgr_pending);
 out_decl:
-       smc_listen_decline(new_smc, rc, ini, version);
+       smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
+                          version);
 out_free:
        kfree(ini);
        kfree(buf);
index d790c43c473f2a5881d572d11a74067e3ae878f6..2b19863f7171b0acb27d76d1e99f62939c22722a 100644 (file)
@@ -1615,8 +1615,11 @@ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
                rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
                if (rc) {
                        kfree(buf_desc);
-                       return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) :
-                                                ERR_PTR(-EIO);
+                       if (rc == -ENOMEM)
+                               return ERR_PTR(-EAGAIN);
+                       if (rc == -ENOSPC)
+                               return ERR_PTR(-ENOSPC);
+                       return ERR_PTR(-EIO);
                }
                buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
                /* CDC header stored in buf. So, pretend it was smaller */
index 2a78aa70157281f1e9771f0bcc426fa73d862cb3..32c79c59052b688e93e9697f67623831169a1299 100644 (file)
@@ -150,12 +150,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        if (fragid == FIRST_FRAGMENT) {
                if (unlikely(head))
                        goto err;
-               if (skb_cloned(frag))
-                       frag = skb_copy(frag, GFP_ATOMIC);
+               *buf = NULL;
+               frag = skb_unshare(frag, GFP_ATOMIC);
                if (unlikely(!frag))
                        goto err;
                head = *headbuf = frag;
-               *buf = NULL;
                TIPC_SKB_CB(head)->tail = NULL;
                if (skb_is_nonlinear(head)) {
                        skb_walk_frags(head, tail) {
index 9e93bc201cc07b26285e7f10d5502a2a6166db6b..b4d7b8aba0037b7fb00e0f8015c3f7b95e1ea42a 100644 (file)
@@ -739,7 +739,7 @@ static struct sock *__vsock_create(struct net *net,
                vsk->buffer_min_size = psk->buffer_min_size;
                vsk->buffer_max_size = psk->buffer_max_size;
        } else {
-               vsk->trusted = capable(CAP_NET_ADMIN);
+               vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
                vsk->owner = get_current_cred();
                vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
                vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;