drivers: mellanox: use netdev_xmit_more() helper
author    Florian Westphal <fw@strlen.de>
Mon, 1 Apr 2019 14:42:15 +0000 (16:42 +0200)
committer David S. Miller <davem@davemloft.net>
Tue, 2 Apr 2019 01:35:02 +0000 (18:35 -0700)
The skb->xmit_more hint is now always 0, and the field is on its way out.
Switch the mellanox drivers to the netdev_xmit_more() helper, which reads
the hint from per-cpu softnet data instead.
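
For context, the helper reads a per-cpu flag that the core stack sets around
a batch of transmissions, rather than a field on the skb, and is only
meaningful from within ndo_start_xmit(). A minimal sketch of its shape, as it
appears in include/linux/netdevice.h at the time of this change:

static inline bool netdev_xmit_more(void)
{
	/* set by the core stack before it calls ndo_start_xmit() */
	return __this_cpu_read(softnet_data.xmit_more);
}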

Cc: Saeed Mahameed <saeedm@mellanox.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Boris Pismenny <borisp@mellanox.com>
Cc: Ilya Lesokhin <ilyal@mellanox.com>
Cc: Eran Ben Elisha <eranbe@mellanox.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index fba54fb06e18cde73d80891df15b88687753daee..36a92b19e613d8dba0a5f57461e7de3d09635310 100644
@@ -1042,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
        send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
                                               tx_info->nr_bytes,
-                                              skb->xmit_more);
+                                              netdev_xmit_more());
 
        real_size = (real_size / 16) & 0x3f;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 9e71cf03369cc418000b60096fad9ee88dc770cb..3e1ea8b42c772d2d1da240eb12963034004196c5 100644
@@ -772,7 +772,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-                         struct mlx5e_tx_wqe *wqe, u16 pi);
+                         struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
 
 void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index be137d4a91692026acf2a2bce299667fc7228dcc..439bf5953885eb25f2aba6dd4aa1cef4cc0f4ecf 100644
@@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
         */
        nskb->ip_summed = CHECKSUM_PARTIAL;
 
-       nskb->xmit_more = 1;
        nskb->queue_mapping = skb->queue_mapping;
 }
 
@@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
        sq->stats->tls_resync_bytes += nskb->len;
        mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
                                    cpu_to_be64(info.rcd_sn));
-       mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+       mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
        mlx5e_sq_fetch_wqe(sq, wqe, pi);
        return skb;
 
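The resync path above injects nskb ahead of the skb it is currently handling,
so it can assert the hint unconditionally: the original skb always follows.
What the removed nskb->xmit_more = 1 expressed by poking the (now dead) skb
field is carried by the new parameter instead; an annotated reading of the
new call sequence:

	mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);	/* true: the original skb follows */
	mlx5e_sq_fetch_wqe(sq, wqe, pi);		/* fresh wqe/pi for that original skb */
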
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 41e2a01d3713f36bb956a566980a467e2d980f04..40f3f98aa279c7c8c1dd96abf4778d0c1bd694bf 100644
@@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
 static inline void
 mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
-                    struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+                    struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+                    bool xmit_more)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
 
@@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                sq->stats->stopped++;
        }
 
-       if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+       if (!xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 }
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-                         struct mlx5e_tx_wqe *wqe, u16 pi)
+                         struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5_wqe_ctrl_seg *cseg;
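
The hunk in mlx5e_txwqe_complete() above keeps the doorbell-batching logic
intact and only changes where the hint comes from: the doorbell (an expensive
MMIO write) is deferred while the stack promises more frames, but must still
fire when the queue has just been stopped, since no later xmit call would
arrive to flush the pending WQEs. A minimal self-contained sketch of that
pattern, with hypothetical example_* names standing in for the mlx5e
internals:

#include <linux/netdevice.h>

struct example_sq {
	u16 pc;				/* producer counter */
	struct netdev_queue *txq;	/* paired stack TX queue */
};

/* hypothetical stand-in for mlx5e_notify_hw(): a single MMIO write
 * that tells the NIC about every WQE posted so far */
static void example_ring_doorbell(struct example_sq *sq)
{
}

static void example_txwqe_complete(struct example_sq *sq, bool xmit_more)
{
	sq->pc++;	/* WQE is already written; publish it */

	/* Defer the doorbell while more frames are promised, but never
	 * across a queue stop: nothing would ring it afterwards. */
	if (!xmit_more || netif_xmit_stopped(sq->txq))
		example_ring_doorbell(sq);
}
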
@@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        stats->bytes     += num_bytes;
-       stats->xmit_more += skb->xmit_more;
+       stats->xmit_more += netdev_xmit_more();
 
        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
@@ -423,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-                            num_dma, wi, cseg);
+                            num_dma, wi, cseg, xmit_more);
 
        return NETDEV_TX_OK;
 
@@ -449,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(!skb))
                return NETDEV_TX_OK;
 
-       return mlx5e_sq_xmit(sq, skb, wqe, pi);
+       return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
 }
 
 static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
@@ -659,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        stats->bytes     += num_bytes;
-       stats->xmit_more += skb->xmit_more;
+       stats->xmit_more += netdev_xmit_more();
 
        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
@@ -704,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-                            num_dma, wi, cseg);
+                            num_dma, wi, cseg, false);
 
        return NETDEV_TX_OK;
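
One asymmetry worth noting: in mlx5i_sq_xmit() (the IPoIB path) the stats
above still count netdev_xmit_more(), yet the completion call passes false,
so this path always rings the doorbell and opts out of batching via the new
hint; an annotated reading of the call:

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg, false);	/* false: ring the doorbell for every frame */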