net/mlx5e: Avoid resetting netdev stats on configuration changes
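
This patch converts the per-RQ statistics from a struct embedded in
struct mlx5e_rq to a pointer (every rq->stats.field access becomes
rq->stats->field), so the counters can live in storage that outlives
channel reconfiguration instead of being freed, and thereby zeroed,
together with the RQ. Functions that bump several counters also cache
the pointer in a local stats variable. The sketch below models the
ownership change in isolation; it is a standalone illustration, and
all demo_* names plus the per-channel-array layout are assumptions,
not code from this patch or the driver:

	/*
	 * Standalone sketch of the pattern this patch applies: counters
	 * owned by a long-lived object and merely referenced by
	 * short-lived queues survive queue re-creation.  All demo_*
	 * names and the per-channel array layout are illustrative.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct demo_rq_stats {
		unsigned long packets;
		unsigned long bytes;
	};

	struct demo_priv {
		/* Persistent: allocated once for the netdev's lifetime. */
		struct demo_rq_stats channel_stats[4];
	};

	struct demo_rq {
		/* A pointer into priv->channel_stats rather than an
		 * embedded struct, so destroying the RQ no longer
		 * zeroes the counters.
		 */
		struct demo_rq_stats *stats;
	};

	static struct demo_rq *demo_rq_create(struct demo_priv *priv, int ix)
	{
		struct demo_rq *rq = calloc(1, sizeof(*rq));

		if (!rq)
			abort();
		rq->stats = &priv->channel_stats[ix];
		return rq;
	}

	static void demo_rx(struct demo_rq *rq, unsigned int bytes)
	{
		struct demo_rq_stats *stats = rq->stats; /* fast-path caching */

		stats->packets++;
		stats->bytes += bytes;
	}

	int main(void)
	{
		struct demo_priv priv = { 0 };
		struct demo_rq *rq = demo_rq_create(&priv, 0);

		demo_rx(rq, 1500);
		free(rq);			/* "configuration change" tears the RQ down */
		rq = demo_rq_create(&priv, 0);	/* ... and re-creates it */
		demo_rx(rq, 64);

		printf("packets=%lu bytes=%lu\n",
		       rq->stats->packets, rq->stats->bytes);
		free(rq);
		return 0;
	}

With an embedded-struct layout the second demo_rx() would have started
from zero; here it prints packets=2 bytes=1564 because the counters
live in priv and survive the free/re-create cycle.
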
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ac54380d41e41c19bdb92afc5283c7c9f2d213b3..bfef73b37fbc2f92ca6367ba57d303882d5f7b48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -65,7 +65,7 @@ static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
        mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
        cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
        cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
-       rq->stats.cqe_compress_blks++;
+       rq->stats->cqe_compress_blks++;
 }
 
 static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
@@ -146,7 +146,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
        mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
        cq->wq.cc = cqcc;
        cq->decmprs_left -= cqe_count;
-       rq->stats.cqe_compress_pkts += cqe_count;
+       rq->stats->cqe_compress_pkts += cqe_count;
 
        return cqe_count;
 }
@@ -176,14 +176,15 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 {
        struct mlx5e_page_cache *cache = &rq->page_cache;
        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
+       struct mlx5e_rq_stats *stats = rq->stats;
 
        if (tail_next == cache->head) {
-               rq->stats.cache_full++;
+               stats->cache_full++;
                return false;
        }
 
        if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
-               rq->stats.cache_waive++;
+               stats->cache_waive++;
                return false;
        }
 
@@ -196,20 +197,21 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
 {
        struct mlx5e_page_cache *cache = &rq->page_cache;
+       struct mlx5e_rq_stats *stats = rq->stats;
 
        if (unlikely(cache->head == cache->tail)) {
-               rq->stats.cache_empty++;
+               stats->cache_empty++;
                return false;
        }
 
        if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
-               rq->stats.cache_busy++;
+               stats->cache_busy++;
                return false;
        }
 
        *dma_info = cache->page_cache[cache->head];
        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
-       rq->stats.cache_reuse++;
+       stats->cache_reuse++;
 
        dma_sync_single_for_device(rq->pdev, dma_info->addr,
                                   RQ_PAGE_SIZE(rq),
@@ -294,7 +296,7 @@ static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq,
                                           struct mlx5e_wqe_frag_info *wi)
 {
        if (mlx5e_page_reuse(rq, wi)) {
-               rq->stats.page_reuse++;
+               rq->stats->page_reuse++;
                return;
        }
 
@@ -452,7 +454,7 @@ err_unmap:
                dma_info--;
                mlx5e_page_release(rq, dma_info, true);
        }
-       rq->stats.buff_alloc_err++;
+       rq->stats->buff_alloc_err++;
 
        return err;
 }
@@ -480,7 +482,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 
                err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head);
                if (unlikely(err)) {
-                       rq->stats.buff_alloc_err++;
+                       rq->stats->buff_alloc_err++;
                        break;
                }
 
@@ -652,6 +654,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct sk_buff *skb,
                                     bool   lro)
 {
+       struct mlx5e_rq_stats *stats = rq->stats;
        int network_depth = 0;
 
        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
@@ -659,7 +662,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 
        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-               rq->stats.csum_unnecessary++;
+               stats->csum_unnecessary++;
                return;
        }
 
@@ -674,7 +677,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
-               rq->stats.csum_complete++;
+               stats->csum_complete++;
                return;
        }
 
@@ -684,15 +687,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
                        skb->encapsulation = 1;
-                       rq->stats.csum_unnecessary_inner++;
+                       stats->csum_unnecessary_inner++;
                        return;
                }
-               rq->stats.csum_unnecessary++;
+               stats->csum_unnecessary++;
                return;
        }
 csum_none:
        skb->ip_summed = CHECKSUM_NONE;
-       rq->stats.csum_none++;
+       stats->csum_none++;
 }
 
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
@@ -701,6 +704,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
 {
        u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+       struct mlx5e_rq_stats *stats = rq->stats;
        struct net_device *netdev = rq->netdev;
 
        skb->mac_len = ETH_HLEN;
@@ -710,9 +714,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                /* Subtract one since we already counted this as one
                 * "regular" packet in mlx5e_complete_rx_cqe()
                 */
-               rq->stats.packets += lro_num_seg - 1;
-               rq->stats.lro_packets++;
-               rq->stats.lro_bytes += cqe_bcnt;
+               stats->packets += lro_num_seg - 1;
+               stats->lro_packets++;
+               stats->lro_bytes += cqe_bcnt;
        }
 
        if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
@@ -727,7 +731,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        if (cqe_has_vlan(cqe)) {
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));
-               rq->stats.removed_vlan_packets++;
+               stats->removed_vlan_packets++;
        }
 
        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
@@ -741,8 +745,10 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
 {
-       rq->stats.packets++;
-       rq->stats.bytes += cqe_bcnt;
+       struct mlx5e_rq_stats *stats = rq->stats;
+
+       stats->packets++;
+       stats->bytes += cqe_bcnt;
        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
 }
 
@@ -774,10 +780,12 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
        dma_addr_t dma_addr  = di->addr + data_offset;
        unsigned int dma_len = xdp->data_end - xdp->data;
 
+       struct mlx5e_rq_stats *stats = rq->stats;
+
        prefetchw(wqe);
 
        if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
-               rq->stats.xdp_drop++;
+               stats->xdp_drop++;
                return false;
        }
 
@@ -787,7 +795,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
                        mlx5e_xmit_xdp_doorbell(sq);
                        sq->db.doorbell = false;
                }
-               rq->stats.xdp_tx_full++;
+               stats->xdp_tx_full++;
                return false;
        }
 
@@ -821,7 +829,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 
        sq->db.doorbell = true;
 
-       rq->stats.xdp_tx++;
+       stats->xdp_tx++;
        return true;
 }
 
@@ -868,7 +876,7 @@ static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
        case XDP_ABORTED:
                trace_xdp_exception(rq->netdev, prog, act);
        case XDP_DROP:
-               rq->stats.xdp_drop++;
+               rq->stats->xdp_drop++;
                return true;
        }
 }
@@ -881,7 +889,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
        struct sk_buff *skb = build_skb(va, frag_size);
 
        if (unlikely(!skb)) {
-               rq->stats.buff_alloc_err++;
+               rq->stats->buff_alloc_err++;
                return NULL;
        }
 
@@ -913,7 +921,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
        wi->offset += frag_size;
 
        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-               rq->stats.wqe_err++;
+               rq->stats->wqe_err++;
                return NULL;
        }
 
@@ -1030,7 +1038,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
        skb = napi_alloc_skb(rq->cq.napi,
                             ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long)));
        if (unlikely(!skb)) {
-               rq->stats.buff_alloc_err++;
+               rq->stats->buff_alloc_err++;
                return NULL;
        }
 
@@ -1116,12 +1124,12 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        wi->consumed_strides += cstrides;
 
        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-               rq->stats.wqe_err++;
+               rq->stats->wqe_err++;
                goto mpwrq_cqe_out;
        }
 
        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
-               rq->stats.mpwqe_filler++;
+               rq->stats->mpwqe_filler++;
                goto mpwrq_cqe_out;
        }
 
@@ -1276,6 +1284,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
 {
+       struct mlx5e_rq_stats *stats = rq->stats;
        struct hwtstamp_config *tstamp;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
@@ -1337,9 +1346,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
        skb->dev = netdev;
 
-       rq->stats.csum_complete++;
-       rq->stats.packets++;
-       rq->stats.bytes += cqe_bcnt;
+       stats->csum_complete++;
+       stats->packets++;
+       stats->bytes += cqe_bcnt;
 }
 
 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
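
A note on the pattern in these hunks: functions that touch several
counters (mlx5e_rx_cache_put/get, mlx5e_handle_csum,
mlx5e_build_rx_skb, mlx5e_complete_rx_cqe, mlx5e_xmit_xdp_frame,
mlx5i_complete_rx_cqe) first load the pointer into a local
struct mlx5e_rq_stats *stats, so the fast path dereferences rq->stats
once rather than per counter, while one-off sites such as
mlx5e_xdp_handle and the error paths keep the direct rq->stats->field
form. The other half of the change, turning the stats member of
struct mlx5e_rq into a pointer and assigning it to persistent storage
at RQ init (sketched after the subject line above), does not belong to
en_rx.c and so does not appear in these hunks; it presumably lives in
the driver's header and channel-setup code.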