u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
u64 rx_xdp_tx;
+ u64 rx_xdp_tx_cqe;
u64 rx_xdp_tx_full;
u64 tx_csum_none;
u64 tx_csum_partial;
u64 tx_queue_dropped;
u64 tx_xmit_more;
u64 tx_recover;
+ u64 tx_cqes;
u64 tx_queue_wake;
+ u64 tx_udp_seg_rem;
u64 tx_cqe_err;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 rx_congst_umr;
+ u64 ch_poll;
+ u64 ch_arm;
+ u64 ch_aff_change;
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_ooo;
u64 tx_tls_resync_bytes;
#endif
};
struct mlx5e_rq_stats {
u64 removed_vlan_packets;
u64 xdp_drop;
u64 xdp_tx;
+ u64 xdp_tx_cqe;
u64 xdp_tx_full;
u64 wqe_err;
u64 mpwqe_filler;
u64 cache_empty;
u64 cache_busy;
u64 cache_waive;
+ u64 congst_umr;
};
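For orientation, the per-RQ counters above are summed into the rx_-prefixed fields of the aggregate software-stats struct at the top of this file (mlx5e_sw_stats in the driver). A minimal sketch of that fold, where the helper name is illustrative and only the field names come from this patch:

/* Illustrative only: fold one RQ's counters into the aggregate sw stats.
 * The new xdp_tx_cqe and congst_umr counters follow the same pattern as
 * the existing XDP and cache counters.
 */
static void sw_stats_fold_rq(struct mlx5e_sw_stats *s,
			     const struct mlx5e_rq_stats *rq_stats)
{
	s->rx_xdp_drop    += rq_stats->xdp_drop;
	s->rx_xdp_tx      += rq_stats->xdp_tx;
	s->rx_xdp_tx_cqe  += rq_stats->xdp_tx_cqe;	/* new in this patch */
	s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
	s->rx_congst_umr  += rq_stats->congst_umr;	/* new in this patch */
}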
struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
#endif
u64 dropped;
u64 recover;
/* dirtied @completion */
- u64 wake ____cacheline_aligned_in_smp;
+ u64 cqes ____cacheline_aligned_in_smp;
+ u64 wake;
u64 cqe_err;
};
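Note that the new cqes counter sits first after the "dirtied @completion" marker and takes over the ____cacheline_aligned_in_smp attribute from wake, so the fields written by the completion path start on their own cacheline and do not false-share with xmit-path counters such as nop and udp_seg_rem. A rough sketch of where such counters would be bumped; the containing struct and helper are hypothetical, not driver symbols:

/* Illustrative only: completion-path updates dirty the cacheline that
 * starts at 'cqes', while the xmit path keeps writing the earlier fields.
 */
struct my_txq {
	struct mlx5e_sq_stats stats;
};

static void my_txq_poll_cq(struct my_txq *sq, int cqes_seen, bool was_stopped)
{
	sq->stats.cqes += cqes_seen;	/* dirtied @completion */
	if (was_stopped)
		sq->stats.wake++;	/* ring restarted after being full */
}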
struct mlx5e_ch_stats {
+ u64 poll;
+ u64 arm;
+ u64 aff_change;
u64 eq_rearm;
};
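A rough sketch of where the new per-channel counters would typically be incremented in a NAPI poll handler; the function and its arguments are illustrative, only the counter names come from this patch:

/* Illustrative only: one 'poll' per invocation, 'aff_change' when the
 * handler yields because the IRQ affinity moved, 'arm' when completion
 * interrupts are re-enabled after all work finished.
 */
static int my_channel_poll(struct mlx5e_ch_stats *ch, int budget,
			   int work_done, bool affinity_changed)
{
	ch->poll++;				/* every NAPI invocation */

	if (work_done == budget) {		/* still busy */
		if (affinity_changed)
			ch->aff_change++;	/* yield so the IRQ can follow its new CPU */
		return budget;
	}

	ch->arm++;				/* about to re-arm completion interrupts */
	return work_done;
}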