/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)

#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset; /* Byte offset */
};

enum {
	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};
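/* Illustrative sketch (not part of the original header): a stats group
 * typically pairs an array of counter_desc entries, built with the
 * MLX5E_DECLARE_* macros above, with the MLX5E_READ_CTR* accessors, which
 * read the counter back through the recorded byte offset.  The names
 * my_sw_stats_desc, data and idx below are hypothetical:
 *
 *	static const struct counter_desc my_sw_stats_desc[] = {
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(my_sw_stats_desc); i++)
 *		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
 *						   my_sw_stats_desc, i);
 *
 * Per-ring variants such as MLX5E_DECLARE_RX_STAT() embed a "%d" in the
 * format string so that fill_strings() can print the ring index into the
 * reported counter name.
 */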
struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};
typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;

#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name

#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
	void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)

#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp

#define MLX5E_DECLARE_STATS_GRP(grp) \
	const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)
#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
MLX5E_DECLARE_STATS_GRP(grp) = { \
	.get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
	.fill_stats    = MLX5E_STATS_GRP_OP(grp, fill_stats), \
	.fill_strings  = MLX5E_STATS_GRP_OP(grp, fill_strings), \
	.update_stats  = MLX5E_STATS_GRP_OP(grp, update_stats), \
	.update_stats_mask = mask, \
}
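/* Illustrative sketch (not part of the original header): a hypothetical
 * group "foo" would implement the four ops with the DECLARE_*_OP macros
 * above and then instantiate its descriptor with MLX5E_DEFINE_STATS_GRP in
 * a .c file:
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(foo)
 *	{
 *		return 1;
 *	}
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(foo)
 *	{
 *		strcpy(data + (idx++) * ETH_GSTRING_LEN, "foo_counter");
 *		return idx;
 *	}
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(foo)
 *	{
 *		data[idx++] = 0;
 *		return idx;
 *	}
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(foo) {}
 *
 *	MLX5E_DEFINE_STATS_GRP(foo, MLX5E_NDO_UPDATE_STATS);
 *
 * The mask argument is stored in update_stats_mask; MLX5E_NDO_UPDATE_STATS
 * marks groups refreshed by mlx5e_stats_update_ndo_stats().
 */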
unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats);
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats);

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats);
void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats);
void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats);
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges);
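/* Illustrative sketch (not part of the original header): an ethtool_ops
 * implementation would typically drive the helpers above as follows (the
 * my_* function names are hypothetical):
 *
 *	static int my_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		struct mlx5e_priv *priv = netdev_priv(dev);
 *
 *		return sset == ETH_SS_STATS ? mlx5e_stats_total_num(priv) : 0;
 *	}
 *
 *	static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			mlx5e_stats_fill_strings(netdev_priv(dev), data);
 *	}
 *
 *	static void my_get_ethtool_stats(struct net_device *dev,
 *					 struct ethtool_stats *stats, u64 *data)
 *	{
 *		struct mlx5e_priv *priv = netdev_priv(dev);
 *
 *		mlx5e_stats_update(priv);
 *		mlx5e_stats_fill(priv, data, 0);
 *	}
 */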
/* Concrete NIC Stats */

struct mlx5e_sw_stats {
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 tx_added_vlan_packets;
	u64 rx_gro_match_packets;
	u64 rx_gro_large_hds;
	u64 rx_mcast_packets;
	u64 rx_removed_vlan_packets;
	u64 rx_csum_unnecessary;
	u64 rx_csum_complete;
	u64 rx_csum_complete_tail;
	u64 rx_csum_complete_tail_slow;
	u64 rx_csum_unnecessary_inner;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_dropped;
	u64 rx_mpwqe_filler_cqes;
	u64 rx_mpwqe_filler_strides;
	u64 rx_oversize_pkts_sw_drop;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
#ifdef CONFIG_PAGE_POOL_STATS
	u64 rx_pp_alloc_fast;
	u64 rx_pp_alloc_slow;
	u64 rx_pp_alloc_slow_high_order;
	u64 rx_pp_alloc_empty;
	u64 rx_pp_alloc_refill;
	u64 rx_pp_alloc_waive;
	u64 rx_pp_recycle_cached;
	u64 rx_pp_recycle_cache_full;
	u64 rx_pp_recycle_ring;
	u64 rx_pp_recycle_ring_full;
	u64 rx_pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_dump_packets;
	u64 tx_tls_dump_bytes;
	u64 tx_tls_resync_bytes;
	u64 tx_tls_skip_no_sync_data;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;

	u64 rx_tls_decrypted_packets;
	u64 rx_tls_decrypted_bytes;
	u64 rx_tls_resync_req_pkt;
	u64 rx_tls_resync_req_start;
	u64 rx_tls_resync_req_end;
	u64 rx_tls_resync_req_skip;
	u64 rx_tls_resync_res_ok;
	u64 rx_tls_resync_res_retry;
	u64 rx_tls_resync_res_skip;
#endif
	u64 rx_xsk_csum_complete;
	u64 rx_xsk_csum_unnecessary;
	u64 rx_xsk_csum_unnecessary_inner;
	u64 rx_xsk_csum_none;
	u64 rx_xsk_removed_vlan_packets;
	u64 rx_xsk_xdp_redirect;
	u64 rx_xsk_mpwqe_filler_cqes;
	u64 rx_xsk_mpwqe_filler_strides;
	u64 rx_xsk_oversize_pkts_sw_drop;
	u64 rx_xsk_buff_alloc_err;
	u64 rx_xsk_cqe_compress_blks;
	u64 rx_xsk_cqe_compress_pkts;
	u64 rx_xsk_congst_umr;
};
struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
	u32 rx_if_down_packets;
};
#define VNIC_ENV_GET(vnic_env_stats, c) \
	MLX5_GET(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
		 vport_env.c)
struct mlx5e_vnic_env_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						vstats->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
};
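/* Illustrative sketch (not part of the original header): the PPORT_*_GET
 * macros compose the _high/_low halves of a 64-bit PPCNT counter through
 * MLX5_GET64.  For example, once the IEEE 802.3 counter group has been
 * queried into pstats->IEEE_802_3_counters, one could read:
 *
 *	u64 tx_frames = PPORT_802_3_GET(&priv->stats.pport,
 *					a_frames_transmitted_ok);
 *
 * (the field name follows eth_802_3_cntrs_grp_data_layout in mlx5_ifc.h).
 */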
#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
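/* Illustrative sketch (not part of the original header): PCIE_PERF_GET
 * reads a plain 32-bit field from the MPCNT PCIe performance group, while
 * PCIE_PERF_GET64 composes a 64-bit counter from its _high/_low halves.
 * The field names below are assumed to exist in
 * pcie_perf_cntrs_grp_data_layout in mlx5_ifc.h:
 *
 *	u32 sig_err  = PCIE_PERF_GET(&priv->stats.pcie, rx_errors);
 *	u64 overflow = PCIE_PERF_GET64(&priv->stats.pcie,
 *				       tx_overflow_buffer_pkt);
 */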
struct mlx5e_rq_stats {
	u64 csum_complete_tail;
	u64 csum_complete_tail_slow;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 gro_match_packets;
	u64 removed_vlan_packets;
	u64 mpwqe_filler_cqes;
	u64 mpwqe_filler_strides;
	u64 oversize_pkts_sw_drop;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
#ifdef CONFIG_PAGE_POOL_STATS
	u64 pp_alloc_slow_high_order;
	u64 pp_recycle_cached;
	u64 pp_recycle_cache_full;
	u64 pp_recycle_ring_full;
	u64 pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_decrypted_packets;
	u64 tls_decrypted_bytes;
	u64 tls_resync_req_pkt;
	u64 tls_resync_req_start;
	u64 tls_resync_req_end;
	u64 tls_resync_req_skip;
	u64 tls_resync_res_ok;
	u64 tls_resync_res_retry;
	u64 tls_resync_res_skip;
#endif
};
struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 tso_inner_packets;
	u64 csum_partial_inner;
	u64 added_vlan_packets;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_encrypted_packets;
	u64 tls_encrypted_bytes;
	u64 tls_dump_packets;
	u64 tls_resync_bytes;
	u64 tls_skip_no_sync_data;
	u64 tls_drop_no_sync_data;
	u64 tls_drop_bypass_req;
#endif
	/* less likely accessed in data path */
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};
struct mlx5e_xdpsq_stats {
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};
struct mlx5e_ch_stats {
};
struct mlx5e_ptp_cq_stats {
	u64 abort_abs_diff_ns;
};

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vnic_env_stats vnic;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);

extern MLX5E_DECLARE_STATS_GRP(sw);
extern MLX5E_DECLARE_STATS_GRP(qcnt);
extern MLX5E_DECLARE_STATS_GRP(vnic_env);
extern MLX5E_DECLARE_STATS_GRP(vport);
extern MLX5E_DECLARE_STATS_GRP(802_3);
extern MLX5E_DECLARE_STATS_GRP(2863);
extern MLX5E_DECLARE_STATS_GRP(2819);
extern MLX5E_DECLARE_STATS_GRP(phy);
extern MLX5E_DECLARE_STATS_GRP(eth_ext);
extern MLX5E_DECLARE_STATS_GRP(pcie);
extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);

#endif /* __MLX5_EN_STATS_H__ */