/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_SW_COUNTERS;
}

static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}
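
/* Software counters: the update below folds the per-channel rq/sq/xdpsq/ch
 * counters into the single priv->stats.sw copy reported by this group.
 */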

static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
		int j;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_ecn_mark += rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_redirect += rq_stats->xdp_redirect;
		s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
		s->rx_xdp_tx_full += xdpsq_stats->full;
		s->rx_xdp_tx_err += xdpsq_stats->err;
		s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
		s->rx_wqe_err += rq_stats->wqe_err;
		s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_page_reuse += rq_stats->page_reuse;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;
		s->rx_congst_umr += rq_stats->congst_umr;
		s->rx_arfs_err += rq_stats->arfs_err;
		s->ch_events += ch_stats->events;
		s->ch_poll += ch_stats->poll;
		s->ch_arm += ch_stats->arm;
		s->ch_aff_change += ch_stats->aff_change;
		s->ch_eq_rearm += ch_stats->eq_rearm;

		s->tx_xdp_xmit += xdpsq_red_stats->xmit;
		s->tx_xdp_full += xdpsq_red_stats->full;
		s->tx_xdp_err += xdpsq_red_stats->err;
		s->tx_xdp_cqes += xdpsq_red_stats->cqes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
			s->tx_tso_packets += sq_stats->tso_packets;
			s->tx_tso_bytes += sq_stats->tso_bytes;
			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_nop += sq_stats->nop;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_queue_dropped += sq_stats->dropped;
			s->tx_cqe_err += sq_stats->cqe_err;
			s->tx_recover += sq_stats->recover;
			s->tx_xmit_more += sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none += sq_stats->csum_none;
			s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
			s->tx_tls_ooo += sq_stats->tls_ooo;
			s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
			s->tx_cqes += sq_stats->cqes;
		}
	}
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}
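
/* The queue counters (rx_out_of_buffer, rx_if_down_packets) are not
 * maintained by the driver; they are read from firmware with the
 * QUERY_Q_COUNTER command in the update below.
 */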

static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];

	if (priv->q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
				       sizeof(out)))
		qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
						  out, out_of_buffer);
	if (priv->drop_rq_q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
				       out, sizeof(out)))
		qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
						    out_of_buffer);
}
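
/* vNIC environment counters are queried from the device with the
 * QUERY_VNIC_ENV command and are exposed only when the
 * nic_receive_steering_discard capability is set.
 */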

#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

#define NUM_VNIC_ENV_COUNTERS		ARRAY_SIZE(vnic_env_stats_desc)

static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
{
	return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
		NUM_VNIC_ENV_COUNTERS : 0;
}

static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return idx;

	for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return idx;

	for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_desc, i);
	return idx;
}

static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)

static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_VPORT_COUNTERS;
}

static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}
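
/* The port counter groups below (IEEE 802.3, RFC 2863, RFC 2819, physical
 * layer, extended) are all read through the PPCNT access register, one
 * counter group per query.
 */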

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS	ARRAY_SIZE(pport_2863_stats_desc)

static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2863_COUNTERS;
}

static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS	ARRAY_SIZE(pport_2819_stats_desc)

static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2819_COUNTERS;
}

static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
					  int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
					int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
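
/* The pause/PFC strings depend on the current global pause and per-priority
 * PFC configuration, so the size of this group is computed at runtime from
 * the port queries below.
 */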

static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
				    int idx)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_ipsec_get_count(priv);
}

static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	return idx + mlx5e_ipsec_get_strings(priv,
					     data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}

static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_update_stats(priv);
}

static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_tls_get_count(priv);
}

static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
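
/* Per-channel counters: for every channel we expose the ch, rq and rq_xdpsq
 * counter sets, one sq counter set per opened TC, and the XDP-redirect sq
 * counters. The count here must match the layout used by
 * mlx5e_grp_channels_fill_strings()/_fill_stats() below.
 */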

static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
	int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch);
}

static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					priv->channel_tc2txq[i][tc]);

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);

	return idx;
}

static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);

	return idx;
}
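
/* Each group sizes the string/data arrays via get_num_stats() and then
 * appends its entries at a running index in fill_strings()/fill_stats(),
 * so the two fill callbacks of a group must walk the counters in the same
 * order.
 */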
/* The stats groups order is opposite to the update_stats() order calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
	{
		.get_num_stats = mlx5e_grp_sw_get_num_stats,
		.fill_strings = mlx5e_grp_sw_fill_strings,
		.fill_stats = mlx5e_grp_sw_fill_stats,
		.update_stats = mlx5e_grp_sw_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_q_get_num_stats,
		.fill_strings = mlx5e_grp_q_fill_strings,
		.fill_stats = mlx5e_grp_q_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_q_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
		.fill_strings = mlx5e_grp_vnic_env_fill_strings,
		.fill_stats = mlx5e_grp_vnic_env_fill_stats,
		.update_stats = mlx5e_grp_vnic_env_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vport_get_num_stats,
		.fill_strings = mlx5e_grp_vport_fill_strings,
		.fill_stats = mlx5e_grp_vport_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_vport_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_802_3_get_num_stats,
		.fill_strings = mlx5e_grp_802_3_fill_strings,
		.fill_stats = mlx5e_grp_802_3_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_802_3_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2863_get_num_stats,
		.fill_strings = mlx5e_grp_2863_fill_strings,
		.fill_stats = mlx5e_grp_2863_fill_stats,
		.update_stats = mlx5e_grp_2863_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2819_get_num_stats,
		.fill_strings = mlx5e_grp_2819_fill_strings,
		.fill_stats = mlx5e_grp_2819_fill_stats,
		.update_stats = mlx5e_grp_2819_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_phy_get_num_stats,
		.fill_strings = mlx5e_grp_phy_fill_strings,
		.fill_stats = mlx5e_grp_phy_fill_stats,
		.update_stats = mlx5e_grp_phy_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
		.fill_strings = mlx5e_grp_eth_ext_fill_strings,
		.fill_stats = mlx5e_grp_eth_ext_fill_stats,
		.update_stats = mlx5e_grp_eth_ext_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pcie_get_num_stats,
		.fill_strings = mlx5e_grp_pcie_fill_strings,
		.fill_stats = mlx5e_grp_pcie_fill_stats,
		.update_stats = mlx5e_grp_pcie_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
		.fill_strings = mlx5e_grp_per_prio_fill_strings,
		.fill_stats = mlx5e_grp_per_prio_fill_stats,
		.update_stats = mlx5e_grp_per_prio_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pme_get_num_stats,
		.fill_strings = mlx5e_grp_pme_fill_strings,
		.fill_stats = mlx5e_grp_pme_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
		.fill_strings = mlx5e_grp_ipsec_fill_strings,
		.fill_stats = mlx5e_grp_ipsec_fill_stats,
		.update_stats = mlx5e_grp_ipsec_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_tls_get_num_stats,
		.fill_strings = mlx5e_grp_tls_fill_strings,
		.fill_stats = mlx5e_grp_tls_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_channels_get_num_stats,
		.fill_strings = mlx5e_grp_channels_fill_strings,
		.fill_stats = mlx5e_grp_channels_fill_stats,
	},
};

const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);