/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
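/*
 * Note: this file implements the mlx5e ethtool statistics groups. Each
 * counter_desc entry pairs an ethtool string with the location of the
 * counter value: MLX5E_DECLARE_STAT() (from en_stats.h) records the field
 * offset inside the given stats structure, while the PPORT/PCIE groups
 * below store a byte offset into the raw register output. The
 * MLX5E_READ_CTR* helpers use that offset when the fill_stats() callbacks
 * copy values out.
 */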
static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },

#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
#endif

        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
};

#define NUM_SW_COUNTERS		ARRAY_SIZE(sw_stats_desc)
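/*
 * Software counters group. The three callbacks below follow the common
 * group contract used throughout this file: report how many counters the
 * group exposes, emit their ethtool strings, then copy the current values;
 * update_stats() refreshes the cached totals from the per-channel counters.
 */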
static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_SW_COUNTERS;
}

static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
        return idx;
}

/* Fold the per-channel and per-TC counters into a single software stats
 * structure; a temporary copy is filled first so readers never observe a
 * half-updated priv->stats.sw.
 */
void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats temp, *s = &temp;
        int i, j;

        memset(s, 0, sizeof(*s));

        for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
                struct mlx5e_channel_stats *channel_stats =
                        &priv->channel_stats[i];
                struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
                struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
                struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
                struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;

                s->rx_packets += rq_stats->packets;
                s->rx_bytes += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop += rq_stats->xdp_drop;
                s->rx_xdp_redirect += rq_stats->xdp_redirect;
                s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
                s->rx_xdp_tx_full += xdpsq_stats->full;
                s->rx_xdp_tx_err += xdpsq_stats->err;
                s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
                s->rx_wqe_err += rq_stats->wqe_err;
                s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
                s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
                s->rx_page_reuse += rq_stats->page_reuse;
                s->rx_cache_reuse += rq_stats->cache_reuse;
                s->rx_cache_full += rq_stats->cache_full;
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy += rq_stats->cache_busy;
                s->rx_cache_waive += rq_stats->cache_waive;
                s->rx_congst_umr += rq_stats->congst_umr;
                s->ch_events += ch_stats->events;
                s->ch_poll += ch_stats->poll;
                s->ch_arm += ch_stats->arm;
                s->ch_aff_change += ch_stats->aff_change;
                s->ch_eq_rearm += ch_stats->eq_rearm;

                /* xdpsq_red_stats: SQ used as an XDP_REDIRECT target */
                s->tx_xdp_xmit += xdpsq_red_stats->xmit;
                s->tx_xdp_full += xdpsq_red_stats->full;
                s->tx_xdp_err += xdpsq_red_stats->err;
                s->tx_xdp_cqes += xdpsq_red_stats->cqes;

                for (j = 0; j < priv->max_opened_tc; j++) {
                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes += sq_stats->bytes;
                        s->tx_tso_packets += sq_stats->tso_packets;
                        s->tx_tso_bytes += sq_stats->tso_bytes;
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
                        s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
                        s->tx_nop += sq_stats->nop;
                        s->tx_queue_stopped += sq_stats->stopped;
                        s->tx_queue_wake += sq_stats->wake;
                        s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
                        s->tx_queue_dropped += sq_stats->dropped;
                        s->tx_cqe_err += sq_stats->cqe_err;
                        s->tx_recover += sq_stats->recover;
                        s->tx_xmit_more += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        s->tx_csum_none += sq_stats->csum_none;
                        s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
                        s->tx_tls_ooo += sq_stats->tls_ooo;
                        s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
                        s->tx_cqes += sq_stats->cqes;
                }
        }

        memcpy(&priv->stats.sw, s, sizeof(*s));
}
static const struct counter_desc q_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS		ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS	ARRAY_SIZE(drop_rq_stats_desc)

static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
        int num_stats = 0;

        if (priv->q_counter)
                num_stats += NUM_Q_COUNTERS;

        if (priv->drop_rq_q_counter)
                num_stats += NUM_DROP_RQ_COUNTERS;

        return num_stats;
}

static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);

        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, drop_rq_stats_desc[i].format);

        return idx;
}

static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   q_stats_desc, i);
        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   drop_rq_stats_desc, i);
        return idx;
}

static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
        u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];

        if (priv->q_counter &&
            !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
                                       sizeof(out)))
                qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
                                                  out, out_of_buffer);
        if (priv->drop_rq_q_counter &&
            !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
                                       out, sizeof(out)))
                qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
                                                    out_of_buffer);
}
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_desc[] = {
        { "rx_steer_missed_packets",
          VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

#define NUM_VNIC_ENV_COUNTERS	ARRAY_SIZE(vnic_env_stats_desc)

static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
{
        return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
                NUM_VNIC_ENV_COUNTERS : 0;
}

static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
{
        int i;

        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
                return idx;

        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       vnic_env_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
{
        int i;

        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
                return idx;

        for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
                                                  vnic_env_stats_desc, i);
        return idx;
}

static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
        u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
        int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
                return;

        MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
        MLX5_SET(query_vnic_env_in, in, op_mod, 0);
        MLX5_SET(query_vnic_env_in, in, other_vport, 0);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
        { "rx_vport_unicast_packets",
          VPORT_COUNTER_OFF(received_eth_unicast.packets) },
        { "rx_vport_unicast_bytes",
          VPORT_COUNTER_OFF(received_eth_unicast.octets) },
        { "tx_vport_unicast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
        { "tx_vport_unicast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
        { "rx_vport_multicast_packets",
          VPORT_COUNTER_OFF(received_eth_multicast.packets) },
        { "rx_vport_multicast_bytes",
          VPORT_COUNTER_OFF(received_eth_multicast.octets) },
        { "tx_vport_multicast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
        { "tx_vport_multicast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
        { "rx_vport_broadcast_packets",
          VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
        { "rx_vport_broadcast_bytes",
          VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
        { "tx_vport_broadcast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
        { "tx_vport_broadcast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
        { "rx_vport_rdma_unicast_packets",
          VPORT_COUNTER_OFF(received_ib_unicast.packets) },
        { "rx_vport_rdma_unicast_bytes",
          VPORT_COUNTER_OFF(received_ib_unicast.octets) },
        { "tx_vport_rdma_unicast_packets",
          VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
        { "tx_vport_rdma_unicast_bytes",
          VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
        { "rx_vport_rdma_multicast_packets",
          VPORT_COUNTER_OFF(received_ib_multicast.packets) },
        { "rx_vport_rdma_multicast_bytes",
          VPORT_COUNTER_OFF(received_ib_multicast.octets) },
        { "tx_vport_rdma_multicast_packets",
          VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
        { "tx_vport_rdma_multicast_bytes",
          VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS	ARRAY_SIZE(vport_stats_desc)

static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_VPORT_COUNTERS;
}

static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                        int idx)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                      int idx)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
                                                  vport_stats_desc, i);
        return idx;
}

static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;

        MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

#define PPORT_802_3_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
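/* PPCNT counters are 64-bit values laid out as a {high, low} pair of 32-bit
 * fields; taking the byte offset of the c##_high field yields the start of
 * the big-endian 64-bit counter that MLX5E_READ_CTR64_BE() reads. The same
 * pattern is used by the other *_OFF() macros below.
 */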
static const struct counter_desc pport_802_3_stats_desc[] = {
        { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
        { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
        { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
        { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
        { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
        { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
        { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
        { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
        { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
        { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
        { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
        { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
        { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
        { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
        { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
        { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
        { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
        { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PPORT_802_3_COUNTERS;
}

static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                        int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                      int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
                                                  pport_802_3_stats_desc, i);
        return idx;
}

static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
#define PPORT_2863_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
        { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
        { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
        { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS	ARRAY_SIZE(pport_2863_stats_desc)

static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PPORT_2863_COUNTERS;
}

static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                       int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                     int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
                                                  pport_2863_stats_desc, i);
        return idx;
}

static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
#define PPORT_2819_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
        { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
        { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
        { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
        { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
        { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
        { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
        { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
        { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
        { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
        { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
        { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
        { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS	ARRAY_SIZE(pport_2819_stats_desc)

static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PPORT_2819_COUNTERS;
}

static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                       int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                     int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
                                                  pport_2819_stats_desc, i);
        return idx;
}

static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
#define PPORT_PHY_STATISTICAL_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
        { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
        { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)

static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
        /* "1" for the link_down_events special counter */
        return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
                NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
}

static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                      int idx)
{
        int i;

        strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

        if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
                return idx;

        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_phy_statistical_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        int i;

        /* link_down_events_phy needs special handling since it is not stored
         * in __be64 format.
         */
        data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
                               counter_set.phys_layer_cntrs.link_down_events);

        if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
                return idx;

        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
                data[idx++] =
                        MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                            pport_phy_statistical_stats_desc, i);
        return idx;
}

static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return;

        out = pstats->phy_statistical_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
#define PPORT_ETH_EXT_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
        { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
{
        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                return NUM_PPORT_ETH_EXT_COUNTERS;

        return 0;
}

static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                          int idx)
{
        int i;

        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pport_eth_ext_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                        int idx)
{
        int i;

        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
                                                    pport_eth_ext_stats_desc, i);
        return idx;
}

static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->eth_ext_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
#define PCIE_PERF_OFF(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
        { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
        { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
        { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
        { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
        { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
        { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
        { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
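/* Each PCIe sub-group below is reported only when the corresponding MCAM
 * feature bit is set (pcie_performance_group, tx_overflow_buffer_pkt,
 * pcie_outbound_stalled). The perf and stall counters are 32-bit fields
 * read with MLX5E_READ_CTR32_BE, while the overflow counter is a 64-bit
 * field read with MLX5E_READ_CTR64_BE.
 */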
static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
{
        int num_stats = 0;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                num_stats += NUM_PCIE_PERF_COUNTERS;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                num_stats += NUM_PCIE_PERF_COUNTERS64;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

        return num_stats;
}

static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                       int idx)
{
        int i;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stats_desc[i].format);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stats_desc64[i].format);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stall_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                     int idx)
{
        int i;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stats_desc, i);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stats_desc64, i);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stall_stats_desc, i);
        return idx;
}

static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
        void *out;

        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
                return;

        out = pcie_stats->pcie_perf_counters;
        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}
#define PPORT_PER_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
        { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
        { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
        return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
                                                   u8 *data,
                                                   int idx)
{
        int i, prio;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_traffic_stats_desc[i].format, prio);
        }

        return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
                                                 u64 *data,
                                                 int idx)
{
        int i, prio;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
                                                    pport_per_prio_traffic_stats_desc, i);
        }

        return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
        /* %s is "global" or "prio{i}" */
        { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
        { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
        { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
        { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
        { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
        { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
        { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
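/* The stall counters are exposed only when both the pfcc_mask PCAM bit and
 * the stall_detect debug capability are set; the macro above multiplies by
 * those capability bits, so it evaluates to either 0 or
 * ARRAY_SIZE(pport_pfc_stall_stats_desc).
 */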
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 pfc_en_tx, pfc_en_rx;
        int err;

        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return 0;

        err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

        return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 rx_pause, tx_pause;
        int err;

        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

        return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
        return (mlx5e_query_global_pause_combined(priv) +
                hweight8(mlx5e_query_pfc_combined(priv))) *
                NUM_PPORT_PER_PRIO_PFC_COUNTERS +
                NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
                                               u8 *data,
                                               int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        char pfc_string[ETH_GSTRING_LEN];

                        snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, pfc_string);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, "global");
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_pfc_stall_stats_desc[i].format);

        return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
                                             u64 *data,
                                             int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                  pport_pfc_stall_stats_desc, i);

        return idx;
}
static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_grp_per_prio_traffic_get_num_stats() +
                mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
{
        idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
        return idx;
}

static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
{
        idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
        return idx;
}

static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }
}
static const struct counter_desc mlx5e_pme_status_desc[] = {
        { "module_unplug", 8 },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
        { "module_bus_stuck", 16 },	/* bus stuck (I2C or data shorted) */
        { "module_high_temp", 48 },	/* high temperature */
        { "module_bad_shorted", 56 },	/* bad or shorted cable/module */
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
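/* Port module event counters are maintained by the core driver in
 * mdev->priv.pme_stats; the numeric value in each descriptor above is the
 * byte offset of the corresponding u64 counter within the status/error
 * counter arrays, as consumed by MLX5E_READ_CTR64_CPU() below.
 */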
static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                      int idx)
{
        int i;

        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

        for (i = 0; i < NUM_PME_ERR_STATS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

        return idx;
}

static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                    int idx)
{
        struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
        int i;

        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
                                                   mlx5e_pme_status_desc, i);

        for (i = 0; i < NUM_PME_ERR_STATS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
                                                   mlx5e_pme_error_desc, i);

        return idx;
}
static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_ipsec_get_count(priv);
}

static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                        int idx)
{
        return idx + mlx5e_ipsec_get_strings(priv,
                                             data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                      int idx)
{
        return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}

static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_ipsec_update_stats(priv);
}

static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_tls_get_count(priv);
}

static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                      int idx)
{
        return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        return idx + mlx5e_tls_get_stats(priv, data + idx);
}
static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
};

static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
        { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
        { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

#define NUM_RQ_STATS		ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS		ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS		ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS	ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_CH_STATS		ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
        int max_nch = priv->profile->max_nch(priv->mdev);

        return (NUM_RQ_STATS * max_nch) +
               (NUM_CH_STATS * max_nch) +
               (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
               (NUM_RQ_XDPSQ_STATS * max_nch) +
               (NUM_XDPSQ_STATS * max_nch);
}

static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
{
        int max_nch = priv->profile->max_nch(priv->mdev);
        int i, j, tc;

        for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_CH_STATS; j++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                ch_stats_desc[j].format, i);

        for (i = 0; i < max_nch; i++) {
                for (j = 0; j < NUM_RQ_STATS; j++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                rq_stats_desc[j].format, i);
                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                rq_xdpsq_stats_desc[j].format, i);
        }

        for (tc = 0; tc < priv->max_opened_tc; tc++)
                for (i = 0; i < max_nch; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                        sq_stats_desc[j].format,
                                        priv->channel_tc2txq[i][tc]);

        for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_XDPSQ_STATS; j++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                xdpsq_stats_desc[j].format, i);

        return idx;
}

static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
{
        int max_nch = priv->profile->max_nch(priv->mdev);
        int i, j, tc;

        for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_CH_STATS; j++)
                        data[idx++] =
                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
                                                     ch_stats_desc, j);

        for (i = 0; i < max_nch; i++) {
                for (j = 0; j < NUM_RQ_STATS; j++)
                        data[idx++] =
                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
                                                     rq_stats_desc, j);
                for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
                        data[idx++] =
                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
                                                     rq_xdpsq_stats_desc, j);
        }

        for (tc = 0; tc < priv->max_opened_tc; tc++)
                for (i = 0; i < max_nch; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                data[idx++] =
                                        MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
                                                             sq_stats_desc, j);

        for (i = 0; i < max_nch; i++)
                for (j = 0; j < NUM_XDPSQ_STATS; j++)
                        data[idx++] =
                                MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
                                                     xdpsq_stats_desc, j);

        return idx;
}
/* The stats groups' order is opposite to the order of the update_stats() calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
        {
                .get_num_stats = mlx5e_grp_sw_get_num_stats,
                .fill_strings = mlx5e_grp_sw_fill_strings,
                .fill_stats = mlx5e_grp_sw_fill_stats,
                .update_stats = mlx5e_grp_sw_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_q_get_num_stats,
                .fill_strings = mlx5e_grp_q_fill_strings,
                .fill_stats = mlx5e_grp_q_fill_stats,
                .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
                .update_stats = mlx5e_grp_q_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
                .fill_strings = mlx5e_grp_vnic_env_fill_strings,
                .fill_stats = mlx5e_grp_vnic_env_fill_stats,
                .update_stats = mlx5e_grp_vnic_env_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_vport_get_num_stats,
                .fill_strings = mlx5e_grp_vport_fill_strings,
                .fill_stats = mlx5e_grp_vport_fill_stats,
                .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
                .update_stats = mlx5e_grp_vport_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_802_3_get_num_stats,
                .fill_strings = mlx5e_grp_802_3_fill_strings,
                .fill_stats = mlx5e_grp_802_3_fill_stats,
                .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
                .update_stats = mlx5e_grp_802_3_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_2863_get_num_stats,
                .fill_strings = mlx5e_grp_2863_fill_strings,
                .fill_stats = mlx5e_grp_2863_fill_stats,
                .update_stats = mlx5e_grp_2863_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_2819_get_num_stats,
                .fill_strings = mlx5e_grp_2819_fill_strings,
                .fill_stats = mlx5e_grp_2819_fill_stats,
                .update_stats = mlx5e_grp_2819_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_phy_get_num_stats,
                .fill_strings = mlx5e_grp_phy_fill_strings,
                .fill_stats = mlx5e_grp_phy_fill_stats,
                .update_stats = mlx5e_grp_phy_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
                .fill_strings = mlx5e_grp_eth_ext_fill_strings,
                .fill_stats = mlx5e_grp_eth_ext_fill_stats,
                .update_stats = mlx5e_grp_eth_ext_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_pcie_get_num_stats,
                .fill_strings = mlx5e_grp_pcie_fill_strings,
                .fill_stats = mlx5e_grp_pcie_fill_stats,
                .update_stats = mlx5e_grp_pcie_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
                .fill_strings = mlx5e_grp_per_prio_fill_strings,
                .fill_stats = mlx5e_grp_per_prio_fill_stats,
                .update_stats = mlx5e_grp_per_prio_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_pme_get_num_stats,
                .fill_strings = mlx5e_grp_pme_fill_strings,
                .fill_stats = mlx5e_grp_pme_fill_stats,
        },
        {
                .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
                .fill_strings = mlx5e_grp_ipsec_fill_strings,
                .fill_stats = mlx5e_grp_ipsec_fill_stats,
                .update_stats = mlx5e_grp_ipsec_update_stats,
        },
        {
                .get_num_stats = mlx5e_grp_tls_get_num_stats,
                .fill_strings = mlx5e_grp_tls_fill_strings,
                .fill_stats = mlx5e_grp_tls_fill_stats,
        },
        {
                .get_num_stats = mlx5e_grp_channels_get_num_stats,
                .fill_strings = mlx5e_grp_channels_fill_strings,
                .fill_stats = mlx5e_grp_channels_fill_stats,
        },
};

const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
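/*
 * Usage sketch (illustration only; the real consumers of this table live
 * elsewhere in the driver, e.g. the ethtool callbacks): a caller is expected
 * to walk mlx5e_stats_grps[] in order, summing get_num_stats() to size the
 * ethtool arrays and then chaining the returned index through
 * fill_strings()/fill_stats():
 *
 *	int i, idx = 0, num = 0;
 *
 *	for (i = 0; i < mlx5e_num_stats_grps; i++)
 *		num += mlx5e_stats_grps[i].get_num_stats(priv);
 *
 *	for (i = 0; i < mlx5e_num_stats_grps; i++)
 *		idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
 */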