/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"

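/* Each counter_desc pairs the string reported through "ethtool -S" with the
 * byte offset of the backing field; for the CPU-native software counters
 * below, MLX5E_DECLARE_STAT() expands to that name/offset pair.
 */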
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_SW_COUNTERS;
}

static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

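/* Fold the per-channel RQ/SQ/CH counters into the aggregated software stats.
 * The sums are accumulated in a stack-local copy and then copied into
 * priv->stats.sw in one go, rather than zeroing and summing in place.
 */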
void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats temp, *s = &temp;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
		int j;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_page_reuse += rq_stats->page_reuse;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;
		s->ch_eq_rearm += ch_stats->eq_rearm;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
			s->tx_tso_packets += sq_stats->tso_packets;
			s->tx_tso_bytes += sq_stats->tso_bytes;
			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
			s->tx_queue_dropped += sq_stats->dropped;
			s->tx_cqe_err += sq_stats->cqe_err;
			s->tx_recover += sq_stats->recover;
			s->tx_xmit_more += sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none += sq_stats->csum_none;
			s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
			s->tx_tls_ooo += sq_stats->tls_ooo;
			s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
			s->tx_cqes += sq_stats->cqes;
		}
	}

	memcpy(&priv->stats.sw, s, sizeof(*s));
}

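/* Device "Q counters": rx_out_of_buffer is taken from the Q counter shared by
 * the data RQs, rx_if_down_packets from the counter attached to the drop RQ
 * that absorbs traffic while the interface is down.
 */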
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];

	if (priv->q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
				       sizeof(out)))
		qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
						  out, out_of_buffer);
	if (priv->drop_rq_q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
				       out, sizeof(out)))
		qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
						    out_of_buffer);
}

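/* VNIC environment counters, read with the QUERY_VNIC_ENV command. Only the
 * receive-steering discard counter is exposed, and only when the device
 * advertises the nic_receive_steering_discard capability.
 */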
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)

static const struct counter_desc vnic_env_stats_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

#define NUM_VNIC_ENV_COUNTERS		ARRAY_SIZE(vnic_env_stats_desc)

static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
{
	return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
		NUM_VNIC_ENV_COUNTERS : 0;
}

static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return idx;

	for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return idx;

	for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_desc, i);
	return idx;
}

static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

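/* Vport counters, read with the QUERY_VPORT_COUNTER command. The raw command
 * output is cached in priv->stats.vport and individual fields are decoded as
 * big-endian 64-bit values at fill time (MLX5E_READ_CTR64_BE).
 */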
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)

static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_VPORT_COUNTERS;
}

static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

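/* The following groups (IEEE 802.3, RFC 2863, RFC 2819, physical layer,
 * Ethernet extended and per-priority counters) are all read from the PPCNT
 * access register, one counter group per query. The *_OFF() macros point at
 * the "_high" word of each 64-bit big-endian counter.
 */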
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2863_COUNTERS;
}

static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2819_COUNTERS;
}

static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)

static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
	/* "1" for link_down_events special counter */
	return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
		NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
}

static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);
	return idx;
}

static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
					  int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
					int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

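/* PCIe performance counters, read from the MPCNT access register. Each subset
 * is exposed only when the corresponding MCAM feature bit is set.
 */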
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);

	return idx;
}

static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

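/* Per-priority traffic and PFC counters. The format strings contain a %d/%s
 * placeholder that is filled with the priority number, or with "global" for
 * the port-wide pause counters.
 */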
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

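/* The PFC/pause layout is dynamic: one set of per-priority counters for every
 * priority with PFC enabled, one "global" set when global pause is enabled,
 * plus the stall-prevention counters when the device supports stall detection.
 */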
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

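/* Port module event counters are maintained by the core driver in
 * mdev->priv.pme_stats; the numeric offsets below select entries in its
 * status/error counter arrays.
 */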
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", 8 },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", 16 },	/* bus stuck (I2C or data shorted) */
	{ "module_high_temp", 48 },	/* high temperature */
	{ "module_bad_shorted", 56 },	/* bad or shorted cable/module */
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
				    int idx)
{
	struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_ipsec_get_count(priv);
}

static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	return idx + mlx5e_ipsec_get_strings(priv,
					     data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}

static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_update_stats(priv);
}

static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_tls_get_count(priv);
}

static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

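/* Per-ring counters exposed by the "channels" group: one RQ and one CH entry
 * per channel, and one SQ entry per channel per TC, with the ring index
 * embedded in each format string.
 */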
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)

static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
	int max_nch = priv->profile->max_nch(priv->mdev);

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
}

static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	int max_nch = priv->profile->max_nch(priv->mdev);
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					priv->channel_tc2txq[i][tc]);

	return idx;
}

static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	int max_nch = priv->profile->max_nch(priv->mdev);
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	return idx;
}

/* The stats groups order is opposite to the update_stats() order calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
	{
		.get_num_stats = mlx5e_grp_sw_get_num_stats,
		.fill_strings = mlx5e_grp_sw_fill_strings,
		.fill_stats = mlx5e_grp_sw_fill_stats,
		.update_stats = mlx5e_grp_sw_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_q_get_num_stats,
		.fill_strings = mlx5e_grp_q_fill_strings,
		.fill_stats = mlx5e_grp_q_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_q_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
		.fill_strings = mlx5e_grp_vnic_env_fill_strings,
		.fill_stats = mlx5e_grp_vnic_env_fill_stats,
		.update_stats = mlx5e_grp_vnic_env_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vport_get_num_stats,
		.fill_strings = mlx5e_grp_vport_fill_strings,
		.fill_stats = mlx5e_grp_vport_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_vport_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_802_3_get_num_stats,
		.fill_strings = mlx5e_grp_802_3_fill_strings,
		.fill_stats = mlx5e_grp_802_3_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_802_3_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2863_get_num_stats,
		.fill_strings = mlx5e_grp_2863_fill_strings,
		.fill_stats = mlx5e_grp_2863_fill_stats,
		.update_stats = mlx5e_grp_2863_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2819_get_num_stats,
		.fill_strings = mlx5e_grp_2819_fill_strings,
		.fill_stats = mlx5e_grp_2819_fill_stats,
		.update_stats = mlx5e_grp_2819_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_phy_get_num_stats,
		.fill_strings = mlx5e_grp_phy_fill_strings,
		.fill_stats = mlx5e_grp_phy_fill_stats,
		.update_stats = mlx5e_grp_phy_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
		.fill_strings = mlx5e_grp_eth_ext_fill_strings,
		.fill_stats = mlx5e_grp_eth_ext_fill_stats,
		.update_stats = mlx5e_grp_eth_ext_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pcie_get_num_stats,
		.fill_strings = mlx5e_grp_pcie_fill_strings,
		.fill_stats = mlx5e_grp_pcie_fill_stats,
		.update_stats = mlx5e_grp_pcie_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
		.fill_strings = mlx5e_grp_per_prio_fill_strings,
		.fill_stats = mlx5e_grp_per_prio_fill_stats,
		.update_stats = mlx5e_grp_per_prio_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pme_get_num_stats,
		.fill_strings = mlx5e_grp_pme_fill_strings,
		.fill_stats = mlx5e_grp_pme_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
		.fill_strings = mlx5e_grp_ipsec_fill_strings,
		.fill_stats = mlx5e_grp_ipsec_fill_stats,
		.update_stats = mlx5e_grp_ipsec_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_tls_get_num_stats,
		.fill_strings = mlx5e_grp_tls_fill_strings,
		.fill_stats = mlx5e_grp_tls_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_channels_get_num_stats,
		.fill_strings = mlx5e_grp_channels_fill_strings,
		.fill_stats = mlx5e_grp_channels_fill_stats,
	},
};

const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);