1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2019 Marvell.
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
15 #include "rvu_struct.h"
19 #include "lmac_common.h"
22 #define DEBUGFS_DIR_NAME "octeontx2"
73 static char *cgx_rx_stats_fields[] = {
74 [CGX_STAT0] = "Received packets",
75 [CGX_STAT1] = "Octets of received packets",
76 [CGX_STAT2] = "Received PAUSE packets",
77 [CGX_STAT3] = "Received PAUSE and control packets",
78 [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
79 [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
80 [CGX_STAT6] = "Packets dropped due to RX FIFO full",
81 [CGX_STAT7] = "Octets dropped due to RX FIFO full",
82 [CGX_STAT8] = "Error packets",
83 [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
84 [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
85 [CGX_STAT11] = "NCSI-bound packets dropped",
86 [CGX_STAT12] = "NCSI-bound octets dropped",
89 static char *cgx_tx_stats_fields[] = {
90 [CGX_STAT0] = "Packets dropped due to excessive collisions",
91 [CGX_STAT1] = "Packets dropped due to excessive deferral",
92 [CGX_STAT2] = "Multiple collisions before successful transmission",
93 [CGX_STAT3] = "Single collisions before successful transmission",
94 [CGX_STAT4] = "Total octets sent on the interface",
95 [CGX_STAT5] = "Total frames sent on the interface",
96 [CGX_STAT6] = "Packets sent with an octet count < 64",
97 [CGX_STAT7] = "Packets sent with an octet count == 64",
98 [CGX_STAT8] = "Packets sent with an octet count of 65–127",
99 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
100 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
101 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
102 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
103 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
104 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
105 [CGX_STAT15] = "Packets sent to the multicast DMAC",
106 [CGX_STAT16] = "Transmit underflow and were truncated",
107 [CGX_STAT17] = "Control/PAUSE packets sent",
/* Human-readable labels for the RPM MAC receive statistics counters.
 * Entries are positional: index N labels RPM RX hardware stat N, so the
 * list must stay complete and in order.
 * NOTE(review): a few entries ("Received packets", "Error packets",
 * "Dropped packets", "Oversized Packets", "Jabber Packets") were missing
 * from the corrupted source and have been restored from the upstream
 * driver; the mojibake "65â\200\223127" entry was repaired to "65-127"
 * and two typos ("with out", "a1nrange") fixed.
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets without error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with inrange length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
/* Human-readable labels for the RPM MAC transmit statistics counters.
 * Entries are positional: index N labels RPM TX hardware stat N.
 * NOTE(review): the "Error Packets" entry was missing from the corrupted
 * source and has been restored from the upstream driver; the mojibake
 * "65â\200\223127" entry was repaired to "65-127".
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
/* Number of banks in an NDC block: low 8 bits of that block's
 * NDC_AF_CONST register.
 */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
					blk_addr, NDC_AF_CONST) & 0xFF)
/* Placeholders so the FOPS-generating macros below can be handed "NULL"
 * for an unused read or write handler (token-pasted to rvu_dbg_##op).
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL
/* Generate a seq_file based file_operations named rvu_dbg_<name>_fops whose
 * open routine binds rvu_dbg_<read_op> as the seq_file show callback and
 * rvu_dbg_<write_op> as the raw write handler (either may be NULL via the
 * rvu_dbg_NULL placeholders above).
 * The source was truncated: the open-helper braces, the ".read = seq_read"
 * handler and the struct terminator are restored here.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}
/* Generate a simple (non seq_file) file_operations, rvu_dbg_<name>_fops,
 * with raw rvu_dbg_<read_op>/rvu_dbg_<write_op> handlers.
 * The source was truncated: the struct terminator is restored here.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
227 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
/* Dumps current provisioning status of all RVU block LFs */
/* NOTE(review): several lines of this function are missing from this view
 * (locals such as 'buf', 'lfs', 'buf_size', the 'buffer' parameter, error
 * paths and some braces). Comments below describe only the visible code.
 */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  size_t count, loff_t *ppos)
	int index, off = 0, flag = 0, go_back = 0, len = 0;
	struct rvu *rvu = filp->private_data;
	int lf, pf, vf, pcifunc;
	struct rvu_block block;
	int bytes_not_copied;
	int lf_str_size = 12;
	/* don't allow partial reads */
	buf = kzalloc(buf_size, GFP_KERNEL);
	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	/* Header row: one fixed-width column per RVU block with a name */
	off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
	for (index = 0; index < BLK_COUNT; index++)
	if (strlen(rvu->hw->block[index].name)) {
		off += scnprintf(&buf[off], buf_size - 1 - off,
			rvu->hw->block[index].name);
	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* One row per PF and per VF under it; pcifunc packs PF into bits
	 * 10+ and the VF index into the low bits (vf == 0 is the PF row).
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
			go_back = scnprintf(&buf[off],
				"%-*s", lf_str_size, lfs);
			sprintf(lfs, "PF%d", pf);
			go_back = scnprintf(&buf[off],
				"%-*s", lf_str_size, lfs);
			/* For each block type, emit the LFs owned by this
			 * pcifunc as a comma separated list.
			 */
			for (index = 0; index < BLKTYPE_MAX; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
				for (lf = 0; lf < block.lf.max; lf++) {
					if (block.fn_map[lf] != pcifunc)
					len += sprintf(&lfs[len], "%d,", lf);
				off += scnprintf(&buf[off], buf_size - 1 - off,
					"%-*s", lf_str_size, lfs);
				go_back += lf_str_size;
		off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	/* Hand the assembled text to userspace in one shot */
	bytes_not_copied = copy_to_user(buffer, buf, off);
	if (bytes_not_copied)
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
/* Show the PCI device / RVU pcifunc / NIX block / MAC(CGX or RPM) / LMAC
 * mapping for every CGX-mapped PF.
 * NOTE(review): some lines are missing from this view (locals 'pcifunc',
 * 'cgx_id', 'lmac_id', 'domain' setup, early-return checks, braces).
 */
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	char cgx[10], lmac[10];
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	/* mac_ops of the first CGX/RPM supplies the MAC name for the header */
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There can be no CGX devices at all */
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
		/* RVU PFs sit at PCI device number pf + 1 */
		pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		pfvf = rvu_get_pfvf(rvu, pcifunc);
		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
/* Validate that @lf is a legal LF index for block @blkaddr and that it is
 * attached to some PF/VF; on success *pcifunc is set from the block's LF
 * to PFFUNC map. NOTE(review): return statements and some lines are
 * missing from this view.
 */
static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	block = &hw->block[blkaddr];
	/* Range-check the LF index against the block's maximum */
	if (lf < 0 || lf >= block->lf.max) {
		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
	*pcifunc = block->fn_map[lf];
		"This LF is not attached to any RVU PFFUNC\n");
/* Dump the NPA aura and pool queue sizes and their enable/disable bitmaps
 * for the given PF/VF into seq_file @m.
 * NOTE(review): the 'buf' declaration, else-branch braces and the final
 * kfree are missing from this view.
 */
static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
	/* scratch page for bitmap_print_to_pagebuf() output */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
					pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
	if (!pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
					pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 * NOTE(review): locals 'rvu', 'qsize_id', 'blkaddr', 'pcifunc' and the
 * switch/else scaffolding are missing from this view.
 */
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
	/* Block-specific qsize printer, selected by blktype below */
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	qsize_id = rvu->rvu_dbg.npa_qsize_id;
	print_qsize = print_npa_qsize;
	qsize_id = rvu->rvu_dbg.nix_qsize_id;
	print_qsize = print_nix_qsize;
	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
		/* Parent debugfs dir name ("nix1" vs "nix0") picks the NIX block */
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);
/* 'qsize' write handler: parse an LF index (or "help") from userspace and
 * remember it as the LF whose qsize the next read will display.
 * NOTE(review): locals 'lf', 'blkaddr', 'pcifunc', 'ret', error paths and
 * the kfree at qsize_write_done are missing from this view.
 */
static ssize_t rvu_dbg_qsize_write(struct file *filp,
				   const char __user *buffer, size_t count,
				   loff_t *ppos, int blktype)
	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
	struct seq_file *seqfile = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp, *subtoken;
	struct rvu *rvu = seqfile->private;
	struct dentry *current_dir;
	/* NOTE(review): copies count + 1 bytes from a user buffer that is
	 * only count bytes long - looks like an off-by-one read; confirm
	 * against upstream (memdup_user_nul(buffer, count) would be safer).
	 */
	cmd_buf = memdup_user(buffer, count + 1);
	cmd_buf[count] = '\0';
	/* Trim the command at the first newline, if present */
	cmd_buf_tmp = strchr(cmd_buf, '\n');
	count = cmd_buf_tmp - cmd_buf + 1;
	cmd_buf_tmp = cmd_buf;
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
	if (!strncmp(subtoken, "help", 4) || ret < 0) {
		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
		goto qsize_write_done;
	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
		/* Parent debugfs dir name selects NIX0 vs NIX1 */
		current_dir = filp->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
		goto qsize_write_done;
	if (blktype == BLKTYPE_NPA)
		rvu->rvu_dbg.npa_qsize_id = lf;
		rvu->rvu_dbg.nix_qsize_id = lf;
	return ret ? ret : count;
/* debugfs write: select the NPA LF for the 'qsize' file */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
/* debugfs read: show qsize info for the selected NPA LF */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
/* Dumps given NPA Aura's context */
/* Prints each word (W0..W6) of the npa_aura_s structure field by field.
 * NOTE(review): the function's braces are missing from this view.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;
	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);
	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);
	/* fc_be is printed only on non-OTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	/* fc_msh_dst is printed only on non-OTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
/* Dumps given NPA Pool's context */
/* Prints each word (W0..W8) of the npa_pool_s structure field by field.
 * NOTE(review): the function's braces are missing from this view.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;
	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);
	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);
	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be is printed only on non-OTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	/* fc_msh_dst is printed only on non-OTx2 (newer) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
/* Reads aura/pool's ctx from admin queue */
/* Issues NPA AQ READ instructions for the LF/id last selected via the
 * matching _write handler and pretty-prints each context.
 * NOTE(review): locals 'rvu', 'npalf', 'id', 'all', 'pcifunc', the switch
 * scaffolding, loop-exit logic and several braces are missing from this
 * view.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	/* Recall the LF/id/all selection saved by the last write */
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
	/* Build the AQ READ request for this PF/VF and context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
	/* Dump from the selected id (to max_id when "all" was requested) */
	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
			seq_puts(m, "Failed to read context\n");
		print_npa_ctx(m, &rsp);
/* Validate an (LF, id) selection for the given context type and, when
 * valid, store it in rvu->rvu_dbg so the matching display handler knows
 * what to dump.
 * NOTE(review): locals 'pcifunc'/'max_id', return statements, the switch
 * scaffolding and several braces are missing from this view.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
	struct rvu_pfvf *pfvf;
	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA) {
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
		max_id = pfvf->aura_ctx->qsize;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
		max_id = pfvf->pool_ctx->qsize;
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
	/* Persist the selection for the next read of the ctx file */
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
/* Parse "<npalf> [<id>|all]" from a user-supplied command buffer.
 * On success *npalf (and *id or the "all" flag) are filled in.
 * NOTE(review): the 'all'/'id' parameters, locals 'cmd_buf_tmp', 'subtoken',
 * 'ret', and several lines/braces are missing from this view.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
	int bytes_not_copied;
	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
	cmd_buf[*count] = '\0';
	/* Trim the command at the first newline, if present */
	cmd_buf_tmp = strchr(cmd_buf, '\n');
	*count = cmd_buf_tmp - cmd_buf + 1;
	/* First token: the NPA LF index */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	/* Second (optional) token: a numeric id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
/* Common write handler for the aura_ctx/pool_ctx debugfs files: parse the
 * user command and record the selection via write_npa_ctx().
 * NOTE(review): the 'all' local, error paths, usage-message dev_info call
 * and the kfree of cmd_buf are missing from this view.
 */
static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
				     const char __user *buffer,
				     size_t count, loff_t *ppos, int ctype)
	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
	struct seq_file *seqfp = filp->private_data;
	struct rvu *rvu = seqfp->private;
	int npalf, id = 0, ret;
	/* Only whole writes from offset 0 are supported */
	if ((*ppos != 0) || !count)
	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
		"Usage: echo <npalf> [%s number/all] > %s_ctx\n",
		ctype_string, ctype_string);
	ret = write_npa_ctx(rvu, all, npalf, id, ctype);
	return ret ? ret : count;
/* debugfs write: select the aura(s) to dump via the common ctx writer */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
/* debugfs read: dump the selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
/* debugfs write: select the pool(s) to dump via the common ctx writer */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
/* debugfs read: dump the selected pool context(s) */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
876 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
877 int ctype, int transaction)
879 u64 req, out_req, lat, cant_alloc;
880 struct nix_hw *nix_hw;
884 if (blk_addr == BLKADDR_NDC_NPA0) {
891 for (port = 0; port < NDC_MAX_PORT; port++) {
892 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
893 (port, ctype, transaction));
894 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
895 (port, ctype, transaction));
896 out_req = rvu_read64(rvu, blk_addr,
897 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
898 (port, ctype, transaction));
899 cant_alloc = rvu_read64(rvu, blk_addr,
900 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
901 (port, transaction));
902 seq_printf(s, "\nPort:%d\n", port);
903 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
904 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
905 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
906 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
907 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
/* Print NDC cache statistics for one block in all four combinations of
 * caching mode (CACHING/BYPASS) and transaction direction (read/write).
 * NOTE(review): the function braces and return are missing from this view.
 */
static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
	seq_puts(s, "\n***** CACHE mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
	seq_puts(s, "\n***** CACHE mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
/* debugfs read: NDC cache stats for the NPA block */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
/* Print per-bank hit/miss counters for one NDC block; the bank count comes
 * from the block's NDC_AF_CONST register via NDC_MAX_BANK().
 * NOTE(review): locals 'rvu'/'bank'/'max_bank', the NPA/NIX branch bodies
 * and closing braces are missing from this view.
 */
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
	struct nix_hw *nix_hw;
	if (blk_addr == BLKADDR_NDC_NPA0) {
	max_bank = NDC_MAX_BANK(rvu, blk_addr);
	for (bank = 0; bank < max_bank; bank++) {
		seq_printf(s, "BANK:%d\n", bank);
		seq_printf(s, "\tHits:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
			   NDC_AF_BANKX_HIT_PC(bank)));
		seq_printf(s, "\tMiss:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
			   NDC_AF_BANKX_MISS_PC(bank)));
/* debugfs read: NDC cache stats for this NIX block's RX NDC.
 * NOTE(review): local declarations and function braces are missing from
 * this view.
 */
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
	struct nix_hw *nix_hw = filp->private;
	/* Select the RX NDC matching this NIX instance (NIX0 vs NIX1) */
	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
/* debugfs read: NDC cache stats for this NIX block's TX NDC.
 * NOTE(review): local declarations and function braces are missing from
 * this view.
 */
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
	struct nix_hw *nix_hw = filp->private;
	/* Select the TX NDC matching this NIX instance (NIX0 vs NIX1) */
	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
/* debugfs read: per-bank hit/miss counters for the NPA NDC */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
/* debugfs read: per-bank hit/miss counters for this NIX block's RX NDC.
 * NOTE(review): the 'blkaddr' declaration and function braces are missing
 * from this view; 'ndc_idx' defaults to NPA0_U here - confirm upstream.
 */
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
/* debugfs read: per-bank hit/miss counters for this NIX block's TX NDC.
 * NOTE(review): the 'blkaddr' declaration and function braces are missing
 * from this view; 'ndc_idx' defaults to NPA0_U here - confirm upstream.
 */
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1025 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1026 struct nix_cn10k_sq_ctx_s *sq_ctx)
1028 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1029 sq_ctx->ena, sq_ctx->qint_idx);
1030 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1031 sq_ctx->substream, sq_ctx->sdp_mcast);
1032 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1033 sq_ctx->cq, sq_ctx->sqe_way_mask);
1035 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1036 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1037 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1038 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1039 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1040 sq_ctx->default_chan, sq_ctx->sqb_count);
1042 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1043 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1044 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1045 sq_ctx->sqb_aura, sq_ctx->sq_int);
1046 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1047 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1049 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1050 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1051 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1052 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1053 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1054 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1055 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1056 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1057 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1058 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1060 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1061 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1062 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1063 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1064 sq_ctx->smenq_next_sqb);
1066 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1068 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1069 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1070 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1071 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1072 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1073 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1074 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1076 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1077 (u64)sq_ctx->scm_lso_rem);
1078 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1079 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1080 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1081 (u64)sq_ctx->dropped_octs);
1082 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1083 (u64)sq_ctx->dropped_pkts);
1086 /* Dumps given nix_sq's context */
1087 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1089 struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1090 struct nix_hw *nix_hw = m->private;
1091 struct rvu *rvu = nix_hw->rvu;
1093 if (!is_rvu_otx2(rvu)) {
1094 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1097 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1098 sq_ctx->sqe_way_mask, sq_ctx->cq);
1099 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1100 sq_ctx->sdp_mcast, sq_ctx->substream);
1101 seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1102 sq_ctx->qint_idx, sq_ctx->ena);
1104 seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1105 sq_ctx->sqb_count, sq_ctx->default_chan);
1106 seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1107 sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1108 seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1109 sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1111 seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1112 sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1113 seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1114 sq_ctx->sq_int, sq_ctx->sqb_aura);
1115 seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1117 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1118 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1119 seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1120 sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1121 seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1122 sq_ctx->smenq_offset, sq_ctx->tail_offset);
1123 seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1124 sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1125 seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1126 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1127 seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1128 sq_ctx->cq_limit, sq_ctx->max_sqe_size);
1130 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1131 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1132 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1133 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1134 sq_ctx->smenq_next_sqb);
1136 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1138 seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1139 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1140 seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1141 sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1142 seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1143 sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1144 seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
1146 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1147 (u64)sq_ctx->scm_lso_rem);
1148 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1149 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1150 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1151 (u64)sq_ctx->dropped_octs);
1152 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1153 (u64)sq_ctx->dropped_pkts);
1156 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1157 struct nix_cn10k_rq_ctx_s *rq_ctx)
1159 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1160 rq_ctx->ena, rq_ctx->sso_ena);
1161 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1162 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1163 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1164 rq_ctx->cq, rq_ctx->lenerr_dis);
1165 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1166 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1167 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1168 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1169 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1170 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1171 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1173 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1174 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1175 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1176 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1177 rq_ctx->sso_grp, rq_ctx->sso_tt);
1178 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1179 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1180 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1181 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1182 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1183 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1184 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1185 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1187 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1188 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1189 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1190 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1191 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1192 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1193 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1194 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1195 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1196 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1197 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1199 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1200 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1201 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1202 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1203 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1204 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1205 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1206 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1208 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1209 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1210 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1211 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1212 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1213 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1214 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1216 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1217 rq_ctx->ltag, rq_ctx->good_utag);
1218 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1219 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1220 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1221 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1222 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1223 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1224 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1226 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1227 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1228 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1229 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1230 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1233 /* Dumps given nix_rq's context */
1234 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1236 struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1237 struct nix_hw *nix_hw = m->private;
1238 struct rvu *rvu = nix_hw->rvu;
1240 if (!is_rvu_otx2(rvu)) {
1241 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
1245 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1246 rq_ctx->wqe_aura, rq_ctx->substream);
1247 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1248 rq_ctx->cq, rq_ctx->ena_wqwd);
1249 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1250 rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1251 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1253 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1254 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1255 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1256 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1257 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1258 rq_ctx->pb_caching, rq_ctx->sso_tt);
1259 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1260 rq_ctx->sso_grp, rq_ctx->lpb_aura);
1261 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1263 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1264 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1265 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1266 rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1267 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1268 rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1269 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1270 rq_ctx->spb_ena, rq_ctx->wqe_skip);
1271 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1273 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1274 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1275 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1276 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1277 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1278 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1279 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1280 rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1282 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1283 rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1284 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1285 rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1286 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1287 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1288 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1290 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1291 rq_ctx->flow_tagw, rq_ctx->bad_utag);
1292 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1293 rq_ctx->good_utag, rq_ctx->ltag);
1295 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1296 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1297 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1298 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1299 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1302 /* Dumps given nix_cq's context */
1303 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1305 struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1307 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1309 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1310 seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1311 cq_ctx->avg_con, cq_ctx->cint_idx);
1312 seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1313 cq_ctx->cq_err, cq_ctx->qint_idx);
1314 seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1315 cq_ctx->bpid, cq_ctx->bp_ena);
1317 seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1318 cq_ctx->update_time, cq_ctx->avg_level);
1319 seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1320 cq_ctx->head, cq_ctx->tail);
1322 seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1323 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1324 seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1325 cq_ctx->qsize, cq_ctx->caching);
1326 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1327 cq_ctx->substream, cq_ctx->ena);
1328 seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1329 cq_ctx->drop_ena, cq_ctx->drop);
1330 seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
/*
 * Show handler shared by the nix sq_ctx/rq_ctx/cq_ctx debugfs files:
 * dump the hardware context(s) of the NIX LF/queue previously selected
 * via the matching write handler.  Contexts are read from hardware
 * through the NIX admin queue (AQ) mailbox and pretty-printed by the
 * per-type helper chosen below.
 * NOTE(review): the switch header, some locals (nixlf/id/all/pcifunc)
 * and several early-return/break lines are not visible in this view.
 */
1333 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1334 void *unused, int ctype)
1336 void (*print_nix_ctx)(struct seq_file *filp,
1337 struct nix_aq_enq_rsp *rsp) = NULL;
1338 struct nix_hw *nix_hw = filp->private;
1339 struct rvu *rvu = nix_hw->rvu;
1340 struct nix_aq_enq_req aq_req;
1341 struct nix_aq_enq_rsp rsp;
1342 char *ctype_string = NULL;
1343 int qidx, rc, max_id = 0;
1344 struct rvu_pfvf *pfvf;
/* Recover the LF/queue selection stored by the last debugfs write */
1349 case NIX_AQ_CTYPE_CQ:
1350 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1351 id = rvu->rvu_dbg.nix_cq_ctx.id;
1352 all = rvu->rvu_dbg.nix_cq_ctx.all;
1355 case NIX_AQ_CTYPE_SQ:
1356 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1357 id = rvu->rvu_dbg.nix_sq_ctx.id;
1358 all = rvu->rvu_dbg.nix_sq_ctx.all;
1361 case NIX_AQ_CTYPE_RQ:
1362 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1363 id = rvu->rvu_dbg.nix_rq_ctx.id;
1364 all = rvu->rvu_dbg.nix_rq_ctx.all;
/* Validate the LF and make sure the requested context type exists */
1371 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1374 pfvf = rvu_get_pfvf(rvu, pcifunc);
1375 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1376 seq_puts(filp, "SQ context is not initialized\n");
1378 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1379 seq_puts(filp, "RQ context is not initialized\n");
1381 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1382 seq_puts(filp, "CQ context is not initialized\n");
/* Bind the context-type specific limit, name and print routine */
1386 if (ctype == NIX_AQ_CTYPE_SQ) {
1387 max_id = pfvf->sq_ctx->qsize;
1388 ctype_string = "sq";
1389 print_nix_ctx = print_nix_sq_ctx;
1390 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1391 max_id = pfvf->rq_ctx->qsize;
1392 ctype_string = "rq";
1393 print_nix_ctx = print_nix_rq_ctx;
1394 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1395 max_id = pfvf->cq_ctx->qsize;
1396 ctype_string = "cq";
1397 print_nix_ctx = print_nix_cq_ctx;
/* Build a NIX AQ READ request and dump each selected queue context */
1400 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1401 aq_req.hdr.pcifunc = pcifunc;
1402 aq_req.ctype = ctype;
1403 aq_req.op = NIX_AQ_INSTOP_READ;
1408 for (qidx = id; qidx < max_id; qidx++) {
1410 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1411 ctype_string, nixlf, aq_req.qidx)
1412 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1414 seq_puts(filp, "Failed to read the context\n");
1417 print_nix_ctx(filp, &rsp);
/*
 * Validate a (nixlf, queue id) selection written to a nix *_ctx debugfs
 * file and, if valid, remember it in rvu->rvu_dbg so the next read of
 * that file dumps the chosen context(s).
 * NOTE(review): error-return lines, closing braces and the switch
 * header near the end are not visible in this view.
 */
1422 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1423 int id, int ctype, char *ctype_string,
1426 struct nix_hw *nix_hw = m->private;
1427 struct rvu_pfvf *pfvf;
/* Reject LFs not attached to this NIX block */
1431 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1434 pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Determine the queue count for bounds-checking 'id' below */
1436 if (ctype == NIX_AQ_CTYPE_SQ) {
1437 if (!pfvf->sq_ctx) {
1438 dev_warn(rvu->dev, "SQ context is not initialized\n");
1441 max_id = pfvf->sq_ctx->qsize;
1442 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1443 if (!pfvf->rq_ctx) {
1444 dev_warn(rvu->dev, "RQ context is not initialized\n");
1447 max_id = pfvf->rq_ctx->qsize;
1448 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1449 if (!pfvf->cq_ctx) {
1450 dev_warn(rvu->dev, "CQ context is not initialized\n");
1453 max_id = pfvf->cq_ctx->qsize;
1456 if (id < 0 || id >= max_id) {
1457 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1458 ctype_string, max_id - 1);
/* Persist the selection for the matching show handler */
1462 case NIX_AQ_CTYPE_CQ:
1463 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1464 rvu->rvu_dbg.nix_cq_ctx.id = id;
1465 rvu->rvu_dbg.nix_cq_ctx.all = all;
1468 case NIX_AQ_CTYPE_SQ:
1469 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1470 rvu->rvu_dbg.nix_sq_ctx.id = id;
1471 rvu->rvu_dbg.nix_sq_ctx.all = all;
1474 case NIX_AQ_CTYPE_RQ:
1475 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1476 rvu->rvu_dbg.nix_rq_ctx.id = id;
1477 rvu->rvu_dbg.nix_rq_ctx.all = all;
/*
 * Common write handler for the nix sq_ctx/rq_ctx/cq_ctx debugfs files.
 * Copies the user command into a kernel buffer, parses the
 * "<nixlf> [<queue>|all]" selection and records it via
 * write_nix_queue_ctx().  Returns 'count' on success, negative errno
 * otherwise.
 * NOTE(review): the switch header, 'all' local, kfree/error paths and
 * breaks are not visible in this view.
 */
1485 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1486 const char __user *buffer,
1487 size_t count, loff_t *ppos,
1490 struct seq_file *m = filp->private_data;
1491 struct nix_hw *nix_hw = m->private;
1492 struct rvu *rvu = nix_hw->rvu;
1493 char *cmd_buf, *ctype_string;
1494 int nixlf, id = 0, ret;
/* debugfs writes are single-shot: no partial/offset writes */
1497 if ((*ppos != 0) || !count)
1501 case NIX_AQ_CTYPE_SQ:
1502 ctype_string = "sq";
1504 case NIX_AQ_CTYPE_RQ:
1505 ctype_string = "rq";
1507 case NIX_AQ_CTYPE_CQ:
1508 ctype_string = "cq";
/* +1 for the NUL terminator appended to the copied command */
1514 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1519 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1523 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1524 ctype_string, ctype_string);
1527 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1532 return ret ? ret : count;
1535 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1536 const char __user *buffer,
1537 size_t count, loff_t *ppos)
1539 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1543 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1545 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
1548 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1550 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1551 const char __user *buffer,
1552 size_t count, loff_t *ppos)
1554 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1558 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
1560 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
1563 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1565 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1566 const char __user *buffer,
1567 size_t count, loff_t *ppos)
1569 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1573 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1575 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1578 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1580 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1581 unsigned long *bmap, char *qtype)
1585 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1589 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1590 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1591 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1596 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1599 seq_puts(filp, "cq context is not initialized\n");
1601 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1605 seq_puts(filp, "rq context is not initialized\n");
1607 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1611 seq_puts(filp, "sq context is not initialized\n");
1613 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1617 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1618 const char __user *buffer,
1619 size_t count, loff_t *ppos)
1621 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1625 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1627 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1630 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
/*
 * Dump one NIX ingress bandwidth (policer) profile context, word by
 * word (W0-W15), to the debugfs seq_file.  Rate fields are printed as
 * raw exponent/mantissa pairs, not converted to bits-per-second.
 * NOTE(review): the 'str' declaration and the case bodies assigning
 * the pc_mode name strings are not visible in this view.
 */
1632 static void print_band_prof_ctx(struct seq_file *m,
1633 struct nix_bandprof_s *prof)
1637 switch (prof->pc_mode) {
1638 case NIX_RX_PC_MODE_VLAN:
1641 case NIX_RX_PC_MODE_DSCP:
1644 case NIX_RX_PC_MODE_GEN:
1647 case NIX_RX_PC_MODE_RSVD:
1651 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
/* icolor: 3 = color blind, 0/1/2 = green/yellow/red pre-coloring */
1652 str = (prof->icolor == 3) ? "Color blind" :
1653 (prof->icolor == 0) ? "Green" :
1654 (prof->icolor == 1) ? "Yellow" : "Red";
1655 seq_printf(m, "W0: icolor\t\t%s\n", str);
1656 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1657 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1658 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1659 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1660 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1661 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1662 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1663 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1665 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1666 str = (prof->lmode == 0) ? "byte" : "packet";
1667 seq_printf(m, "W1: lmode\t\t%s\n", str);
/* 'l_sellect' spelling matches the hardware context struct field */
1668 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1669 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1670 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1671 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
/* Per-color actions: 0 = pass, 1 = drop, otherwise mark RED */
1672 str = (prof->gc_action == 0) ? "PASS" :
1673 (prof->gc_action == 1) ? "DROP" : "RED";
1674 seq_printf(m, "W1: gc_action\t\t%s\n", str);
1675 str = (prof->yc_action == 0) ? "PASS" :
1676 (prof->yc_action == 1) ? "DROP" : "RED";
1677 seq_printf(m, "W1: yc_action\t\t%s\n", str);
1678 str = (prof->rc_action == 0) ? "PASS" :
1679 (prof->rc_action == 1) ? "DROP" : "RED";
1680 seq_printf(m, "W1: rc_action\t\t%s\n", str);
1681 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1682 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1683 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1685 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1686 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1687 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
/* W4-W15: per-color packet/octet pass and drop counters */
1688 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1689 (u64)prof->green_pkt_pass);
1690 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1691 (u64)prof->yellow_pkt_pass);
1692 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1693 seq_printf(m, "W7: green_octs_pass\t%lld\n",
1694 (u64)prof->green_octs_pass);
1695 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1696 (u64)prof->yellow_octs_pass);
1697 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1698 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1699 (u64)prof->green_pkt_drop);
1700 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1701 (u64)prof->yellow_pkt_drop);
1702 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1703 seq_printf(m, "W13: green_octs_drop\t%lld\n",
1704 (u64)prof->green_octs_drop);
1705 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1706 (u64)prof->yellow_octs_drop);
1707 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1708 seq_puts(m, "==============================\n");
/*
 * Show handler for the "ingress_policer_ctx" debugfs file: walk every
 * policer layer (leaf/mid/top), fetch each allocated bandwidth profile
 * context from hardware via the CN10K NIX AQ and dump it together with
 * the owning PF/VF.
 * NOTE(review): 'pcifunc'/'str' declarations, continue/return lines and
 * closing braces are not visible in this view.
 */
1711 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
1713 struct nix_hw *nix_hw = m->private;
1714 struct nix_cn10k_aq_enq_req aq_req;
1715 struct nix_cn10k_aq_enq_rsp aq_rsp;
1716 struct rvu *rvu = nix_hw->rvu;
1717 struct nix_ipolicer *ipolicer;
1718 int layer, prof_idx, idx, rc;
1722 /* Ingress policers do not exist on all platforms */
1723 if (!nix_hw->ipolicer)
1726 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1727 if (layer == BAND_PROF_INVAL_LAYER)
1729 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1730 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
1732 seq_printf(m, "\n%s bandwidth profiles\n", str);
1733 seq_puts(m, "=======================\n");
1735 ipolicer = &nix_hw->ipolicer[layer];
/* Skip profiles that are not currently allocated to anyone */
1737 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
1738 if (is_rsrc_free(&ipolicer->band_prof, idx))
/* Hardware profile index: low 14 bits id, layer in bits 14+ */
1741 prof_idx = (idx & 0x3FFF) | (layer << 14);
1742 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1743 0x00, NIX_AQ_CTYPE_BANDPROF,
1747 "%s: Failed to fetch context of %s profile %d, err %d\n",
1748 __func__, str, idx, rc);
1751 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
1752 pcifunc = ipolicer->pfvf_map[idx];
/* FUNC bits clear => owned by a PF, otherwise VF (func - 1) */
1753 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1754 seq_printf(m, "Allocated to :: PF %d\n",
1755 rvu_get_pf(pcifunc));
1757 seq_printf(m, "Allocated to :: PF %d VF %d\n",
1758 rvu_get_pf(pcifunc),
1759 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1760 print_band_prof_ctx(m, &aq_rsp.prof);
1766 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
/*
 * Show handler for the "ingress_policer_rsrc" debugfs file: print, for
 * each policer layer, the total number of bandwidth profiles and how
 * many are still free.
 * NOTE(review): 'str'/'layer' declarations and return lines are not
 * visible in this view.
 */
1768 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1770 struct nix_hw *nix_hw = m->private;
1771 struct nix_ipolicer *ipolicer;
1775 /* Ingress policers do not exist on all platforms */
1776 if (!nix_hw->ipolicer)
1779 seq_puts(m, "\nBandwidth profile resource free count\n");
1780 seq_puts(m, "=====================================\n");
1781 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1782 if (layer == BAND_PROF_INVAL_LAYER)
/* Trailing space keeps "Mid "/"Top " aligned with "Leaf" */
1784 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1785 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1787 ipolicer = &nix_hw->ipolicer[layer];
1788 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
1789 ipolicer->band_prof.max,
1790 rvu_rsrc_free_count(&ipolicer->band_prof));
1792 seq_puts(m, "=====================================\n");
1797 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
/*
 * Create the per-NIX-block debugfs directory ("nix" for NIX0, "nix1"
 * otherwise) and populate it with context-dump, NDC and policer files.
 * NOTE(review): the early-return line and else-branch pieces are not
 * visible in this view.
 */
1799 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1801 struct nix_hw *nix_hw;
1803 if (!is_block_implemented(rvu->hw, blkaddr))
1806 if (blkaddr == BLKADDR_NIX0) {
1807 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1808 nix_hw = &rvu->hw->nix[0];
1810 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1812 nix_hw = &rvu->hw->nix[1];
/* Queue-context and NDC files take the nix_hw as private data;
 * "qsize" takes the rvu pointer since it is block-type driven.
 */
1815 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1816 &rvu_dbg_nix_sq_ctx_fops);
1817 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1818 &rvu_dbg_nix_rq_ctx_fops);
1819 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1820 &rvu_dbg_nix_cq_ctx_fops);
1821 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1822 &rvu_dbg_nix_ndc_tx_cache_fops);
1823 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1824 &rvu_dbg_nix_ndc_rx_cache_fops);
1825 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1826 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1827 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1828 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1829 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1830 &rvu_dbg_nix_qsize_fops);
1831 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1832 &rvu_dbg_nix_band_prof_ctx_fops);
1833 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
1834 &rvu_dbg_nix_band_prof_rsrc_fops);
1837 static void rvu_dbg_npa_init(struct rvu *rvu)
1839 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1841 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1842 &rvu_dbg_npa_qsize_fops);
1843 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1844 &rvu_dbg_npa_aura_ctx_fops);
1845 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1846 &rvu_dbg_npa_pool_ctx_fops);
1847 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1848 &rvu_dbg_npa_ndc_cache_fops);
1849 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1850 &rvu_dbg_npa_ndc_hits_miss_fops);
/*
 * Helpers for cgx_print_stats(): fetch the cumulative NIX RX/TX stat
 * 'idx' for the current (cgxd, lmac_id) pair via
 * rvu_cgx_nix_cuml_stats() and, on success, print it to the seq_file
 * under 'name'.  They rely on 'rvu', 'cgxd', 'lmac_id', 'err' and 's'
 * being in scope at the expansion site.
 * NOTE(review): the surrounding statement-expression lines of both
 * macros are not visible in this view; comments are added above only,
 * to avoid breaking the '\'-continued bodies.
 */
1853 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
1856 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
1857 NIX_STATS_RX, &(cnt)); \
1859 seq_printf(s, "%s: %llu\n", name, cnt); \
1863 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
1866 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
1867 NIX_STATS_TX, &(cnt)); \
1869 seq_printf(s, "%s: %llu\n", name, cnt); \
/*
 * Dump link status plus NIX-level and MAC-level (CGX or RPM) RX/TX
 * statistics for one LMAC into the debugfs seq_file.  The RVU AF handle
 * is looked up from the PCI device since only the cgx device pointer is
 * stored as seq_file private data.
 * NOTE(review): several error-check/return lines, the 'rvu'
 * declaration and loop increments are not visible in this view.
 */
1873 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1875 struct cgx_link_user_info linfo;
1876 struct mac_ops *mac_ops;
1877 void *cgxd = s->private;
1878 u64 ucast, mcast, bcast;
1879 int stat = 0, err = 0;
1880 u64 tx_stat, rx_stat;
/* Resolve the AF driver instance from the RVU AF PCI device */
1883 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1884 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1888 mac_ops = get_mac_ops(cgxd);
1894 seq_puts(s, "\n=======Link Status======\n\n");
1895 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1897 seq_puts(s, "Failed to read link status\n");
1898 seq_printf(s, "\nLink is %s %d Mbps\n\n",
1899 linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* NIX-side cumulative RX stats for this port */
1902 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1904 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1907 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1910 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1913 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1914 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1917 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1920 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
/* NIX-side cumulative TX stats for this port */
1925 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
1927 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1930 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1933 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1936 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1937 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1940 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
/* MAC-level stats: label tables differ between CGX (OTx2) and RPM */
1945 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
1946 while (stat < mac_ops->rx_stats_cnt) {
1947 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1950 if (is_rvu_otx2(rvu))
1951 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
1954 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
1961 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
1962 while (stat < mac_ops->tx_stats_cnt) {
1963 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1967 if (is_rvu_otx2(rvu))
1968 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
1971 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
1979 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
1981 struct dentry *current_dir;
1984 current_dir = filp->file->f_path.dentry->d_parent;
1985 buf = strrchr(current_dir->d_name.name, 'c');
1989 return kstrtoint(buf + 1, 10, lmac_id);
/* seq_file show callback for the per-LMAC "stats" file: resolve the
 * LMAC id from the directory name, then dump its statistics.  The
 * elided declarations and error propagation are restored.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}
2003 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
/*
 * Dump the DMAC filter configuration of one LMAC: the owning PF's PCI
 * device name, broadcast/multicast accept modes, unicast/promiscuous
 * mode, and every enabled DMAC CAM entry belonging to this LMAC.
 * NOTE(review): several declarations (rvu, pf, domain, cfg, mac, dmac)
 * and error-check/return lines are not visible in this view.
 */
2005 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2007 struct pci_dev *pdev = NULL;
2008 void *cgxd = s->private;
2009 char *bcast, *mcast;
/* Resolve the AF driver instance from the RVU AF PCI device */
2016 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2017 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2021 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
/* PF devices sit at bus 'pf + 1' in the RVU PCI hierarchy */
2024 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2028 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2029 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2030 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2033 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
2034 seq_printf(s, "%s PF%d %9s %9s",
2035 dev_name(&pdev->dev), pf, bcast, mcast);
2036 if (cfg & CGX_DMAC_CAM_ACCEPT)
2037 seq_printf(s, "%12s\n\n", "UNICAST");
2039 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2041 seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
2043 for (index = 0 ; index < 32 ; index++) {
2044 cfg = cgx_read_dmac_entry(cgxd, index);
2045 /* Display enabled dmac entries associated with current lmac */
2046 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2047 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2048 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2049 u64_to_ether_addr(mac, dmac);
2050 seq_printf(s, "%7d %pM\n", index, dmac);
/* seq_file show callback for the per-LMAC "mac_filter" file: resolve
 * the LMAC id from the directory name, then dump its DMAC filters.
 * The elided declarations and error propagation are restored.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}
2068 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
/*
 * Build the CGX/RPM debugfs tree: a root directory named after the MAC
 * type, one sub-directory per CGX instance, and one per enabled LMAC
 * containing "stats" and "mac_filter" files.
 * NOTE(review): loop-variable declarations, the 'dname' buffer, early
 * returns and a 'continue' for missing cgx instances are not visible
 * in this view.
 */
2070 static void rvu_dbg_cgx_init(struct rvu *rvu)
2072 struct mac_ops *mac_ops;
2073 unsigned long lmac_bmap;
2078 if (!cgx_get_cgxcnt_max())
2081 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
/* Root dir is named after the MAC block ("cgx" or "rpm") */
2085 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2088 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2089 cgx = rvu_cgx_pdata(i, rvu);
2092 lmac_bmap = cgx_get_lmac_bmap(cgx);
2093 /* cgx debugfs dir */
2094 sprintf(dname, "%s%d", mac_ops->name, i);
2095 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2096 rvu->rvu_dbg.cgx_root);
2098 for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2099 /* lmac debugfs dir */
2100 sprintf(dname, "lmac%d", lmac_id);
2102 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2104 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2105 cgx, &rvu_dbg_cgx_stat_fops);
2106 debugfs_create_file("mac_filter", 0600,
2107 rvu->rvu_dbg.lmac, cgx,
2108 &rvu_dbg_cgx_dmac_flt_fops);
2113 /* NPC debugfs APIs */
/*
 * Print the MCAM entry and counter allocation/enable counts for one
 * PF/VF (identified by pcifunc); silently prints nothing when the
 * function owns neither entries nor counters.
 * NOTE(review): the early 'return' and the if-guards around the two
 * count sections are not visible in this view.
 */
2114 static void rvu_print_npc_mcam_info(struct seq_file *s,
2115 u16 pcifunc, int blkaddr)
2117 struct rvu *rvu = s->private;
2118 int entry_acnt, entry_ecnt;
2119 int cntr_acnt, cntr_ecnt;
2121 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2122 &entry_acnt, &entry_ecnt);
2123 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2124 &cntr_acnt, &cntr_ecnt);
/* Nothing allocated to this function: skip the whole section */
2125 if (!entry_acnt && !cntr_acnt)
/* FUNC bits clear => PF, otherwise VF number is (func - 1) */
2128 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2129 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2130 rvu_get_pf(pcifunc));
2132 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2133 rvu_get_pf(pcifunc),
2134 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2137 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2138 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2141 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2142 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
/*
 * Show handler for the NPC "mcam_info" debugfs file: print key widths,
 * total/reserved/free MCAM entries and counters, then the per-PF/VF
 * allocation breakdown (under the mcam lock).
 * NOTE(review): the 'u64 cfg' declaration, error returns and some
 * closing braces are not visible in this view.
 */
2146 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2148 struct rvu *rvu = filp->private;
2149 int pf, vf, numvfs, blkaddr;
2150 struct npc_mcam *mcam;
2151 u16 pcifunc, counters;
2154 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2158 mcam = &rvu->hw->mcam;
2159 counters = rvu->hw->npc_counters;
2161 seq_puts(filp, "\nNPC MCAM info:\n");
2162 /* MCAM keywidth on receive and transmit sides */
2163 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2164 cfg = (cfg >> 32) & 0x07;
2165 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2166 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2167 "224bits" : "448bits"));
2168 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2169 cfg = (cfg >> 32) & 0x07;
2170 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2171 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2172 "224bits" : "448bits"));
/* Allocation counters are protected by the mcam lock */
2174 mutex_lock(&mcam->lock);
2176 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2177 seq_printf(filp, "\t\t Reserved \t: %d\n",
2178 mcam->total_entries - mcam->bmap_entries);
2179 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2182 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2183 seq_printf(filp, "\t\t Reserved \t: %d\n",
2184 counters - mcam->counters.max);
2185 seq_printf(filp, "\t\t Available \t: %d\n",
2186 rvu_rsrc_free_count(&mcam->counters));
/* No entries handed out yet: skip the per-function breakdown */
2188 if (mcam->bmap_entries == mcam->bmap_fcnt) {
2189 mutex_unlock(&mcam->lock);
2193 seq_puts(filp, "\n\t\t Current allocation\n");
2194 seq_puts(filp, "\t\t====================\n");
2195 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2196 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2197 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2199 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2200 numvfs = (cfg >> 12) & 0xFF;
2201 for (vf = 0; vf < numvfs; vf++) {
2202 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2203 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2207 mutex_unlock(&mcam->lock);
2211 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
/*
 * Show handler for the NPC RX miss-action statistics file: print the
 * match-stat counter dedicated to the RX miss action.
 * NOTE(review): the error return for a missing NPC block is not
 * visible in this view.
 */
2213 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2216 struct rvu *rvu = filp->private;
2217 struct npc_mcam *mcam;
2220 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2224 mcam = &rvu->hw->mcam;
2226 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
/* rx_miss_act_cntr indexes the NPC match-stat register bank */
2227 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2228 rvu_read64(rvu, blkaddr,
2229 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2234 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
/*
 * Print every match field (value and mask) of one MCAM rule.
 * Each set bit in rule->features selects one field; formatting uses the
 * kernel's extended specifiers (%pM for MACs, %pI4/%pI6 for IP addresses).
 */
2236 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2237 struct rvu_npc_mcam_rule *rule)
/* Walk all feature bits present on this rule and dump that field. */
2241 for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2242 seq_printf(s, "\t%s ", npc_get_field_name(bit));
/* Destination / source MAC address. */
2245 seq_printf(s, "%pM ", rule->packet.dmac);
2246 seq_printf(s, "mask %pM\n", rule->mask.dmac);
2249 seq_printf(s, "%pM ", rule->packet.smac);
2250 seq_printf(s, "mask %pM\n", rule->mask.smac);
/* Ethertype and VLAN TCI are stored in network byte order. */
2253 seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2254 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2257 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2258 seq_printf(s, "mask 0x%x\n",
2259 ntohs(rule->mask.vlan_tci));
/* IP TOS byte. */
2262 seq_printf(s, "%d ", rule->packet.tos);
2263 seq_printf(s, "mask 0x%x\n", rule->mask.tos);
/* IPv4 source / destination addresses. */
2266 seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2267 seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2270 seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2271 seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
/* IPv6 source / destination addresses. */
2274 seq_printf(s, "%pI6 ", rule->packet.ip6src);
2275 seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2278 seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2279 seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
/*
 * L4 source/destination port, network byte order.  NOTE(review): the
 * companion TCP/UDP case labels are not visible in this listing — the
 * SCTP label presumably shares these prints; confirm against the tree.
 */
2283 case NPC_SPORT_SCTP:
2284 seq_printf(s, "%d ", ntohs(rule->packet.sport));
2285 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2289 case NPC_DPORT_SCTP:
2290 seq_printf(s, "%d ", ntohs(rule->packet.dport));
2291 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
/*
 * Print a human-readable description of the rule's action.
 * TX-interface rules decode tx_action.op; all other rules decode
 * rx_action.op.
 */
2300 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2301 struct rvu_npc_mcam_rule *rule)
2303 if (is_npc_intf_tx(rule->intf)) {
2304 switch (rule->tx_action.op) {
2305 case NIX_TX_ACTIONOP_DROP:
2306 seq_puts(s, "\taction: Drop\n");
2308 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2309 seq_puts(s, "\taction: Unicast to default channel\n");
/* For UCAST_CHAN the action index carries the target channel. */
2311 case NIX_TX_ACTIONOP_UCAST_CHAN:
2312 seq_printf(s, "\taction: Unicast to channel %d\n",
2313 rule->tx_action.index);
2315 case NIX_TX_ACTIONOP_MCAST:
2316 seq_puts(s, "\taction: Multicast\n");
2318 case NIX_TX_ACTIONOP_DROP_VIOL:
2319 seq_puts(s, "\taction: Lockdown Violation Drop\n");
/* RX (non-TX) rules decode the receive action opcode instead. */
2325 switch (rule->rx_action.op) {
2326 case NIX_RX_ACTIONOP_DROP:
2327 seq_puts(s, "\taction: Drop\n");
/* For UCAST the action index is the destination queue. */
2329 case NIX_RX_ACTIONOP_UCAST:
2330 seq_printf(s, "\taction: Direct to queue %d\n",
2331 rule->rx_action.index);
2333 case NIX_RX_ACTIONOP_RSS:
2334 seq_puts(s, "\taction: RSS\n");
2336 case NIX_RX_ACTIONOP_UCAST_IPSEC:
2337 seq_puts(s, "\taction: Unicast ipsec\n");
2339 case NIX_RX_ACTIONOP_MCAST:
2340 seq_puts(s, "\taction: Multicast\n");
/*
 * Map a NIX interface id to a printable name (one string per NIX0/NIX1
 * RX/TX interface; the returned literals are elided from this listing).
 */
2348 static const char *rvu_dbg_get_intf_name(int intf)
2351 case NIX_INTFX_RX(0):
2353 case NIX_INTFX_RX(1):
2355 case NIX_INTFX_TX(0):
2357 case NIX_INTFX_TX(1):
/*
 * "mcam_rules" debugfs node: dump every software-tracked MCAM rule —
 * owner, direction/interface, entry index, match fields, action, enable
 * state and (when attached) its hit counter.  The rule list is walked
 * under mcam->lock.
 */
2366 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2368 struct rvu_npc_mcam_rule *iter;
2369 struct rvu *rvu = s->private;
2370 struct npc_mcam *mcam;
2377 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2381 mcam = &rvu->hw->mcam;
2383 mutex_lock(&mcam->lock);
2384 list_for_each_entry(iter, &mcam->mcam_rules, list) {
/* owner is a pcifunc: PF number in the upper bits, VF index+1 below. */
2385 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2386 seq_printf(s, "\n\tInstalled by: PF%d ", pf);
/* A non-zero func field means the owner is a VF (func - 1). */
2388 if (iter->owner & RVU_PFVF_FUNC_MASK) {
2389 vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
2390 seq_printf(s, "VF%d", vf);
2394 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
2396 seq_printf(s, "\tinterface: %s\n",
2397 rvu_dbg_get_intf_name(iter->intf));
2398 seq_printf(s, "\tmcam entry: %d\n", iter->entry);
/* Dump the rule's match fields, then for RX rules the forward target. */
2400 rvu_dbg_npc_mcam_show_flows(s, iter);
2401 if (is_npc_intf_rx(iter->intf)) {
2402 target = iter->rx_action.pf_func;
2403 pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2404 seq_printf(s, "\tForward to: PF%d ", pf);
2406 if (target & RVU_PFVF_FUNC_MASK) {
2407 vf = (target & RVU_PFVF_FUNC_MASK) - 1;
2408 seq_printf(s, "VF%d", vf);
2413 rvu_dbg_npc_mcam_show_action(s, iter);
/* Report whether the hardware entry is currently enabled. */
2415 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
2416 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
/* Hit statistics are only available when a counter is attached. */
2418 if (!iter->has_cntr)
2420 seq_printf(s, "\tcounter: %d\n", iter->cntr);
2422 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
2423 seq_printf(s, "\thits: %lld\n", hits);
2425 mutex_unlock(&mcam->lock);
2430 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
/*
 * Create the "npc" debugfs directory and its read-only nodes.
 * debugfs_create_* return values are intentionally not checked — the
 * debugfs API is designed to be safe to ignore on failure.
 */
2432 static void rvu_dbg_npc_init(struct rvu *rvu)
2434 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2436 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2437 &rvu_dbg_npc_mcam_info_fops);
2438 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2439 &rvu_dbg_npc_mcam_rules_fops);
2440 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2441 &rvu_dbg_npc_rx_miss_act_fops);
/*
 * Common helper for the cpt_*_sts nodes: print per-engine busy/free
 * bitmaps for one CPT engine type.  Engine counts come from
 * CPT_AF_CONSTANTS1 (SE, IE, AE packed in 16-bit fields); engines are
 * numbered SE first, then IE, then AE, and [e_min, e_max) is chosen per
 * eng_type (the selection branches are partially elided here).
 */
2444 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2446 struct cpt_ctx *ctx = filp->private;
2447 u64 busy_sts = 0, free_sts = 0;
2448 u32 e_min = 0, e_max = 0, e, i;
2449 u16 max_ses, max_ies, max_aes;
2450 struct rvu *rvu = ctx->rvu;
2451 int blkaddr = ctx->blkaddr;
/* Decode engine counts: SES in bits 15:0, IES in 31:16, AES in 47:32. */
2454 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2455 max_ses = reg & 0xffff;
2456 max_ies = (reg >> 16) & 0xffff;
2457 max_aes = (reg >> 32) & 0xffff;
/* AE engines occupy the range after all SE + IE engines. */
2461 e_min = max_ses + max_ies;
2462 e_max = max_ses + max_ies + max_aes;
2470 e_max = max_ses + max_ies;
/* Collect one busy/free bit per engine from its status register. */
2476 for (e = e_min, i = 0; e < e_max; e++, i++) {
2477 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2479 busy_sts |= 1ULL << i;
2482 free_sts |= 1ULL << i;
2484 seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2485 seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
/* "cpt_ae_sts": busy/free bitmaps for the asymmetric (AE) engines. */
2490 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
2492 return cpt_eng_sts_display(filp, CPT_AE_TYPE);
2495 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
/* "cpt_se_sts": busy/free bitmaps for the symmetric (SE) engines. */
2497 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
2499 return cpt_eng_sts_display(filp, CPT_SE_TYPE);
2502 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
/* "cpt_ie_sts": busy/free bitmaps for the IPsec (IE) engines. */
2504 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
2506 return cpt_eng_sts_display(filp, CPT_IE_TYPE);
2509 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
/*
 * "cpt_engines_info": for every CPT engine dump its group-enable mask,
 * active info and control registers.
 */
2511 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2513 struct cpt_ctx *ctx = filp->private;
2514 u16 max_ses, max_ies, max_aes;
2515 struct rvu *rvu = ctx->rvu;
2516 int blkaddr = ctx->blkaddr;
/* Total engine count = SE + IE + AE, from CPT_AF_CONSTANTS1. */
2520 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2521 max_ses = reg & 0xffff;
2522 max_ies = (reg >> 16) & 0xffff;
2523 max_aes = (reg >> 32) & 0xffff;
2525 e_max = max_ses + max_ies + max_aes;
2527 seq_puts(filp, "===========================================\n");
2528 for (e = 0; e < e_max; e++) {
2529 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2530 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
2532 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2533 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
2535 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2536 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
2538 seq_puts(filp, "===========================================\n");
2543 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
/*
 * "cpt_lfs_info": dump the per-LF control/configuration registers for
 * every local function of this CPT block.
 */
2545 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2547 struct cpt_ctx *ctx = filp->private;
2548 int blkaddr = ctx->blkaddr;
2549 struct rvu *rvu = ctx->rvu;
2550 struct rvu_block *block;
2551 struct rvu_hwinfo *hw;
2556 block = &hw->block[blkaddr];
/* Nothing to show until the LF bitmap has been allocated for the block. */
2557 if (!block->lf.bmap)
2560 seq_puts(filp, "===========================================\n");
2561 for (lf = 0; lf < block->lf.max; lf++) {
2562 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2563 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
2564 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2565 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
2566 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2567 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
/* Block-generic LF config register, indexed via the block's lfshift. */
2568 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2569 (lf << block->lfshift));
2570 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
2571 seq_puts(filp, "===========================================\n");
2576 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
/*
 * "cpt_err_info": dump the CPT fault/error interrupt and error-info
 * registers (fault, parity, RVU, RAS and engine execution errors).
 */
2578 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2580 struct cpt_ctx *ctx = filp->private;
2581 struct rvu *rvu = ctx->rvu;
2582 int blkaddr = ctx->blkaddr;
/* Two FLT/PSN_EXE instances exist; both halves are printed together. */
2585 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2586 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2587 seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
2588 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2589 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2590 seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
2591 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2592 seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
2593 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2594 seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
2595 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2596 seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
2597 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2598 seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
2603 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
/*
 * "cpt_pc": dump the CPT performance counters — instruction request and
 * latency counts, NCB read statistics, active cycles and clock count.
 */
2605 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2607 struct cpt_ctx *ctx = filp->private;
2608 struct rvu *rvu = ctx->rvu;
2609 int blkaddr = ctx->blkaddr;
2612 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2613 seq_printf(filp, "CPT instruction requests %llu\n", reg);
2614 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2615 seq_printf(filp, "CPT instruction latency %llu\n", reg);
2616 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2617 seq_printf(filp, "CPT NCB read requests %llu\n", reg);
2618 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2619 seq_printf(filp, "CPT NCB read latency %llu\n", reg);
2620 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2621 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
2622 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2623 seq_printf(filp, "CPT active cycles pc %llu\n", reg);
2624 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2625 seq_printf(filp, "CPT clock count pc %llu\n", reg);
2630 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
/*
 * Create the per-block CPT debugfs directory ("cpt" for CPT0, "cpt1"
 * for CPT1) and its nodes.  Each node gets a cpt_ctx carrying the rvu
 * handle plus this block's address so the same display callbacks serve
 * both blocks.
 */
2632 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2634 struct cpt_ctx *ctx;
/* Skip silently when this CPT block does not exist on the silicon. */
2636 if (!is_block_implemented(rvu->hw, blkaddr))
2639 if (blkaddr == BLKADDR_CPT0) {
2640 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2641 ctx = &rvu->rvu_dbg.cpt_ctx[0];
2642 ctx->blkaddr = BLKADDR_CPT0;
2645 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2647 ctx = &rvu->rvu_dbg.cpt_ctx[1];
2648 ctx->blkaddr = BLKADDR_CPT1;
2652 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2653 &rvu_dbg_cpt_pc_fops);
2654 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2655 &rvu_dbg_cpt_ae_sts_fops);
2656 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2657 &rvu_dbg_cpt_se_sts_fops);
2658 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2659 &rvu_dbg_cpt_ie_sts_fops);
2660 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2661 &rvu_dbg_cpt_engines_info_fops);
2662 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2663 &rvu_dbg_cpt_lfs_info_fops);
2664 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2665 &rvu_dbg_cpt_err_info_fops);
/*
 * Pick the top-level debugfs directory name based on the silicon
 * generation (OcteonTX2 vs. newer; the returned strings — presumably
 * DEBUGFS_DIR_NAME for OTX2 — are elided from this listing).
 */
2668 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
2670 if (!is_rvu_otx2(rvu))
/*
 * Entry point: build the whole AF debugfs hierarchy — root directory,
 * resource-allocation node, PF-to-MAC map, then the NPA/NIX/CGX/NPC/CPT
 * sub-directories.
 */
2676 void rvu_dbg_init(struct rvu *rvu)
2678 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2680 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2681 &rvu_dbg_rsrc_status_fops);
/* Skip the MAC map node entirely when no CGX/RPM MACs are present. */
2683 if (!cgx_get_cgxcnt_max())
/* Same fops, but the node name reflects the MAC type: CGX vs. RPM. */
2686 if (is_rvu_otx2(rvu))
2687 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2688 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2690 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2691 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2694 rvu_dbg_npa_init(rvu);
2695 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2697 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2698 rvu_dbg_cgx_init(rvu);
2699 rvu_dbg_npc_init(rvu);
2700 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2701 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
/* Tear down the entire debugfs tree created by rvu_dbg_init(). */
2704 void rvu_dbg_exit(struct rvu *rvu)
2706 debugfs_remove_recursive(rvu->rvu_dbg.root);
2709 #endif /* CONFIG_DEBUG_FS */